xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision 40ea51dee3aa8ae6f07ff8bf1299bfe7799a4db5)
1 /*
2  * Copyright (c) 2016, Linaro Limited
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  * this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  * this list of conditions and the following disclaimer in the documentation
14  * and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <arm.h>
30 #include <assert.h>
31 #include <keep.h>
32 #include <sys/queue.h>
33 #include <kernel/abort.h>
34 #include <kernel/panic.h>
35 #include <kernel/spinlock.h>
36 #include <kernel/tee_misc.h>
37 #include <kernel/tee_ta_manager.h>
38 #include <kernel/thread.h>
39 #include <mm/core_memprot.h>
40 #include <mm/tee_mm.h>
41 #include <mm/tee_pager.h>
42 #include <types_ext.h>
43 #include <stdlib.h>
44 #include <tee_api_defines.h>
45 #include <tee/tee_cryp_provider.h>
46 #include <trace.h>
47 #include <utee_defines.h>
48 #include <util.h>
49 
50 #include "pager_private.h"
51 
52 #define PAGER_AE_KEY_BITS	256
53 
54 struct pager_rw_pstate {
55 	uint64_t iv;
56 	uint8_t tag[PAGER_AES_GCM_TAG_LEN];
57 };
58 
59 enum area_type {
60 	AREA_TYPE_RO,
61 	AREA_TYPE_RW,
62 	AREA_TYPE_LOCK,
63 };
64 
65 struct tee_pager_area {
66 	union {
67 		const uint8_t *hashes;
68 		struct pager_rw_pstate *rwp;
69 	} u;
70 	uint8_t *store;
71 	enum area_type type;
72 	uint32_t flags;
73 	vaddr_t base;
74 	size_t size;
75 	struct pgt *pgt;
76 	TAILQ_ENTRY(tee_pager_area) link;
77 };
78 
79 TAILQ_HEAD(tee_pager_area_head, tee_pager_area);
80 
81 static struct tee_pager_area_head tee_pager_area_head =
82 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
83 
84 #define INVALID_PGIDX	UINT_MAX
85 
86 /*
87  * struct tee_pager_pmem - Represents a physical page used for paging.
88  *
89  * @pgidx	an index of the entry in the area's translation table (area->pgt)
90  * @va_alias	Virtual address where the physical page always is aliased.
91  *		Used during remapping of the page when the content needs to
92  *		be updated before it's available at the new location.
93  * @area	a pointer to the pager area
94  */
95 struct tee_pager_pmem {
96 	unsigned pgidx;
97 	void *va_alias;
98 	struct tee_pager_area *area;
99 	TAILQ_ENTRY(tee_pager_pmem) link;
100 };
101 
102 /* The list of physical pages. The first page in the list is the oldest */
103 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
104 
105 static struct tee_pager_pmem_head tee_pager_pmem_head =
106 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
107 
108 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
109 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
110 
111 static uint8_t pager_ae_key[PAGER_AE_KEY_BITS / 8];
112 
113 /* Maximum number of pages to hide at a time */
114 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
115 
116 /* Number of registered physical pages, used when hiding pages. */
117 static size_t tee_pager_npages;
118 
119 #ifdef CFG_WITH_STATS
120 static struct tee_pager_stats pager_stats;
121 
122 static inline void incr_ro_hits(void)
123 {
124 	pager_stats.ro_hits++;
125 }
126 
127 static inline void incr_rw_hits(void)
128 {
129 	pager_stats.rw_hits++;
130 }
131 
132 static inline void incr_hidden_hits(void)
133 {
134 	pager_stats.hidden_hits++;
135 }
136 
137 static inline void incr_zi_released(void)
138 {
139 	pager_stats.zi_released++;
140 }
141 
142 static inline void incr_npages_all(void)
143 {
144 	pager_stats.npages_all++;
145 }
146 
147 static inline void set_npages(void)
148 {
149 	pager_stats.npages = tee_pager_npages;
150 }
151 
152 void tee_pager_get_stats(struct tee_pager_stats *stats)
153 {
154 	*stats = pager_stats;
155 
156 	pager_stats.hidden_hits = 0;
157 	pager_stats.ro_hits = 0;
158 	pager_stats.rw_hits = 0;
159 	pager_stats.zi_released = 0;
160 }
161 
162 #else /* CFG_WITH_STATS */
163 static inline void incr_ro_hits(void) { }
164 static inline void incr_rw_hits(void) { }
165 static inline void incr_hidden_hits(void) { }
166 static inline void incr_zi_released(void) { }
167 static inline void incr_npages_all(void) { }
168 static inline void set_npages(void) { }
169 
170 void tee_pager_get_stats(struct tee_pager_stats *stats)
171 {
172 	memset(stats, 0, sizeof(struct tee_pager_stats));
173 }
174 #endif /* CFG_WITH_STATS */
175 
176 static struct pgt pager_core_pgt;
177 struct core_mmu_table_info tee_pager_tbl_info;
178 static struct core_mmu_table_info pager_alias_tbl_info;
179 
180 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
181 
182 /* Defines the range of the alias area */
183 static tee_mm_entry_t *pager_alias_area;
184 /*
185  * Physical pages are added in a stack-like fashion to the alias area.
186  * @pager_alias_next_free gives the address of the next free entry,
187  * if @pager_alias_next_free is != 0
188  */
189 static uintptr_t pager_alias_next_free;
190 
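/*
 * The pager spinlock serializes updates of the pmem lists and of the
 * paged translation table entries. It is taken with exceptions masked;
 * the returned mask must be passed back to pager_unlock().
 */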
191 static uint32_t pager_lock(void)
192 {
193 	return cpu_spin_lock_xsave(&pager_spinlock);
194 }
195 
196 static void pager_unlock(uint32_t exceptions)
197 {
198 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
199 }
200 
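/*
 * Records @mm as the alias area used to populate pages before they are
 * mapped at their final address. The range must be page aligned and
 * covered by a small-page translation table; any existing mappings in
 * the range are cleared.
 */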
201 static void set_alias_area(tee_mm_entry_t *mm)
202 {
203 	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
204 	size_t tbl_va_size;
205 	unsigned idx;
206 	unsigned last_idx;
207 	vaddr_t smem = tee_mm_get_smem(mm);
208 	size_t nbytes = tee_mm_get_bytes(mm);
209 
210 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
211 
212 	if (pager_alias_area)
213 		panic("null pager_alias_area");
214 
215 	if (!ti->num_entries && !core_mmu_find_table(smem, UINT_MAX, ti))
216 		panic("Can't find translation table");
217 
218 	if ((1 << ti->shift) != SMALL_PAGE_SIZE)
219 		panic("Unsupported page size in translation table");
220 
221 	tbl_va_size = (1 << ti->shift) * ti->num_entries;
222 	if (!core_is_buffer_inside(smem, nbytes,
223 				   ti->va_base, tbl_va_size)) {
224 		EMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
225 		     smem, nbytes, ti->va_base, tbl_va_size);
226 		panic();
227 	}
228 
229 	if (smem & SMALL_PAGE_MASK || nbytes & SMALL_PAGE_MASK)
230 		panic("invalid area alignment");
231 
232 	pager_alias_area = mm;
233 	pager_alias_next_free = smem;
234 
235 	/* Clear all mappings in the alias area */
236 	idx = core_mmu_va2idx(ti, smem);
237 	last_idx = core_mmu_va2idx(ti, smem + nbytes);
238 	for (; idx < last_idx; idx++)
239 		core_mmu_set_entry(ti, idx, 0, 0);
240 
241 	/* TODO only invalidate entries touched above */
242 	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
243 }
244 
245 static void generate_ae_key(void)
246 {
247 	if (rng_generate(pager_ae_key, sizeof(pager_ae_key)) != TEE_SUCCESS)
248 		panic("failed to generate random");
249 }
250 
251 void tee_pager_init(tee_mm_entry_t *mm_alias)
252 {
253 	set_alias_area(mm_alias);
254 	generate_ae_key();
255 }
256 
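/*
 * Maps @pa at the next free entry in the alias area with a cached,
 * secure, read/write mapping and returns the alias virtual address.
 * When the alias area is exhausted pager_alias_next_free is set to 0.
 */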
257 static void *pager_add_alias_page(paddr_t pa)
258 {
259 	unsigned idx;
260 	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
261 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
262 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
263 			TEE_MATTR_SECURE | TEE_MATTR_PRW;
264 
265 	DMSG("0x%" PRIxPA, pa);
266 
267 	if (!pager_alias_next_free || !ti->num_entries)
268 		panic("invalid alias entry");
269 
270 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
271 	core_mmu_set_entry(ti, idx, pa, attr);
272 	pgt_inc_used_entries(&pager_core_pgt);
273 	pager_alias_next_free += SMALL_PAGE_SIZE;
274 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
275 				      tee_mm_get_bytes(pager_alias_area)))
276 		pager_alias_next_free = 0;
277 	return (void *)core_mmu_idx2va(ti, idx);
278 }
279 
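/*
 * Allocates and initializes a pager area. Writable areas that aren't
 * locked get a backing store in secure DDR plus one struct
 * pager_rw_pstate (IV and tag) per page, read-only areas reference the
 * supplied store and hashes, and locked areas need no backing store.
 */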
280 static struct tee_pager_area *alloc_area(struct pgt *pgt,
281 					 vaddr_t base, size_t size,
282 					 uint32_t flags, const void *store,
283 					 const void *hashes)
284 {
285 	struct tee_pager_area *area = calloc(1, sizeof(*area));
286 	enum area_type at;
287 	tee_mm_entry_t *mm_store = NULL;
288 
289 	if (!area)
290 		return NULL;
291 
292 	if (flags & (TEE_MATTR_PW | TEE_MATTR_UW)) {
293 		if (flags & TEE_MATTR_LOCKED) {
294 			at = AREA_TYPE_LOCK;
295 			goto out;
296 		}
297 		mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size);
298 		if (!mm_store)
299 			goto bad;
300 		area->store = phys_to_virt(tee_mm_get_smem(mm_store),
301 					   MEM_AREA_TA_RAM);
302 		if (!area->store)
303 			goto bad;
304 		area->u.rwp = calloc(size / SMALL_PAGE_SIZE,
305 				     sizeof(struct pager_rw_pstate));
306 		if (!area->u.rwp)
307 			goto bad;
308 		at = AREA_TYPE_RW;
309 	} else {
310 		area->store = (void *)store;
311 		area->u.hashes = hashes;
312 		at = AREA_TYPE_RO;
313 	}
314 out:
315 	area->pgt = pgt;
316 	area->base = base;
317 	area->size = size;
318 	area->flags = flags;
319 	area->type = at;
320 	return area;
321 bad:
322 	tee_mm_free(mm_store);
323 	free(area->u.rwp);
324 	free(area);
325 	return NULL;
326 }
327 
328 static void area_insert_tail(struct tee_pager_area *area)
329 {
330 	uint32_t exceptions = pager_lock();
331 
332 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
333 
334 	pager_unlock(exceptions);
335 }
336 KEEP_PAGER(area_insert_tail);
337 
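/* Counts the entries in @pgt with a non-zero physical address */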
338 static size_t tbl_usage_count(struct pgt *pgt)
339 {
340 	size_t n;
341 	paddr_t pa;
342 	size_t usage = 0;
343 
344 	for (n = 0; n < tee_pager_tbl_info.num_entries; n++) {
345 		core_mmu_get_entry_primitive(pgt->tbl, tee_pager_tbl_info.level,
346 					     n, &pa, NULL);
347 		if (pa)
348 			usage++;
349 	}
350 	return usage;
351 }
352 
353 bool tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
354 			const void *store, const void *hashes)
355 {
356 	struct tee_pager_area *area;
357 	size_t tbl_va_size;
358 	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
359 
360 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
361 		base, base + size, flags, store, hashes);
362 
363 	if (base & SMALL_PAGE_MASK || size & SMALL_PAGE_MASK || !size) {
364 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, size);
365 		panic();
366 	}
367 
368 	if (!(flags & TEE_MATTR_PW) && (!store || !hashes))
369 		panic("non-write pages must provide store and hashes");
370 
371 	if ((flags & TEE_MATTR_PW) && (store || hashes))
372 		panic("write pages cannot provide store or hashes");
373 
374 	if (!pager_core_pgt.tbl) {
375 		pager_core_pgt.tbl = ti->table;
376 		pgt_set_used_entries(&pager_core_pgt,
377 				     tbl_usage_count(&pager_core_pgt));
378 	}
379 
380 	tbl_va_size = (1 << ti->shift) * ti->num_entries;
381 	if (!core_is_buffer_inside(base, size, ti->va_base, tbl_va_size)) {
382 		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
383 			base, size, ti->va_base, tbl_va_size);
384 		return false;
385 	}
386 
387 	area = alloc_area(&pager_core_pgt, base, size, flags, store, hashes);
388 	if (!area)
389 		return false;
390 
391 	area_insert_tail(area);
392 	return true;
393 }
394 
395 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
396 					vaddr_t va)
397 {
398 	struct tee_pager_area *area;
399 
400 	if (!areas)
401 		return NULL;
402 
403 	TAILQ_FOREACH(area, areas, link) {
404 		if (core_is_buffer_inside(va, 1, area->base, area->size))
405 			return area;
406 	}
407 	return NULL;
408 }
409 
410 #ifdef CFG_PAGED_USER_TA
411 static struct tee_pager_area *find_uta_area(vaddr_t va)
412 {
413 	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
414 
415 	if (!ctx || !is_user_ta_ctx(ctx))
416 		return NULL;
417 	return find_area(to_user_ta_ctx(ctx)->areas, va);
418 }
419 #else
420 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
421 {
422 	return NULL;
423 }
424 #endif /*CFG_PAGED_USER_TA*/
425 
426 
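/*
 * Translates area flags into mapping attributes for a valid block
 * entry. Areas without any user access permissions are mapped as
 * global.
 */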
427 static uint32_t get_area_mattr(uint32_t area_flags)
428 {
429 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
430 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
431 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
432 
433 	if (!(area_flags & (TEE_MATTR_UR | TEE_MATTR_UX | TEE_MATTR_UW)))
434 		attr |= TEE_MATTR_GLOBAL;
435 
436 	return attr;
437 }
438 
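/* Returns the physical address of @pmem by looking up its alias mapping */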
439 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
440 {
441 	paddr_t pa;
442 	unsigned idx;
443 
444 	idx = core_mmu_va2idx(&pager_alias_tbl_info, (vaddr_t)pmem->va_alias);
445 	core_mmu_get_entry(&pager_alias_tbl_info, idx, &pa, NULL);
446 	return pa;
447 }
448 
449 static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
450 			void *dst)
451 {
452 	struct pager_aes_gcm_iv iv = {
453 		{ (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }
454 	};
455 
456 	return pager_aes_gcm_decrypt(pager_ae_key, sizeof(pager_ae_key),
457 				     &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE);
458 }
459 
460 static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
461 {
462 	struct pager_aes_gcm_iv iv;
463 
464 	assert((rwp->iv + 1) > rwp->iv);
465 	rwp->iv++;
466 	/*
467 	 * IV is constructed as recommended in section "8.2.1 Deterministic
468 	 * Construction" of "Recommendation for Block Cipher Modes of
469 	 * Operation: Galois/Counter Mode (GCM) and GMAC",
470 	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
471 	 */
472 	iv.iv[0] = (vaddr_t)rwp;
473 	iv.iv[1] = rwp->iv >> 32;
474 	iv.iv[2] = rwp->iv;
475 
476 	if (!pager_aes_gcm_encrypt(pager_ae_key, sizeof(pager_ae_key),
477 				   &iv, rwp->tag,
478 				   src, dst, SMALL_PAGE_SIZE))
479 		panic("gcm failed");
480 }
481 
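/*
 * Populates the page at @va_alias from the area's backing store:
 * read-only pages are copied and verified against their SHA-256 hash,
 * read/write pages are decrypted (or zero filled if never saved, i.e.
 * IV is 0) and locked pages are simply zero filled.
 */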
482 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
483 			void *va_alias)
484 {
485 	size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
486 	const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;
487 
488 	switch (area->type) {
489 	case AREA_TYPE_RO:
490 		{
491 			const void *hash = area->u.hashes +
492 					   idx * TEE_SHA256_HASH_SIZE;
493 
494 			memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
495 			incr_ro_hits();
496 
497 			if (hash_sha256_check(hash, va_alias,
498 					      SMALL_PAGE_SIZE) != TEE_SUCCESS) {
499 				EMSG("PH 0x%" PRIxVA " failed", page_va);
500 				panic();
501 			}
502 		}
503 		break;
504 	case AREA_TYPE_RW:
505 		FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64,
506 			va_alias, page_va, area->u.rwp[idx].iv);
507 		if (!area->u.rwp[idx].iv)
508 			memset(va_alias, 0, SMALL_PAGE_SIZE);
509 		else if (!decrypt_page(&area->u.rwp[idx], stored_page,
510 				       va_alias)) {
511 			EMSG("PH 0x%" PRIxVA " failed", page_va);
512 			panic();
513 		}
514 		incr_rw_hits();
515 		break;
516 	case AREA_TYPE_LOCK:
517 		FMSG("Zero init %p %#" PRIxVA, va_alias, page_va);
518 		memset(va_alias, 0, SMALL_PAGE_SIZE);
519 		break;
520 	default:
521 		panic();
522 	}
523 }
524 
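/*
 * Encrypts a dirty read/write page back to its backing store. Pages in
 * read-only or locked areas, or clean pages, need no saving.
 */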
525 static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
526 {
527 	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
528 				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;
529 
530 	if (pmem->area->type == AREA_TYPE_RW && (attr & dirty_bits)) {
531 		size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK;
532 		size_t idx = pmem->pgidx - (offs >> SMALL_PAGE_SHIFT);
533 		void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE;
534 
535 		assert(pmem->area->flags & (TEE_MATTR_PW | TEE_MATTR_UW));
536 		encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias,
537 			     stored_page);
538 		FMSG("Saved %#" PRIxVA " iv %#" PRIx64,
539 			pmem->area->base + idx * SMALL_PAGE_SIZE,
540 			pmem->area->u.rwp[idx].iv);
541 	}
542 }
543 
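/*
 * area_get_entry()/area_set_entry() operate on the small-page level
 * entries of the translation table assigned to the area; @idx is
 * relative to the start of that table, see area_va2idx() below.
 */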
544 static void area_get_entry(struct tee_pager_area *area, size_t idx,
545 			   paddr_t *pa, uint32_t *attr)
546 {
547 	assert(area->pgt);
548 	assert(idx < tee_pager_tbl_info.num_entries);
549 	core_mmu_get_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,
550 				     idx, pa, attr);
551 }
552 
553 static void area_set_entry(struct tee_pager_area *area, size_t idx,
554 			   paddr_t pa, uint32_t attr)
555 {
556 	assert(area->pgt);
557 	assert(idx < tee_pager_tbl_info.num_entries);
558 	core_mmu_set_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,
559 				     idx, pa, attr);
560 }
561 
562 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
563 {
564 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
565 }
566 
567 static vaddr_t __maybe_unused area_idx2va(struct tee_pager_area *area,
568 					 size_t idx)
569 {
570 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
571 }
572 
573 #ifdef CFG_PAGED_USER_TA
574 static void free_area(struct tee_pager_area *area)
575 {
576 	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr,
577 				virt_to_phys(area->store)));
578 	if (area->type == AREA_TYPE_RW)
579 		free(area->u.rwp);
580 	free(area);
581 }
582 
583 static bool pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
584 			       size_t size)
585 {
586 	struct tee_pager_area *area;
587 	uint32_t flags;
588 	vaddr_t b = base;
589 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
590 
591 	if (!utc->areas) {
592 		utc->areas = malloc(sizeof(*utc->areas));
593 		if (!utc->areas)
594 			return false;
595 		TAILQ_INIT(utc->areas);
596 	}
597 
598 	flags = TEE_MATTR_PRW | TEE_MATTR_URWX;
599 
600 	while (s) {
601 		size_t s2;
602 
603 		if (find_area(utc->areas, b))
604 			return false;
605 
606 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
607 
608 		/* Table info will be set when the context is activated. */
609 		area = alloc_area(NULL, b, s2, flags, NULL, NULL);
610 		if (!area)
611 			return false;
612 		TAILQ_INSERT_TAIL(utc->areas, area, link);
613 		b += s2;
614 		s -= s2;
615 	}
616 
617 	return true;
618 }
619 
620 bool tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size)
621 {
622 	struct thread_specific_data *tsd = thread_get_tsd();
623 	struct tee_pager_area *area;
624 	struct core_mmu_table_info dir_info = { NULL };
625 
626 	if (&utc->ctx != tsd->ctx) {
627 		/*
628 		 * Changes are to a utc that isn't active. Just add the
629 		 * areas; page tables will be dealt with later.
630 		 */
631 		return pager_add_uta_area(utc, base, size);
632 	}
633 
634 	/*
635 	 * Assign page tables before adding areas to be able to tell which
636 	 * are newly added and should be removed in case of failure.
637 	 */
638 	tee_pager_assign_uta_tables(utc);
639 	if (!pager_add_uta_area(utc, base, size)) {
640 		struct tee_pager_area *next_a;
641 
642 		/* Remove all added areas */
643 		TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
644 			if (!area->pgt) {
645 				TAILQ_REMOVE(utc->areas, area, link);
646 				free_area(area);
647 			}
648 		}
649 		return false;
650 	}
651 
652 	/*
653 	 * Assign page tables to the new areas and make sure that the page
654 	 * tables are registered in the upper table.
655 	 */
656 	tee_pager_assign_uta_tables(utc);
657 	core_mmu_get_user_pgdir(&dir_info);
658 	TAILQ_FOREACH(area, utc->areas, link) {
659 		paddr_t pa;
660 		size_t idx;
661 		uint32_t attr;
662 
663 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
664 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
665 
666 		/*
667 		 * Check if the page table is already in use; if it is, it's
668 		 * already registered.
669 		 */
670 		if (area->pgt->num_used_entries) {
671 			assert(attr & TEE_MATTR_TABLE);
672 			assert(pa == virt_to_phys(area->pgt->tbl));
673 			continue;
674 		}
675 
676 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
677 		pa = virt_to_phys(area->pgt->tbl);
678 		assert(pa);
679 		/*
680 		 * Note that the update of the table entry is guaranteed to
681 		 * be atomic.
682 		 */
683 		core_mmu_set_entry(&dir_info, idx, pa, attr);
684 	}
685 
686 	return true;
687 }
688 
689 static void init_tbl_info_from_pgt(struct core_mmu_table_info *ti,
690 				   struct pgt *pgt)
691 {
692 	assert(pgt);
693 	ti->table = pgt->tbl;
694 	ti->va_base = pgt->vabase;
695 	ti->level = tee_pager_tbl_info.level;
696 	ti->shift = tee_pager_tbl_info.shift;
697 	ti->num_entries = tee_pager_tbl_info.num_entries;
698 }
699 
700 static void transpose_area(struct tee_pager_area *area, struct pgt *new_pgt,
701 			   vaddr_t new_base)
702 {
703 	uint32_t exceptions = pager_lock();
704 
705 	/*
706 	 * If there's no pgt assigned to the old area there are no pages to
707 	 * deal with either, just update with a new pgt and base.
708 	 */
709 	if (area->pgt) {
710 		struct core_mmu_table_info old_ti;
711 		struct core_mmu_table_info new_ti;
712 		struct tee_pager_pmem *pmem;
713 
714 		init_tbl_info_from_pgt(&old_ti, area->pgt);
715 		init_tbl_info_from_pgt(&new_ti, new_pgt);
716 
717 
718 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
719 			vaddr_t va;
720 			paddr_t pa;
721 			uint32_t attr;
722 
723 			if (pmem->area != area)
724 				continue;
725 			core_mmu_get_entry(&old_ti, pmem->pgidx, &pa, &attr);
726 			core_mmu_set_entry(&old_ti, pmem->pgidx, 0, 0);
727 
728 			assert(pa == get_pmem_pa(pmem));
729 			assert(attr);
730 			assert(area->pgt->num_used_entries);
731 			area->pgt->num_used_entries--;
732 
733 			va = core_mmu_idx2va(&old_ti, pmem->pgidx);
734 			va = va - area->base + new_base;
735 			pmem->pgidx = core_mmu_va2idx(&new_ti, va);
736 			core_mmu_set_entry(&new_ti, pmem->pgidx, pa, attr);
737 			new_pgt->num_used_entries++;
738 		}
739 	}
740 
741 	area->pgt = new_pgt;
742 	area->base = new_base;
743 	pager_unlock(exceptions);
744 }
745 KEEP_PAGER(transpose_area);
746 
747 void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
748 				   vaddr_t src_base,
749 				   struct user_ta_ctx *dst_utc,
750 				   vaddr_t dst_base, struct pgt **dst_pgt,
751 				   size_t size)
752 {
753 	struct tee_pager_area *area;
754 	struct tee_pager_area *next_a;
755 
756 	TAILQ_FOREACH_SAFE(area, src_utc->areas, link, next_a) {
757 		vaddr_t new_area_base;
758 		size_t new_idx;
759 
760 		if (!core_is_buffer_inside(area->base, area->size,
761 					  src_base, size))
762 			continue;
763 
764 		TAILQ_REMOVE(src_utc->areas, area, link);
765 
766 		new_area_base = dst_base + (src_base - area->base);
767 		new_idx = (new_area_base - dst_pgt[0]->vabase) /
768 			  CORE_MMU_PGDIR_SIZE;
769 		assert((new_area_base & ~CORE_MMU_PGDIR_MASK) ==
770 		       dst_pgt[new_idx]->vabase);
771 		transpose_area(area, dst_pgt[new_idx], new_area_base);
772 
773 		/*
774 		 * Assert that this will not cause any conflicts in the new
775 		 * utc.  This should already be guaranteed, but a bug here
776 		 * could be tricky to find.
777 		 */
778 		assert(!find_area(dst_utc->areas, area->base));
779 		TAILQ_INSERT_TAIL(dst_utc->areas, area, link);
780 	}
781 }
782 
783 static void rem_area(struct tee_pager_area_head *area_head,
784 		     struct tee_pager_area *area)
785 {
786 	struct tee_pager_pmem *pmem;
787 	uint32_t exceptions;
788 
789 	exceptions = pager_lock();
790 
791 	TAILQ_REMOVE(area_head, area, link);
792 
793 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
794 		if (pmem->area == area) {
795 			area_set_entry(area, pmem->pgidx, 0, 0);
796 			pgt_dec_used_entries(area->pgt);
797 			pmem->area = NULL;
798 			pmem->pgidx = INVALID_PGIDX;
799 		}
800 	}
801 
802 	pager_unlock(exceptions);
803 	free_area(area);
804 }
805 KEEP_PAGER(rem_area);
806 
807 void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
808 			      size_t size)
809 {
810 	struct tee_pager_area *area;
811 	struct tee_pager_area *next_a;
812 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
813 
814 	TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
815 		if (core_is_buffer_inside(area->base, area->size, base, s))
816 			rem_area(utc->areas, area);
817 	}
818 }
819 
820 void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
821 {
822 	struct tee_pager_area *area;
823 
824 	if (!utc->areas)
825 		return;
826 
827 	while (true) {
828 		area = TAILQ_FIRST(utc->areas);
829 		if (!area)
830 			break;
831 		TAILQ_REMOVE(utc->areas, area, link);
832 		free_area(area);
833 	}
834 
835 	free(utc->areas);
836 }
837 
838 bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
839 				 size_t size, uint32_t flags)
840 {
841 	bool ret;
842 	vaddr_t b = base;
843 	size_t s = size;
844 	size_t s2;
845 	struct tee_pager_area *area = find_area(utc->areas, b);
846 	uint32_t exceptions;
847 	struct tee_pager_pmem *pmem;
848 	paddr_t pa;
849 	uint32_t a;
850 	uint32_t f;
851 
852 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
853 	if (f & TEE_MATTR_UW)
854 		f |= TEE_MATTR_PW;
855 	f = get_area_mattr(f);
856 
857 	exceptions = pager_lock();
858 
859 	while (s) {
860 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
861 		if (!area || area->base != b || area->size != s2) {
862 			ret = false;
863 			goto out;
864 		}
865 		b += s2;
866 		s -= s2;
867 
868 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
869 			if (pmem->area != area)
870 				continue;
871 			area_get_entry(pmem->area, pmem->pgidx, &pa, &a);
872 			if (a & TEE_MATTR_VALID_BLOCK)
873 				assert(pa == get_pmem_pa(pmem));
874 			else
875 				pa = get_pmem_pa(pmem);
876 			if (a == f)
877 				continue;
878 			area_set_entry(pmem->area, pmem->pgidx, 0, 0);
879 			/* TODO only invalidate entries touched above */
880 			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
881 			if (!(flags & TEE_MATTR_UW))
882 				tee_pager_save_page(pmem, a);
883 
884 			area_set_entry(pmem->area, pmem->pgidx, pa, f);
885 
886 			if (flags & TEE_MATTR_UX) {
887 				void *va = (void *)area_idx2va(pmem->area,
888 							       pmem->pgidx);
889 
890 				cache_op_inner(DCACHE_AREA_CLEAN, va,
891 						SMALL_PAGE_SIZE);
892 				cache_op_inner(ICACHE_AREA_INVALIDATE, va,
893 						SMALL_PAGE_SIZE);
894 			}
895 		}
896 
897 		area->flags = f;
898 		area = TAILQ_NEXT(area, link);
899 	}
900 
901 	ret = true;
902 out:
903 	pager_unlock(exceptions);
904 	return ret;
905 }
906 KEEP_PAGER(tee_pager_set_uta_area_attr);
907 #endif /*CFG_PAGED_USER_TA*/
908 
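/*
 * If @page_va hits a hidden page its real attributes are restored
 * (write permission is dropped unless the page was hidden dirty), the
 * page is moved to the back of the list and true is returned.
 */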
909 static bool tee_pager_unhide_page(vaddr_t page_va)
910 {
911 	struct tee_pager_pmem *pmem;
912 
913 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
914 		paddr_t pa;
915 		uint32_t attr;
916 
917 		if (pmem->pgidx == INVALID_PGIDX)
918 			continue;
919 
920 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
921 
922 		if (!(attr &
923 		     (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
924 			continue;
925 
926 		if (area_va2idx(pmem->area, page_va) == pmem->pgidx) {
927 			uint32_t a = get_area_mattr(pmem->area->flags);
928 
929 			/* page is hidden, show and move to back */
930 			if (pa != get_pmem_pa(pmem))
931 				panic("unexpected pa");
932 
933 			/*
934 			 * If it's not a dirty block, then it should be
935 			 * read only.
936 			 */
937 			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
938 				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
939 			else
940 				FMSG("Unhide %#" PRIxVA, page_va);
941 
942 			if (page_va == 0x8000a000)
943 				FMSG("unhide %#" PRIxVA " a %#" PRIX32,
944 					page_va, a);
945 			area_set_entry(pmem->area, pmem->pgidx, pa, a);
946 
947 			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
948 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
949 
950 			/* TODO only invalidate entry touched above */
951 			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
952 
953 			incr_hidden_hits();
954 			return true;
955 		}
956 	}
957 
958 	return false;
959 }
960 
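/*
 * Hides up to TEE_PAGER_NHIDE of the oldest mapped pages so that the
 * next access to them faults and tee_pager_unhide_page() can move them
 * to the back of the list.
 */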
961 static void tee_pager_hide_pages(void)
962 {
963 	struct tee_pager_pmem *pmem;
964 	size_t n = 0;
965 
966 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
967 		paddr_t pa;
968 		uint32_t attr;
969 		uint32_t a;
970 
971 		if (n >= TEE_PAGER_NHIDE)
972 			break;
973 		n++;
974 
975 		/* we cannot hide pages when pmem->area is not defined. */
976 		if (!pmem->area)
977 			continue;
978 
979 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
980 		if (!(attr & TEE_MATTR_VALID_BLOCK))
981 			continue;
982 
983 		assert(pa == get_pmem_pa(pmem));
984 		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)) {
985 			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
986 			FMSG("Hide %#" PRIxVA,
987 			     area_idx2va(pmem->area, pmem->pgidx));
988 		} else
989 			a = TEE_MATTR_HIDDEN_BLOCK;
990 		area_set_entry(pmem->area, pmem->pgidx, pa, a);
991 	}
992 
993 	/* TODO only invalidate entries touched above */
994 	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
995 }
996 
997 /*
998  * Find mapped pmem, unmap it and move it to the pageable pmem list.
999  * Return false if the page was not mapped, and true if it was.
1000  */
1001 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1002 				       vaddr_t page_va)
1003 {
1004 	struct tee_pager_pmem *pmem;
1005 	unsigned pgidx;
1006 	paddr_t pa;
1007 	uint32_t attr;
1008 
1009 	pgidx = area_va2idx(area, page_va);
1010 	area_get_entry(area, pgidx, &pa, &attr);
1011 
1012 	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
1013 
1014 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1015 		if (pmem->area != area || pmem->pgidx != pgidx)
1016 			continue;
1017 
1018 		assert(pa == get_pmem_pa(pmem));
1019 		area_set_entry(area, pgidx, 0, 0);
1020 		pgt_dec_used_entries(area->pgt);
1021 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1022 		pmem->area = NULL;
1023 		pmem->pgidx = INVALID_PGIDX;
1024 		tee_pager_npages++;
1025 		set_npages();
1026 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1027 		incr_zi_released();
1028 		return true;
1029 	}
1030 
1031 	return false;
1032 }
1033 
1034 /* Finds the oldest page and unmaps it from its old virtual address */
1035 static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
1036 {
1037 	struct tee_pager_pmem *pmem;
1038 
1039 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1040 	if (!pmem) {
1041 		EMSG("No pmem entries");
1042 		return NULL;
1043 	}
1044 	if (pmem->pgidx != INVALID_PGIDX) {
1045 		uint32_t a;
1046 
1047 		assert(pmem->area && pmem->area->pgt);
1048 		area_get_entry(pmem->area, pmem->pgidx, NULL, &a);
1049 		area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1050 		pgt_dec_used_entries(pmem->area->pgt);
1051 		/* TODO only invalidate entries touched above */
1052 		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
1053 		tee_pager_save_page(pmem, a);
1054 	}
1055 
1056 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1057 	pmem->pgidx = INVALID_PGIDX;
1058 	pmem->area = NULL;
1059 	if (area->type == AREA_TYPE_LOCK) {
1060 		/* Move page to lock list */
1061 		if (tee_pager_npages <= 0)
1062 			panic("running out of pages");
1063 		tee_pager_npages--;
1064 		set_npages();
1065 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1066 	} else {
1067 		/* move page to back */
1068 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1069 	}
1070 
1071 	return pmem;
1072 }
1073 
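/*
 * Returns true if the abort needs no further paging: either the access
 * was resolved here (*handled is set to true) or it's an invalid user
 * access that should be reported to the TA. Returns false if the page
 * isn't mapped and has to be paged in by the caller.
 */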
1074 static bool pager_update_permissions(struct tee_pager_area *area,
1075 			struct abort_info *ai, bool *handled)
1076 {
1077 	unsigned int pgidx = area_va2idx(area, ai->va);
1078 	uint32_t attr;
1079 	paddr_t pa;
1080 
1081 	*handled = false;
1082 
1083 	area_get_entry(area, pgidx, &pa, &attr);
1084 
1085 	/* Not mapped */
1086 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1087 		return false;
1088 
1089 	/* Not readable, should not happen */
1090 	if (abort_is_user_exception(ai)) {
1091 		if (!(attr & TEE_MATTR_UR))
1092 			return true;
1093 	} else {
1094 		if (!(attr & TEE_MATTR_PR)) {
1095 			abort_print_error(ai);
1096 			panic();
1097 		}
1098 	}
1099 
1100 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1101 	case CORE_MMU_FAULT_TRANSLATION:
1102 	case CORE_MMU_FAULT_READ_PERMISSION:
1103 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1104 			/* Check if attempting to execute from a NOX page */
1105 			if (abort_is_user_exception(ai)) {
1106 				if (!(attr & TEE_MATTR_UX))
1107 					return true;
1108 			} else {
1109 				if (!(attr & TEE_MATTR_PX)) {
1110 					abort_print_error(ai);
1111 					panic();
1112 				}
1113 			}
1114 		}
1115 		/* Since the page is mapped now it's OK */
1116 		break;
1117 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1118 		/* Check if attempting to write to an RO page */
1119 		if (abort_is_user_exception(ai)) {
1120 			if (!(area->flags & TEE_MATTR_UW))
1121 				return true;
1122 			if (!(attr & TEE_MATTR_UW)) {
1123 				FMSG("Dirty %p",
1124 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1125 				area_set_entry(area, pgidx, pa,
1126 					       get_area_mattr(area->flags));
1127 				/* TODO only invalidate entry above */
1128 				core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
1129 			}
1130 
1131 		} else {
1132 			if (!(area->flags & TEE_MATTR_PW)) {
1133 				abort_print_error(ai);
1134 				panic();
1135 			}
1136 			if (!(attr & TEE_MATTR_PW)) {
1137 				FMSG("Dirty %p",
1138 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1139 				area_set_entry(area, pgidx, pa,
1140 					       get_area_mattr(area->flags));
1141 				/* TODO only invalidate entry above */
1142 				core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
1143 			}
1144 		}
1145 		/* Since the permissions have been updated it's OK now */
1146 		break;
1147 	default:
1148 		/* Some fault we can't deal with */
1149 		if (abort_is_user_exception(ai))
1150 			return true;
1151 		abort_print_error(ai);
1152 		panic();
1153 	}
1154 	*handled = true;
1155 	return true;
1156 }
1157 
1158 #ifdef CFG_TEE_CORE_DEBUG
1159 static void stat_handle_fault(void)
1160 {
1161 	static size_t num_faults;
1162 	static size_t min_npages = SIZE_MAX;
1163 	static size_t total_min_npages = SIZE_MAX;
1164 
1165 	num_faults++;
1166 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1167 		DMSG("nfaults %zu npages %zu (min %zu)",
1168 		     num_faults, tee_pager_npages, min_npages);
1169 		min_npages = tee_pager_npages; /* reset */
1170 	}
1171 	if (tee_pager_npages < min_npages)
1172 		min_npages = tee_pager_npages;
1173 	if (tee_pager_npages < total_min_npages)
1174 		total_min_npages = tee_pager_npages;
1175 }
1176 #else
1177 static void stat_handle_fault(void)
1178 {
1179 }
1180 #endif
1181 
1182 bool tee_pager_handle_fault(struct abort_info *ai)
1183 {
1184 	struct tee_pager_area *area;
1185 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1186 	uint32_t exceptions;
1187 	bool ret;
1188 
1189 #ifdef TEE_PAGER_DEBUG_PRINT
1190 	abort_print(ai);
1191 #endif
1192 
1193 	/*
1194 	 * We're updating pages that can affect several active CPUs at a
1195 	 * time below. We end up here because a thread tries to access some
1196 	 * memory that isn't available. We have to be careful when making
1197 	 * that memory available as other threads may succeed in accessing
1198 	 * that address the moment after we've made it available.
1199 	 *
1200 	 * That means that we can't just map the memory and populate the
1201 	 * page, instead we use the aliased mapping to populate the page
1202 	 * and once everything is ready we map it.
1203 	 */
1204 	exceptions = pager_lock();
1205 
1206 	stat_handle_fault();
1207 
1208 	/* check if the access is valid */
1209 	if (abort_is_user_exception(ai)) {
1210 		area = find_uta_area(ai->va);
1211 
1212 	} else {
1213 		area = find_area(&tee_pager_area_head, ai->va);
1214 		if (!area)
1215 			area = find_uta_area(ai->va);
1216 	}
1217 	if (!area || !area->pgt) {
1218 		ret = false;
1219 		goto out;
1220 	}
1221 
1222 	if (!tee_pager_unhide_page(page_va)) {
1223 		struct tee_pager_pmem *pmem = NULL;
1224 		uint32_t attr;
1225 
1226 		/*
1227 		 * The page wasn't hidden, but some other core may have
1228 		 * updated the table entry before we got here or we need
1229 		 * to make a read-only page read-write (dirty).
1230 		 */
1231 		if (pager_update_permissions(area, ai, &ret)) {
1232 			/*
1233 			 * Nothing more to do with the abort. The problem
1234 			 * could already have been dealt with from another
1235 			 * core or if ret is false the TA will be paniced.
1236 			 */
1237 			goto out;
1238 		}
1239 
1240 		pmem = tee_pager_get_page(area);
1241 		if (!pmem) {
1242 			abort_print(ai);
1243 			panic();
1244 		}
1245 
1246 		/* load page code & data */
1247 		tee_pager_load_page(area, page_va, pmem->va_alias);
1248 
1249 		/*
1250 		 * We've updated the page using the aliased mapping and
1251 		 * some cache maintenance is now needed if it's an
1252 		 * executable page.
1253 		 *
1254 		 * Since the d-cache is a Physically-indexed,
1255 		 * physically-tagged (PIPT) cache we can clean the aliased
1256 		 * address instead of the real virtual address.
1257 		 *
1258 		 * The i-cache can also be PIPT, but may be something else
1259 		 * too, so to keep it simple we invalidate the entire i-cache.
1260 		 * As a future optimization we may invalidate only the
1261 		 * aliased area if it is a PIPT cache, else the entire cache.
1262 		 */
1263 		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1264 			/*
1265 			 * Doing these operations to LoUIS (Level of
1266 			 * unification, Inner Shareable) would be enough
1267 			 */
1268 			cache_op_inner(DCACHE_AREA_CLEAN, pmem->va_alias,
1269 					SMALL_PAGE_SIZE);
1270 			cache_op_inner(ICACHE_INVALIDATE, NULL, 0);
1271 		}
1272 
1273 		pmem->area = area;
1274 		pmem->pgidx = area_va2idx(area, ai->va);
1275 		attr = get_area_mattr(area->flags) &
1276 			~(TEE_MATTR_PW | TEE_MATTR_UW);
1277 		area_set_entry(area, pmem->pgidx, get_pmem_pa(pmem), attr);
1278 		pgt_inc_used_entries(area->pgt);
1279 
1280 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
1281 		     area_idx2va(area, pmem->pgidx), get_pmem_pa(pmem));
1282 
1283 	}
1284 
1285 	tee_pager_hide_pages();
1286 	ret = true;
1287 out:
1288 	pager_unlock(exceptions);
1289 	return ret;
1290 }
1291 
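/*
 * Registers the physical pages currently mapping npages small pages at
 * @vaddr as pageable memory. With @unmap set the pages are unmapped and
 * become immediately available for paging, otherwise they remain mapped
 * and are assigned to the area covering them.
 */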
1292 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1293 {
1294 	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
1295 	size_t n;
1296 
1297 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1298 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1299 
1300 	/* setup memory */
1301 	for (n = 0; n < npages; n++) {
1302 		struct tee_pager_pmem *pmem;
1303 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1304 		unsigned pgidx = core_mmu_va2idx(ti, va);
1305 		paddr_t pa;
1306 		uint32_t attr;
1307 
1308 		/*
1309 		 * Note that we can only support adding pages in the
1310 		 * valid range of this table info, currently not a problem.
1311 		 */
1312 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1313 
1314 		/* Ignore unmapped pages/blocks */
1315 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1316 			continue;
1317 
1318 		pmem = malloc(sizeof(struct tee_pager_pmem));
1319 		if (!pmem)
1320 			panic("out of mem");
1321 
1322 		pmem->va_alias = pager_add_alias_page(pa);
1323 
1324 		if (unmap) {
1325 			pmem->area = NULL;
1326 			pmem->pgidx = INVALID_PGIDX;
1327 			core_mmu_set_entry(ti, pgidx, 0, 0);
1328 			pgt_dec_used_entries(&pager_core_pgt);
1329 		} else {
1330 			/*
1331 			 * The page is still mapped, let's assign the area
1332 			 * and update the protection bits accordingly.
1333 			 */
1334 			pmem->area = find_area(&tee_pager_area_head, va);
1335 			assert(pmem->area->pgt == &pager_core_pgt);
1336 			pmem->pgidx = pgidx;
1337 			assert(pa == get_pmem_pa(pmem));
1338 			area_set_entry(pmem->area, pgidx, pa,
1339 				       get_area_mattr(pmem->area->flags));
1340 		}
1341 
1342 		tee_pager_npages++;
1343 		incr_npages_all();
1344 		set_npages();
1345 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1346 	}
1347 
1348 	/* Invalidate secure TLB */
1349 	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
1350 }
1351 
1352 #ifdef CFG_PAGED_USER_TA
1353 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1354 {
1355 	struct pgt *p = pgt;
1356 
1357 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1358 		p = SLIST_NEXT(p, link);
1359 	return p;
1360 }
1361 
1362 void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
1363 {
1364 	struct tee_pager_area *area;
1365 	struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1366 
1367 	TAILQ_FOREACH(area, utc->areas, link) {
1368 		if (!area->pgt)
1369 			area->pgt = find_pgt(pgt, area->base);
1370 		else
1371 			assert(area->pgt == find_pgt(pgt, area->base));
1372 		if (!area->pgt)
1373 			panic();
1374 	}
1375 }
1376 
1377 static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)
1378 {
1379 	uint32_t attr;
1380 
1381 	assert(pmem->area && pmem->area->pgt);
1382 
1383 	area_get_entry(pmem->area, pmem->pgidx, NULL, &attr);
1384 	area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1385 	tee_pager_save_page(pmem, attr);
1386 	assert(pmem->area->pgt->num_used_entries);
1387 	pmem->area->pgt->num_used_entries--;
1388 	pmem->pgidx = INVALID_PGIDX;
1389 	pmem->area = NULL;
1390 }
1391 
1392 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1393 {
1394 	struct tee_pager_pmem *pmem;
1395 	struct tee_pager_area *area;
1396 	uint32_t exceptions = pager_lock();
1397 
1398 	if (!pgt->num_used_entries)
1399 		goto out;
1400 
1401 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1402 		if (!pmem->area || pmem->pgidx == INVALID_PGIDX)
1403 			continue;
1404 		if (pmem->area->pgt == pgt)
1405 			pager_save_and_release_entry(pmem);
1406 	}
1407 	assert(!pgt->num_used_entries);
1408 
1409 out:
1410 	if (is_user_ta_ctx(pgt->ctx)) {
1411 		TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
1412 			if (area->pgt == pgt)
1413 				area->pgt = NULL;
1414 		}
1415 	}
1416 
1417 	pager_unlock(exceptions);
1418 }
1419 KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1420 #endif /*CFG_PAGED_USER_TA*/
1421 
1422 void tee_pager_release_phys(void *addr, size_t size)
1423 {
1424 	bool unmapped = false;
1425 	vaddr_t va = (vaddr_t)addr;
1426 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1427 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1428 	struct tee_pager_area *area;
1429 	uint32_t exceptions;
1430 
1431 	if (!size)
1432 		return;
1433 
1434 	area = find_area(&tee_pager_area_head, begin);
1435 	if (!area ||
1436 	    area != find_area(&tee_pager_area_head, end - SMALL_PAGE_SIZE))
1437 		panic();
1438 
1439 	exceptions = pager_lock();
1440 
1441 	for (va = begin; va < end; va += SMALL_PAGE_SIZE)
1442 		unmapped |= tee_pager_release_one_phys(area, va);
1443 
1444 	/* Invalidate secure TLB */
1445 	if (unmapped)
1446 		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
1447 
1448 	pager_unlock(exceptions);
1449 }
1450 KEEP_PAGER(tee_pager_release_phys);
1451 
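/*
 * Allocates a page aligned, pager backed, zero initialized read/write
 * region of core virtual memory. With TEE_MATTR_LOCKED the pages stay
 * resident once faulted in, until released with tee_pager_release_phys().
 */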
1452 void *tee_pager_alloc(size_t size, uint32_t flags)
1453 {
1454 	tee_mm_entry_t *mm;
1455 	uint32_t f = TEE_MATTR_PW | TEE_MATTR_PR | (flags & TEE_MATTR_LOCKED);
1456 
1457 	if (!size)
1458 		return NULL;
1459 
1460 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1461 	if (!mm)
1462 		return NULL;
1463 
1464 	tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
1465 				f, NULL, NULL);
1466 
1467 	return (void *)tee_mm_get_smem(mm);
1468 }
1469