xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision f0d0c3016d6f3b15e9e9253a72c6274115dd3c4e)
1 /*
2  * Copyright (c) 2016, Linaro Limited
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  * this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  * this list of conditions and the following disclaimer in the documentation
14  * and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <arm.h>
30 #include <assert.h>
31 #include <keep.h>
32 #include <sys/queue.h>
33 #include <kernel/abort.h>
34 #include <kernel/panic.h>
35 #include <kernel/spinlock.h>
36 #include <kernel/tee_misc.h>
37 #include <kernel/tee_ta_manager.h>
38 #include <kernel/thread.h>
39 #include <kernel/tz_ssvce.h>
40 #include <mm/core_memprot.h>
41 #include <mm/tee_mm.h>
42 #include <mm/tee_pager.h>
43 #include <types_ext.h>
44 #include <stdlib.h>
45 #include <tee_api_defines.h>
46 #include <tee/tee_cryp_provider.h>
47 #include <trace.h>
48 #include <utee_defines.h>
49 #include <util.h>
50 
51 #include "pager_private.h"
52 
53 #define PAGER_AE_KEY_BITS	256
54 
55 struct pager_rw_pstate {
56 	uint64_t iv;
57 	uint8_t tag[PAGER_AES_GCM_TAG_LEN];
58 };
59 
60 enum area_type {
61 	AREA_TYPE_RO,
62 	AREA_TYPE_RW,
63 	AREA_TYPE_LOCK,
64 };
65 
66 struct tee_pager_area {
67 	union {
68 		const uint8_t *hashes;
69 		struct pager_rw_pstate *rwp;
70 	} u;
71 	uint8_t *store;
72 	enum area_type type;
73 	uint32_t flags;
74 	vaddr_t base;
75 	size_t size;
76 	struct pgt *pgt;
77 	TAILQ_ENTRY(tee_pager_area) link;
78 };
79 
80 TAILQ_HEAD(tee_pager_area_head, tee_pager_area);
81 
82 static struct tee_pager_area_head tee_pager_area_head =
83 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
84 
85 #define INVALID_PGIDX	UINT_MAX
86 
87 /*
88  * struct tee_pager_pmem - Represents a physical page used for paging.
89  *
90  * @pgidx	an index of the entry in the area's page table (area->pgt)
91  * @va_alias	Virtual address where the physical page is always aliased.
92  *		Used during remapping of the page when the content needs to
93  *		be updated before it's available at the new location.
94  * @area	a pointer to the pager area
95  */
96 struct tee_pager_pmem {
97 	unsigned pgidx;
98 	void *va_alias;
99 	struct tee_pager_area *area;
100 	TAILQ_ENTRY(tee_pager_pmem) link;
101 };
102 
103 /* The list of physical pages. The first page in the list is the oldest */
104 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
105 
106 static struct tee_pager_pmem_head tee_pager_pmem_head =
107 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
108 
109 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
110 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
111 
112 static uint8_t pager_ae_key[PAGER_AE_KEY_BITS / 8];
113 
114 /* Number of pages to hide at a time (one third of the registered pages) */
115 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
116 
117 /* Number of registered physical pages, used when hiding pages. */
118 static size_t tee_pager_npages;
119 
120 #ifdef CFG_WITH_STATS
121 static struct tee_pager_stats pager_stats;
122 
123 static inline void incr_ro_hits(void)
124 {
125 	pager_stats.ro_hits++;
126 }
127 
128 static inline void incr_rw_hits(void)
129 {
130 	pager_stats.rw_hits++;
131 }
132 
133 static inline void incr_hidden_hits(void)
134 {
135 	pager_stats.hidden_hits++;
136 }
137 
138 static inline void incr_zi_released(void)
139 {
140 	pager_stats.zi_released++;
141 }
142 
143 static inline void incr_npages_all(void)
144 {
145 	pager_stats.npages_all++;
146 }
147 
148 static inline void set_npages(void)
149 {
150 	pager_stats.npages = tee_pager_npages;
151 }
152 
153 void tee_pager_get_stats(struct tee_pager_stats *stats)
154 {
155 	*stats = pager_stats;
156 
157 	pager_stats.hidden_hits = 0;
158 	pager_stats.ro_hits = 0;
159 	pager_stats.rw_hits = 0;
160 	pager_stats.zi_released = 0;
161 }
162 
163 #else /* CFG_WITH_STATS */
164 static inline void incr_ro_hits(void) { }
165 static inline void incr_rw_hits(void) { }
166 static inline void incr_hidden_hits(void) { }
167 static inline void incr_zi_released(void) { }
168 static inline void incr_npages_all(void) { }
169 static inline void set_npages(void) { }
170 
171 void tee_pager_get_stats(struct tee_pager_stats *stats)
172 {
173 	memset(stats, 0, sizeof(struct tee_pager_stats));
174 }
175 #endif /* CFG_WITH_STATS */
176 
177 static struct pgt pager_core_pgt;
178 struct core_mmu_table_info tee_pager_tbl_info;
179 static struct core_mmu_table_info pager_alias_tbl_info;
180 
181 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
182 
183 /* Defines the range of the alias area */
184 static tee_mm_entry_t *pager_alias_area;
185 /*
186  * Physical pages are added in a stack-like fashion to the alias area.
187  * @pager_alias_next_free gives the address of the next free entry if
188  * @pager_alias_next_free is non-zero.
189  */
190 static uintptr_t pager_alias_next_free;
191 
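/*
 * pager_lock()/pager_unlock() mask exceptions on the current core and
 * take the pager spinlock, serializing updates to the pmem lists and to
 * the paged translation table entries across cores.
 */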
192 static uint32_t pager_lock(void)
193 {
194 	return cpu_spin_lock_xsave(&pager_spinlock);
195 }
196 
197 static void pager_unlock(uint32_t exceptions)
198 {
199 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
200 }
201 
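/*
 * set_alias_area() - reserve @mm as the pager alias area. The range must
 * be page aligned and fully covered by a single small-page translation
 * table; any existing mappings in the range are cleared.
 */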
202 static void set_alias_area(tee_mm_entry_t *mm)
203 {
204 	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
205 	size_t tbl_va_size;
206 	unsigned idx;
207 	unsigned last_idx;
208 	vaddr_t smem = tee_mm_get_smem(mm);
209 	size_t nbytes = tee_mm_get_bytes(mm);
210 
211 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
212 
213 	if (pager_alias_area)
214 		panic("null pager_alias_area");
215 
216 	if (!ti->num_entries && !core_mmu_find_table(smem, UINT_MAX, ti))
217 		panic("Can't find translation table");
218 
219 	if ((1 << ti->shift) != SMALL_PAGE_SIZE)
220 		panic("Unsupported page size in translation table");
221 
222 	tbl_va_size = (1 << ti->shift) * ti->num_entries;
223 	if (!core_is_buffer_inside(smem, nbytes,
224 				   ti->va_base, tbl_va_size)) {
225 		EMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit it translation table 0x%" PRIxVA " len 0x%zx",
226 		     smem, nbytes, ti->va_base, tbl_va_size);
227 		panic();
228 	}
229 
230 	if (smem & SMALL_PAGE_MASK || nbytes & SMALL_PAGE_MASK)
231 		panic("invalid area alignment");
232 
233 	pager_alias_area = mm;
234 	pager_alias_next_free = smem;
235 
236 	/* Clear all mappings in the alias area */
237 	idx = core_mmu_va2idx(ti, smem);
238 	last_idx = core_mmu_va2idx(ti, smem + nbytes);
239 	for (; idx < last_idx; idx++)
240 		core_mmu_set_entry(ti, idx, 0, 0);
241 
242 	/* TODO only invalidate entries touched above */
243 	tlbi_all();
244 }
245 
246 static void generate_ae_key(void)
247 {
248 	if (rng_generate(pager_ae_key, sizeof(pager_ae_key)) != TEE_SUCCESS)
249 		panic("failed to generate random");
250 }
251 
252 void tee_pager_init(tee_mm_entry_t *mm_alias)
253 {
254 	set_alias_area(mm_alias);
255 	generate_ae_key();
256 }
257 
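/*
 * pager_add_alias_page() - map the physical page @pa read-only at the
 * next free slot in the alias area and return the aliased virtual
 * address. Write permission on the alias is toggled in
 * tee_pager_load_page() when the page content needs to be written.
 */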
258 static void *pager_add_alias_page(paddr_t pa)
259 {
260 	unsigned idx;
261 	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
262 	/* Alias pages are mapped read-only; write access is enabled when needed */
263 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
264 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
265 			TEE_MATTR_SECURE | TEE_MATTR_PR;
266 
267 	DMSG("0x%" PRIxPA, pa);
268 
269 	if (!pager_alias_next_free || !ti->num_entries)
270 		panic("invalid alias entry");
271 
272 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
273 	core_mmu_set_entry(ti, idx, pa, attr);
274 	pgt_inc_used_entries(&pager_core_pgt);
275 	pager_alias_next_free += SMALL_PAGE_SIZE;
276 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
277 				      tee_mm_get_bytes(pager_alias_area)))
278 		pager_alias_next_free = 0;
279 	return (void *)core_mmu_idx2va(ti, idx);
280 }
281 
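/*
 * alloc_area() - allocate a struct tee_pager_area covering
 * [base, base + size). Writable, non-locked areas get a backing store in
 * secure DDR plus per-page crypto state (IV and tag); read-only areas
 * reference the supplied @store and @hashes directly; locked areas need
 * neither since their pages are zero-initialized on use.
 */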
282 static struct tee_pager_area *alloc_area(struct pgt *pgt,
283 					 vaddr_t base, size_t size,
284 					 uint32_t flags, const void *store,
285 					 const void *hashes)
286 {
287 	struct tee_pager_area *area = calloc(1, sizeof(*area));
288 	enum area_type at;
289 	tee_mm_entry_t *mm_store = NULL;
290 
291 	if (!area)
292 		return NULL;
293 
294 	if (flags & (TEE_MATTR_PW | TEE_MATTR_UW)) {
295 		if (flags & TEE_MATTR_LOCKED) {
296 			at = AREA_TYPE_LOCK;
297 			goto out;
298 		}
299 		mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size);
300 		if (!mm_store)
301 			goto bad;
302 		area->store = phys_to_virt(tee_mm_get_smem(mm_store),
303 					   MEM_AREA_TA_RAM);
304 		if (!area->store)
305 			goto bad;
306 		area->u.rwp = calloc(size / SMALL_PAGE_SIZE,
307 				     sizeof(struct pager_rw_pstate));
308 		if (!area->u.rwp)
309 			goto bad;
310 		at = AREA_TYPE_RW;
311 	} else {
312 		area->store = (void *)store;
313 		area->u.hashes = hashes;
314 		at = AREA_TYPE_RO;
315 	}
316 out:
317 	area->pgt = pgt;
318 	area->base = base;
319 	area->size = size;
320 	area->flags = flags;
321 	area->type = at;
322 	return area;
323 bad:
324 	tee_mm_free(mm_store);
325 	free(area->u.rwp);
326 	free(area);
327 	return NULL;
328 }
329 
330 static void area_insert_tail(struct tee_pager_area *area)
331 {
332 	uint32_t exceptions = pager_lock();
333 
334 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
335 
336 	pager_unlock(exceptions);
337 }
338 KEEP_PAGER(area_insert_tail);
339 
340 static size_t tbl_usage_count(struct pgt *pgt)
341 {
342 	size_t n;
343 	paddr_t pa;
344 	size_t usage = 0;
345 
346 	for (n = 0; n < tee_pager_tbl_info.num_entries; n++) {
347 		core_mmu_get_entry_primitive(pgt->tbl, tee_pager_tbl_info.level,
348 					     n, &pa, NULL);
349 		if (pa)
350 			usage++;
351 	}
352 	return usage;
353 }
354 
355 bool tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
356 			const void *store, const void *hashes)
357 {
358 	struct tee_pager_area *area;
359 	size_t tbl_va_size;
360 	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
361 
362 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
363 		base, base + size, flags, store, hashes);
364 
365 	if (base & SMALL_PAGE_MASK || size & SMALL_PAGE_MASK || !size) {
366 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, size);
367 		panic();
368 	}
369 
370 	if (!(flags & TEE_MATTR_PW) && (!store || !hashes))
371 		panic("write pages cannot provide store or hashes");
372 
373 	if ((flags & TEE_MATTR_PW) && (store || hashes))
374 		panic("non-write pages must provide store and hashes");
375 
376 	if (!pager_core_pgt.tbl) {
377 		pager_core_pgt.tbl = ti->table;
378 		pgt_set_used_entries(&pager_core_pgt,
379 				     tbl_usage_count(&pager_core_pgt));
380 	}
381 
382 	tbl_va_size = (1 << ti->shift) * ti->num_entries;
383 	if (!core_is_buffer_inside(base, size, ti->va_base, tbl_va_size)) {
384 		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit it translation table 0x%" PRIxVA " len 0x%zx",
385 			base, size, ti->va_base, tbl_va_size);
386 		return false;
387 	}
388 
389 	area = alloc_area(&pager_core_pgt, base, size, flags, store, hashes);
390 	if (!area)
391 		return false;
392 
393 	area_insert_tail(area);
394 	return true;
395 }
396 
397 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
398 					vaddr_t va)
399 {
400 	struct tee_pager_area *area;
401 
402 	if (!areas)
403 		return NULL;
404 
405 	TAILQ_FOREACH(area, areas, link) {
406 		if (core_is_buffer_inside(va, 1, area->base, area->size))
407 			return area;
408 	}
409 	return NULL;
410 }
411 
412 #ifdef CFG_PAGED_USER_TA
413 static struct tee_pager_area *find_uta_area(vaddr_t va)
414 {
415 	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
416 
417 	if (!ctx || !is_user_ta_ctx(ctx))
418 		return NULL;
419 	return find_area(to_user_ta_ctx(ctx)->areas, va);
420 }
421 #else
422 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
423 {
424 	return NULL;
425 }
426 #endif /*CFG_PAGED_USER_TA*/
427 
428 
429 static uint32_t get_area_mattr(uint32_t area_flags)
430 {
431 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
432 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
433 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
434 
435 	if (!(area_flags & (TEE_MATTR_UR | TEE_MATTR_UX | TEE_MATTR_UW)))
436 		attr |= TEE_MATTR_GLOBAL;
437 
438 	return attr;
439 }
440 
441 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
442 {
443 	paddr_t pa;
444 	unsigned idx;
445 
446 	idx = core_mmu_va2idx(&pager_alias_tbl_info, (vaddr_t)pmem->va_alias);
447 	core_mmu_get_entry(&pager_alias_tbl_info, idx, &pa, NULL);
448 	return pa;
449 }
450 
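/*
 * Paged-out RW pages are protected with AES-GCM using the random key
 * generated in tee_pager_init(). The per-page IV and authentication tag
 * are kept in struct pager_rw_pstate.
 */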
451 static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
452 			void *dst)
453 {
454 	struct pager_aes_gcm_iv iv = {
455 		{ (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }
456 	};
457 
458 	return pager_aes_gcm_decrypt(pager_ae_key, sizeof(pager_ae_key),
459 				     &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE);
460 }
461 
462 static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
463 {
464 	struct pager_aes_gcm_iv iv;
465 
466 	assert((rwp->iv + 1) > rwp->iv);
467 	rwp->iv++;
468 	/*
469 	 * IV is constructed as recommended in section "8.2.1 Deterministic
470 	 * Construction" of "Recommendation for Block Cipher Modes of
471 	 * Operation: Galois/Counter Mode (GCM) and GMAC",
472 	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
473 	 */
474 	iv.iv[0] = (vaddr_t)rwp;
475 	iv.iv[1] = rwp->iv >> 32;
476 	iv.iv[2] = rwp->iv;
477 
478 	if (!pager_aes_gcm_encrypt(pager_ae_key, sizeof(pager_ae_key),
479 				   &iv, rwp->tag,
480 				   src, dst, SMALL_PAGE_SIZE))
481 		panic("gcm failed");
482 }
483 
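/*
 * tee_pager_load_page() - fill the aliased page @va_alias with the
 * content belonging at @page_va: RO pages are copied from the store and
 * verified against their SHA-256 hash, RW pages are zero-filled on first
 * use or decrypted and authenticated from the store, and LOCK pages are
 * always zero-initialized.
 */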
484 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
485 			void *va_alias)
486 {
487 	size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
488 	const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;
489 	struct core_mmu_table_info *ti;
490 	uint32_t attr_alias;
491 	paddr_t pa_alias;
492 	unsigned int idx_alias;
493 
494 	/* Ensure we are allowed to write to the aliased virtual page */
495 	ti = &pager_alias_tbl_info;
496 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
497 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
498 	if (!(attr_alias & TEE_MATTR_PW)) {
499 		attr_alias |= TEE_MATTR_PW;
500 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
501 		/* TODO: flush TLB for target page only */
502 		tlbi_all();
503 	}
504 
505 	switch (area->type) {
506 	case AREA_TYPE_RO:
507 		{
508 			const void *hash = area->u.hashes +
509 					   idx * TEE_SHA256_HASH_SIZE;
510 
511 			memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
512 			incr_ro_hits();
513 
514 			if (hash_sha256_check(hash, va_alias,
515 					      SMALL_PAGE_SIZE) != TEE_SUCCESS) {
516 				EMSG("PH 0x%" PRIxVA " failed", page_va);
517 				panic();
518 			}
519 		}
520 		/* Forbid write to aliases for read-only (maybe exec) pages */
521 		attr_alias &= ~TEE_MATTR_PW;
522 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
523 		/* TODO: flush TLB for target page only */
524 		tlbi_all();
525 		break;
526 	case AREA_TYPE_RW:
527 		FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64,
528 			va_alias, page_va, area->u.rwp[idx].iv);
529 		if (!area->u.rwp[idx].iv)
530 			memset(va_alias, 0, SMALL_PAGE_SIZE);
531 		else if (!decrypt_page(&area->u.rwp[idx], stored_page,
532 				       va_alias)) {
533 			EMSG("PH 0x%" PRIxVA " failed", page_va);
534 			panic();
535 		}
536 		incr_rw_hits();
537 		break;
538 	case AREA_TYPE_LOCK:
539 		FMSG("Zero init %p %#" PRIxVA, va_alias, page_va);
540 		memset(va_alias, 0, SMALL_PAGE_SIZE);
541 		break;
542 	default:
543 		panic();
544 	}
545 }
546 
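/*
 * tee_pager_save_page() - write a page back to its backing store before
 * the physical page is reused. Only dirty pages of RW areas need saving;
 * they are re-encrypted with an incremented IV. RO and LOCK pages are
 * never written back.
 */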
547 static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
548 {
549 	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
550 				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;
551 
552 	if (pmem->area->type == AREA_TYPE_RW && (attr & dirty_bits)) {
553 		size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK;
554 		size_t idx = pmem->pgidx - (offs >> SMALL_PAGE_SHIFT);
555 		void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE;
556 
557 		assert(pmem->area->flags & (TEE_MATTR_PW | TEE_MATTR_UW));
558 		encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias,
559 			     stored_page);
560 		FMSG("Saved %#" PRIxVA " iv %#" PRIx64,
561 			pmem->area->base + idx * SMALL_PAGE_SIZE,
562 			pmem->area->u.rwp[idx].iv);
563 	}
564 }
565 
566 static void area_get_entry(struct tee_pager_area *area, size_t idx,
567 			   paddr_t *pa, uint32_t *attr)
568 {
569 	assert(area->pgt);
570 	assert(idx < tee_pager_tbl_info.num_entries);
571 	core_mmu_get_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,
572 				     idx, pa, attr);
573 }
574 
575 static void area_set_entry(struct tee_pager_area *area, size_t idx,
576 			   paddr_t pa, uint32_t attr)
577 {
578 	assert(area->pgt);
579 	assert(idx < tee_pager_tbl_info.num_entries);
580 	core_mmu_set_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,
581 				     idx, pa, attr);
582 }
583 
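/*
 * The index used below is relative to the start of the translation table
 * covering the area, not to area->base. Example with hypothetical values
 * (1 MiB pgdir at 0x00100000, 4 KiB small pages): for an area with base
 * 0x00102000, a fault at va 0x00105000 gives
 * idx = (0x00105000 - 0x00100000) >> SMALL_PAGE_SHIFT = 5.
 */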
584 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
585 {
586 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
587 }
588 
589 static vaddr_t __maybe_unused area_idx2va(struct tee_pager_area *area,
590 					 size_t idx)
591 {
592 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
593 }
594 
595 #ifdef CFG_PAGED_USER_TA
596 static void free_area(struct tee_pager_area *area)
597 {
598 	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr,
599 				virt_to_phys(area->store)));
600 	if (area->type == AREA_TYPE_RW)
601 		free(area->u.rwp);
602 	free(area);
603 }
604 
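/*
 * pager_add_uta_area() - add pageable areas covering [base, base + size)
 * to the user TA context. The range is split so that no area crosses a
 * pgdir boundary; page tables are assigned later, when the context is
 * activated.
 */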
605 static bool pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
606 			       size_t size)
607 {
608 	struct tee_pager_area *area;
609 	uint32_t flags;
610 	vaddr_t b = base;
611 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
612 
613 	if (!utc->areas) {
614 		utc->areas = malloc(sizeof(*utc->areas));
615 		if (!utc->areas)
616 			return false;
617 		TAILQ_INIT(utc->areas);
618 	}
619 
620 	flags = TEE_MATTR_PRW | TEE_MATTR_URWX;
621 
622 	while (s) {
623 		size_t s2;
624 
625 		if (find_area(utc->areas, b))
626 			return false;
627 
628 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
629 
630 		/* Table info will be set when the context is activated. */
631 		area = alloc_area(NULL, b, s2, flags, NULL, NULL);
632 		if (!area)
633 			return false;
634 		TAILQ_INSERT_TAIL(utc->areas, area, link);
635 		b += s2;
636 		s -= s2;
637 	}
638 
639 	return true;
640 }
641 
642 bool tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size)
643 {
644 	struct thread_specific_data *tsd = thread_get_tsd();
645 	struct tee_pager_area *area;
646 	struct core_mmu_table_info dir_info = { NULL };
647 
648 	if (&utc->ctx != tsd->ctx) {
649 		/*
650 		 * Changes are to a utc that isn't active. Just add the
651 		 * areas; page tables will be dealt with later.
652 		 */
653 		return pager_add_uta_area(utc, base, size);
654 	}
655 
656 	/*
657 	 * Assign page tables before adding areas to be able to tell which
658 	 * are newly added and should be removed in case of failure.
659 	 */
660 	tee_pager_assign_uta_tables(utc);
661 	if (!pager_add_uta_area(utc, base, size)) {
662 		struct tee_pager_area *next_a;
663 
664 		/* Remove all added areas */
665 		TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
666 			if (!area->pgt) {
667 				TAILQ_REMOVE(utc->areas, area, link);
668 				free_area(area);
669 			}
670 		}
671 		return false;
672 	}
673 
674 	/*
675 	 * Assign page tables to the new areas and make sure that the page
676 	 * tables are registered in the upper table.
677 	 */
678 	tee_pager_assign_uta_tables(utc);
679 	core_mmu_get_user_pgdir(&dir_info);
680 	TAILQ_FOREACH(area, utc->areas, link) {
681 		paddr_t pa;
682 		size_t idx;
683 		uint32_t attr;
684 
685 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
686 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
687 
688 		/*
689 		 * Check if the page table is already used; if it is, it's
690 		 * already registered.
691 		 */
692 		if (area->pgt->num_used_entries) {
693 			assert(attr & TEE_MATTR_TABLE);
694 			assert(pa == virt_to_phys(area->pgt->tbl));
695 			continue;
696 		}
697 
698 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
699 		pa = virt_to_phys(area->pgt->tbl);
700 		assert(pa);
701 		/*
702 		 * Note that the update of the table entry is guaranteed to
703 		 * be atomic.
704 		 */
705 		core_mmu_set_entry(&dir_info, idx, pa, attr);
706 	}
707 
708 	return true;
709 }
710 
711 static void init_tbl_info_from_pgt(struct core_mmu_table_info *ti,
712 				   struct pgt *pgt)
713 {
714 	assert(pgt);
715 	ti->table = pgt->tbl;
716 	ti->va_base = pgt->vabase;
717 	ti->level = tee_pager_tbl_info.level;
718 	ti->shift = tee_pager_tbl_info.shift;
719 	ti->num_entries = tee_pager_tbl_info.num_entries;
720 }
721 
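/*
 * transpose_area() - move an area, together with any physical pages
 * currently mapping it, from its old page table and base address to
 * @new_pgt and @new_base while preserving page content and attributes.
 */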
722 static void transpose_area(struct tee_pager_area *area, struct pgt *new_pgt,
723 			   vaddr_t new_base)
724 {
725 	uint32_t exceptions = pager_lock();
726 
727 	/*
728 	 * If there's no pgt assigned to the old area there are no pages to
729 	 * deal with either, just update with a new pgt and base.
730 	 */
731 	if (area->pgt) {
732 		struct core_mmu_table_info old_ti;
733 		struct core_mmu_table_info new_ti;
734 		struct tee_pager_pmem *pmem;
735 
736 		init_tbl_info_from_pgt(&old_ti, area->pgt);
737 		init_tbl_info_from_pgt(&new_ti, new_pgt);
738 
739 
740 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
741 			vaddr_t va;
742 			paddr_t pa;
743 			uint32_t attr;
744 
745 			if (pmem->area != area)
746 				continue;
747 			core_mmu_get_entry(&old_ti, pmem->pgidx, &pa, &attr);
748 			core_mmu_set_entry(&old_ti, pmem->pgidx, 0, 0);
749 
750 			assert(pa == get_pmem_pa(pmem));
751 			assert(attr);
752 			assert(area->pgt->num_used_entries);
753 			area->pgt->num_used_entries--;
754 
755 			va = core_mmu_idx2va(&old_ti, pmem->pgidx);
756 			va = va - area->base + new_base;
757 			pmem->pgidx = core_mmu_va2idx(&new_ti, va);
758 			core_mmu_set_entry(&new_ti, pmem->pgidx, pa, attr);
759 			new_pgt->num_used_entries++;
760 		}
761 	}
762 
763 	area->pgt = new_pgt;
764 	area->base = new_base;
765 	pager_unlock(exceptions);
766 }
767 KEEP_PAGER(transpose_area);
768 
769 void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
770 				   vaddr_t src_base,
771 				   struct user_ta_ctx *dst_utc,
772 				   vaddr_t dst_base, struct pgt **dst_pgt,
773 				   size_t size)
774 {
775 	struct tee_pager_area *area;
776 	struct tee_pager_area *next_a;
777 
778 	TAILQ_FOREACH_SAFE(area, src_utc->areas, link, next_a) {
779 		vaddr_t new_area_base;
780 		size_t new_idx;
781 
782 		if (!core_is_buffer_inside(area->base, area->size,
783 					  src_base, size))
784 			continue;
785 
786 		TAILQ_REMOVE(src_utc->areas, area, link);
787 
788 		new_area_base = dst_base + (src_base - area->base);
789 		new_idx = (new_area_base - dst_pgt[0]->vabase) /
790 			  CORE_MMU_PGDIR_SIZE;
791 		assert((new_area_base & ~CORE_MMU_PGDIR_MASK) ==
792 		       dst_pgt[new_idx]->vabase);
793 		transpose_area(area, dst_pgt[new_idx], new_area_base);
794 
795 		/*
796 		 * Assert that this will not cause any conflicts in the new
797 		 * utc.  This should already be guaranteed, but a bug here
798 		 * could be tricky to find.
799 		 */
800 		assert(!find_area(dst_utc->areas, area->base));
801 		TAILQ_INSERT_TAIL(dst_utc->areas, area, link);
802 	}
803 }
804 
805 static void rem_area(struct tee_pager_area_head *area_head,
806 		     struct tee_pager_area *area)
807 {
808 	struct tee_pager_pmem *pmem;
809 	uint32_t exceptions;
810 
811 	exceptions = pager_lock();
812 
813 	TAILQ_REMOVE(area_head, area, link);
814 
815 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
816 		if (pmem->area == area) {
817 			area_set_entry(area, pmem->pgidx, 0, 0);
818 			pgt_dec_used_entries(area->pgt);
819 			pmem->area = NULL;
820 			pmem->pgidx = INVALID_PGIDX;
821 		}
822 	}
823 
824 	/* TODO only invalidate entries touched above */
825 	tlbi_all();
826 
827 	pager_unlock(exceptions);
828 	free_area(area);
829 }
830 KEEP_PAGER(rem_area);
831 
832 void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
833 			      size_t size)
834 {
835 	struct tee_pager_area *area;
836 	struct tee_pager_area *next_a;
837 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
838 
839 	TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
840 		if (core_is_buffer_inside(area->base, area->size, base, s))
841 			rem_area(utc->areas, area);
842 	}
843 }
844 
845 void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
846 {
847 	struct tee_pager_area *area;
848 
849 	if (!utc->areas)
850 		return;
851 
852 	while (true) {
853 		area = TAILQ_FIRST(utc->areas);
854 		if (!area)
855 			break;
856 		TAILQ_REMOVE(utc->areas, area, link);
857 		free_area(area);
858 	}
859 
860 	free(utc->areas);
861 }
862 
863 bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
864 				 size_t size, uint32_t flags)
865 {
866 	bool ret;
867 	vaddr_t b = base;
868 	size_t s = size;
869 	size_t s2;
870 	struct tee_pager_area *area = find_area(utc->areas, b);
871 	uint32_t exceptions;
872 	struct tee_pager_pmem *pmem;
873 	paddr_t pa;
874 	uint32_t a;
875 	uint32_t f;
876 
877 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
878 	if (f & TEE_MATTR_UW)
879 		f |= TEE_MATTR_PW;
880 	f = get_area_mattr(f);
881 
882 	exceptions = pager_lock();
883 
884 	while (s) {
885 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
886 		if (!area || area->base != b || area->size != s2) {
887 			ret = false;
888 			goto out;
889 		}
890 		b += s2;
891 		s -= s2;
892 
893 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
894 			if (pmem->area != area)
895 				continue;
896 			area_get_entry(pmem->area, pmem->pgidx, &pa, &a);
897 			if (a & TEE_MATTR_VALID_BLOCK)
898 				assert(pa == get_pmem_pa(pmem));
899 			else
900 				pa = get_pmem_pa(pmem);
901 			if (a == f)
902 				continue;
903 			area_set_entry(pmem->area, pmem->pgidx, 0, 0);
904 			/* TODO only invalidate entries touched above */
905 			tlbi_all();
906 			if (!(flags & TEE_MATTR_UW))
907 				tee_pager_save_page(pmem, a);
908 
909 			area_set_entry(pmem->area, pmem->pgidx, pa, f);
910 
911 			if (flags & TEE_MATTR_UX) {
912 				void *va = (void *)area_idx2va(pmem->area,
913 							       pmem->pgidx);
914 
915 				cache_op_inner(DCACHE_AREA_CLEAN, va,
916 						SMALL_PAGE_SIZE);
917 				cache_op_inner(ICACHE_AREA_INVALIDATE, va,
918 						SMALL_PAGE_SIZE);
919 			}
920 		}
921 
922 		area->flags = f;
923 		area = TAILQ_NEXT(area, link);
924 	}
925 
926 	ret = true;
927 out:
928 	pager_unlock(exceptions);
929 	return ret;
930 }
931 KEEP_PAGER(tee_pager_set_uta_area_attr);
932 #endif /*CFG_PAGED_USER_TA*/
933 
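/*
 * tee_pager_unhide_page() - if the page at @page_va is hidden (mapped
 * but marked invalid so accesses fault), restore its attributes, move it
 * to the back of the pageable list and return true.
 */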
934 static bool tee_pager_unhide_page(vaddr_t page_va)
935 {
936 	struct tee_pager_pmem *pmem;
937 
938 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
939 		paddr_t pa;
940 		uint32_t attr;
941 
942 		if (pmem->pgidx == INVALID_PGIDX)
943 			continue;
944 
945 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
946 
947 		if (!(attr &
948 		     (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
949 			continue;
950 
951 		if (area_va2idx(pmem->area, page_va) == pmem->pgidx) {
952 			uint32_t a = get_area_mattr(pmem->area->flags);
953 
954 			/* page is hidden, show and move to back */
955 			if (pa != get_pmem_pa(pmem))
956 				panic("unexpected pa");
957 
958 			/*
959 			 * If it's not a dirty block, then it should be
960 			 * read only.
961 			 */
962 			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
963 				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
964 			else
965 				FMSG("Unhide %#" PRIxVA, page_va);
966 
967 			if (page_va == 0x8000a000)
968 				FMSG("unhide %#" PRIxVA " a %#" PRIX32,
969 					page_va, a);
970 			area_set_entry(pmem->area, pmem->pgidx, pa, a);
971 
972 			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
973 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
974 
975 			/* TODO only invalidate entry touched above */
976 			tlbi_all();
977 
978 			incr_hidden_hits();
979 			return true;
980 		}
981 	}
982 
983 	return false;
984 }
985 
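/*
 * tee_pager_hide_pages() - mark up to TEE_PAGER_NHIDE of the oldest
 * mapped pages as hidden. The next access to a hidden page faults and
 * moves the page to the back of the list, which approximates a
 * least-recently-used ordering for page replacement.
 */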
986 static void tee_pager_hide_pages(void)
987 {
988 	struct tee_pager_pmem *pmem;
989 	size_t n = 0;
990 
991 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
992 		paddr_t pa;
993 		uint32_t attr;
994 		uint32_t a;
995 
996 		if (n >= TEE_PAGER_NHIDE)
997 			break;
998 		n++;
999 
1000 		/* we cannot hide pages when pmem->area is not defined. */
1001 		if (!pmem->area)
1002 			continue;
1003 
1004 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
1005 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1006 			continue;
1007 
1008 		assert(pa == get_pmem_pa(pmem));
1009 		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)){
1010 			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
1011 			FMSG("Hide %#" PRIxVA,
1012 			     area_idx2va(pmem->area, pmem->pgidx));
1013 		} else
1014 			a = TEE_MATTR_HIDDEN_BLOCK;
1015 		area_set_entry(pmem->area, pmem->pgidx, pa, a);
1016 	}
1017 
1018 	/* TODO only invalidate entries touched above */
1019 	tlbi_all();
1020 }
1021 
1022 /*
1023  * Find the mapped pmem, unmap it and move it back to the pageable pmem list.
1024  * Return false if page was not mapped, and true if page was mapped.
1025  */
1026 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1027 				       vaddr_t page_va)
1028 {
1029 	struct tee_pager_pmem *pmem;
1030 	unsigned pgidx;
1031 	paddr_t pa;
1032 	uint32_t attr;
1033 
1034 	pgidx = area_va2idx(area, page_va);
1035 	area_get_entry(area, pgidx, &pa, &attr);
1036 
1037 	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
1038 
1039 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1040 		if (pmem->area != area || pmem->pgidx != pgidx)
1041 			continue;
1042 
1043 		assert(pa == get_pmem_pa(pmem));
1044 		area_set_entry(area, pgidx, 0, 0);
1045 		pgt_dec_used_entries(area->pgt);
1046 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1047 		pmem->area = NULL;
1048 		pmem->pgidx = INVALID_PGIDX;
1049 		tee_pager_npages++;
1050 		set_npages();
1051 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1052 		incr_zi_released();
1053 		return true;
1054 	}
1055 
1056 	return false;
1057 }
1058 
1059 /* Finds the oldest page and unmaps it from its old virtual address */
1060 static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
1061 {
1062 	struct tee_pager_pmem *pmem;
1063 
1064 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1065 	if (!pmem) {
1066 		EMSG("No pmem entries");
1067 		return NULL;
1068 	}
1069 	if (pmem->pgidx != INVALID_PGIDX) {
1070 		uint32_t a;
1071 
1072 		assert(pmem->area && pmem->area->pgt);
1073 		area_get_entry(pmem->area, pmem->pgidx, NULL, &a);
1074 		area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1075 		pgt_dec_used_entries(pmem->area->pgt);
1076 		/* TODO only invalidate entries touched above */
1077 		tlbi_all();
1078 		tee_pager_save_page(pmem, a);
1079 	}
1080 
1081 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1082 	pmem->pgidx = INVALID_PGIDX;
1083 	pmem->area = NULL;
1084 	if (area->type == AREA_TYPE_LOCK) {
1085 		/* Move page to lock list */
1086 		if (tee_pager_npages <= 0)
1087 			panic("running out of page");
1088 		tee_pager_npages--;
1089 		set_npages();
1090 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1091 	} else {
1092 		/* move page to back */
1093 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1094 	}
1095 
1096 	return pmem;
1097 }
1098 
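/*
 * pager_update_permissions() - handle faults on pages that are already
 * mapped. Returns false if the entry isn't valid so the caller pages the
 * address in; returns true otherwise, with *handled set when the access
 * was fixed up (for instance a clean writable page made dirty) and
 * cleared when the abort is a genuine access violation.
 */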
1099 static bool pager_update_permissions(struct tee_pager_area *area,
1100 			struct abort_info *ai, bool *handled)
1101 {
1102 	unsigned int pgidx = area_va2idx(area, ai->va);
1103 	uint32_t attr;
1104 	paddr_t pa;
1105 
1106 	*handled = false;
1107 
1108 	area_get_entry(area, pgidx, &pa, &attr);
1109 
1110 	/* Not mapped */
1111 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1112 		return false;
1113 
1114 	/* Not readable, should not happen */
1115 	if (abort_is_user_exception(ai)) {
1116 		if (!(attr & TEE_MATTR_UR))
1117 			return true;
1118 	} else {
1119 		if (!(attr & TEE_MATTR_PR)) {
1120 			abort_print_error(ai);
1121 			panic();
1122 		}
1123 	}
1124 
1125 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1126 	case CORE_MMU_FAULT_TRANSLATION:
1127 	case CORE_MMU_FAULT_READ_PERMISSION:
1128 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1129 			/* Check for an attempt to execute from a NOX page */
1130 			if (abort_is_user_exception(ai)) {
1131 				if (!(attr & TEE_MATTR_UX))
1132 					return true;
1133 			} else {
1134 				if (!(attr & TEE_MATTR_PX)) {
1135 					abort_print_error(ai);
1136 					panic();
1137 				}
1138 			}
1139 		}
1140 		/* Since the page is mapped now it's OK */
1141 		break;
1142 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1143 		/* Check for an attempt to write to an RO page */
1144 		if (abort_is_user_exception(ai)) {
1145 			if (!(area->flags & TEE_MATTR_UW))
1146 				return true;
1147 			if (!(attr & TEE_MATTR_UW)) {
1148 				FMSG("Dirty %p",
1149 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1150 				area_set_entry(area, pgidx, pa,
1151 					       get_area_mattr(area->flags));
1152 				/* TODO only invalidate entry above */
1153 				tlbi_all();
1154 			}
1155 
1156 		} else {
1157 			if (!(area->flags & TEE_MATTR_PW)) {
1158 				abort_print_error(ai);
1159 				panic();
1160 			}
1161 			if (!(attr & TEE_MATTR_PW)) {
1162 				FMSG("Dirty %p",
1163 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1164 				area_set_entry(area, pgidx, pa,
1165 					       get_area_mattr(area->flags));
1166 				/* TODO only invalidate entry above */
1167 				tlbi_all();
1168 			}
1169 		}
1170 		/* Since the permissions have been updated it's OK now */
1171 		break;
1172 	default:
1173 		/* Some fault we can't deal with */
1174 		if (abort_is_user_exception(ai))
1175 			return true;
1176 		abort_print_error(ai);
1177 		panic();
1178 	}
1179 	*handled = true;
1180 	return true;
1181 }
1182 
1183 #ifdef CFG_TEE_CORE_DEBUG
1184 static void stat_handle_fault(void)
1185 {
1186 	static size_t num_faults;
1187 	static size_t min_npages = SIZE_MAX;
1188 	static size_t total_min_npages = SIZE_MAX;
1189 
1190 	num_faults++;
1191 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1192 		DMSG("nfaults %zu npages %zu (min %zu)",
1193 		     num_faults, tee_pager_npages, min_npages);
1194 		min_npages = tee_pager_npages; /* reset */
1195 	}
1196 	if (tee_pager_npages < min_npages)
1197 		min_npages = tee_pager_npages;
1198 	if (tee_pager_npages < total_min_npages)
1199 		total_min_npages = tee_pager_npages;
1200 }
1201 #else
1202 static void stat_handle_fault(void)
1203 {
1204 }
1205 #endif
1206 
1207 bool tee_pager_handle_fault(struct abort_info *ai)
1208 {
1209 	struct tee_pager_area *area;
1210 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1211 	uint32_t exceptions;
1212 	bool ret;
1213 
1214 #ifdef TEE_PAGER_DEBUG_PRINT
1215 	abort_print(ai);
1216 #endif
1217 
1218 	/*
1219 	 * We're updating pages that can affect several active CPUs at a
1220 	 * time below. We end up here because a thread tries to access some
1221 	 * memory that isn't available. We have to be careful when making
1222 	 * that memory available as other threads may succeed in accessing
1223 	 * that address the moment after we've made it available.
1224 	 *
1225 	 * That means that we can't just map the memory and populate the
1226 	 * page, instead we use the aliased mapping to populate the page
1227 	 * and once everything is ready we map it.
1228 	 */
1229 	exceptions = pager_lock();
1230 
1231 	stat_handle_fault();
1232 
1233 	/* check if the access is valid */
1234 	if (abort_is_user_exception(ai)) {
1235 		area = find_uta_area(ai->va);
1236 
1237 	} else {
1238 		area = find_area(&tee_pager_area_head, ai->va);
1239 		if (!area)
1240 			area = find_uta_area(ai->va);
1241 	}
1242 	if (!area || !area->pgt) {
1243 		ret = false;
1244 		goto out;
1245 	}
1246 
1247 	if (!tee_pager_unhide_page(page_va)) {
1248 		struct tee_pager_pmem *pmem = NULL;
1249 		uint32_t attr;
1250 
1251 		/*
1252 		 * The page wasn't hidden, but some other core may have
1253 		 * updated the table entry before we got here or we need
1254 		 * to make a read-only page read-write (dirty).
1255 		 */
1256 		if (pager_update_permissions(area, ai, &ret)) {
1257 			/*
1258 			 * Nothing more to do with the abort. The problem
1259 			 * could already have been dealt with from another
1260 			 * core, or if ret is false the TA will be panicked.
1261 			 */
1262 			goto out;
1263 		}
1264 
1265 		pmem = tee_pager_get_page(area);
1266 		if (!pmem) {
1267 			abort_print(ai);
1268 			panic();
1269 		}
1270 
1271 		/* load page code & data */
1272 		tee_pager_load_page(area, page_va, pmem->va_alias);
1273 
1274 		/*
1275 		 * We've updated the page using the aliased mapping and
1276 		 * some cache maintenance is now needed if it's an
1277 		 * executable page.
1278 		 *
1279 		 * Since the d-cache is a Physically-indexed,
1280 		 * physically-tagged (PIPT) cache we can clean the aliased
1281 		 * address instead of the real virtual address.
1282 		 *
1283 		 * The i-cache can also be PIPT, but may be something else
1284 		 * too; to keep it simple we invalidate the entire i-cache.
1285 		 * As a future optimization we may invalidate only the
1286 		 * aliased area if it is a PIPT cache, else the entire cache.
1287 		 */
1288 		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1289 			/*
1290 			 * Doing these operations to LoUIS (Level of
1291 			 * unification, Inner Shareable) would be enough
1292 			 */
1293 			cache_op_inner(DCACHE_AREA_CLEAN, pmem->va_alias,
1294 					SMALL_PAGE_SIZE);
1295 			cache_op_inner(ICACHE_INVALIDATE, NULL, 0);
1296 		}
1297 
1298 		pmem->area = area;
1299 		pmem->pgidx = area_va2idx(area, ai->va);
1300 		attr = get_area_mattr(area->flags) &
1301 			~(TEE_MATTR_PW | TEE_MATTR_UW);
1302 		area_set_entry(area, pmem->pgidx, get_pmem_pa(pmem), attr);
1303 		pgt_inc_used_entries(area->pgt);
1304 
1305 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
1306 		     area_idx2va(area, pmem->pgidx), get_pmem_pa(pmem));
1307 
1308 	}
1309 
1310 	tee_pager_hide_pages();
1311 	ret = true;
1312 out:
1313 	pager_unlock(exceptions);
1314 	return ret;
1315 }
1316 
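/*
 * tee_pager_add_pages() - register @npages physical pages, starting at
 * the page currently backing @vaddr, with the pager. With @unmap the
 * pages are unmapped and become immediately available for paging;
 * otherwise they stay mapped with their current content and are only
 * tracked on the pageable list.
 */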
1317 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1318 {
1319 	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
1320 	size_t n;
1321 
1322 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1323 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1324 
1325 	/* setup memory */
1326 	for (n = 0; n < npages; n++) {
1327 		struct tee_pager_pmem *pmem;
1328 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1329 		unsigned pgidx = core_mmu_va2idx(ti, va);
1330 		paddr_t pa;
1331 		uint32_t attr;
1332 
1333 		/*
1334 		 * Note that we can only support adding pages in the
1335 		 * valid range of this table info, currently not a problem.
1336 		 */
1337 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1338 
1339 		/* Ignore unmapped pages/blocks */
1340 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1341 			continue;
1342 
1343 		pmem = malloc(sizeof(struct tee_pager_pmem));
1344 		if (!pmem)
1345 			panic("out of mem");
1346 
1347 		pmem->va_alias = pager_add_alias_page(pa);
1348 
1349 		if (unmap) {
1350 			pmem->area = NULL;
1351 			pmem->pgidx = INVALID_PGIDX;
1352 			core_mmu_set_entry(ti, pgidx, 0, 0);
1353 			pgt_dec_used_entries(&pager_core_pgt);
1354 		} else {
1355 			/*
1356 			 * The page is still mapped, let's assign the area
1357 			 * and update the protection bits accordingly.
1358 			 */
1359 			pmem->area = find_area(&tee_pager_area_head, va);
1360 			assert(pmem->area->pgt == &pager_core_pgt);
1361 			pmem->pgidx = pgidx;
1362 			assert(pa == get_pmem_pa(pmem));
1363 			area_set_entry(pmem->area, pgidx, pa,
1364 				       get_area_mattr(pmem->area->flags));
1365 		}
1366 
1367 		tee_pager_npages++;
1368 		incr_npages_all();
1369 		set_npages();
1370 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1371 	}
1372 
1373 	/*
1374 	 * As this is done at init, invalidate all TLBs once instead of
1375 	 * targeting only the modified entries.
1376 	 */
1377 	tlbi_all();
1378 }
1379 
1380 #ifdef CFG_PAGED_USER_TA
1381 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1382 {
1383 	struct pgt *p = pgt;
1384 
1385 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1386 		p = SLIST_NEXT(p, link);
1387 	return p;
1388 }
1389 
1390 void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
1391 {
1392 	struct tee_pager_area *area;
1393 	struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1394 
1395 	TAILQ_FOREACH(area, utc->areas, link) {
1396 		if (!area->pgt)
1397 			area->pgt = find_pgt(pgt, area->base);
1398 		else
1399 			assert(area->pgt == find_pgt(pgt, area->base));
1400 		if (!area->pgt)
1401 			panic();
1402 	}
1403 }
1404 
1405 static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)
1406 {
1407 	uint32_t attr;
1408 
1409 	assert(pmem->area && pmem->area->pgt);
1410 
1411 	area_get_entry(pmem->area, pmem->pgidx, NULL, &attr);
1412 	area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1413 	/* TODO only invalidate entry touched above */
1414 	tlbi_all();
1415 	tee_pager_save_page(pmem, attr);
1416 	assert(pmem->area->pgt->num_used_entries);
1417 	pmem->area->pgt->num_used_entries--;
1418 	pmem->pgidx = INVALID_PGIDX;
1419 	pmem->area = NULL;
1420 }
1421 
1422 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1423 {
1424 	struct tee_pager_pmem *pmem;
1425 	struct tee_pager_area *area;
1426 	uint32_t exceptions = pager_lock();
1427 
1428 	if (!pgt->num_used_entries)
1429 		goto out;
1430 
1431 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1432 		if (!pmem->area || pmem->pgidx == INVALID_PGIDX)
1433 			continue;
1434 		if (pmem->area->pgt == pgt)
1435 			pager_save_and_release_entry(pmem);
1436 	}
1437 	assert(!pgt->num_used_entries);
1438 
1439 out:
1440 	if (is_user_ta_ctx(pgt->ctx)) {
1441 		TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
1442 			if (area->pgt == pgt)
1443 				area->pgt = NULL;
1444 		}
1445 	}
1446 
1447 	pager_unlock(exceptions);
1448 }
1449 KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1450 #endif /*CFG_PAGED_USER_TA*/
1451 
1452 void tee_pager_release_phys(void *addr, size_t size)
1453 {
1454 	bool unmapped = false;
1455 	vaddr_t va = (vaddr_t)addr;
1456 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1457 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1458 	struct tee_pager_area *area;
1459 	uint32_t exceptions;
1460 
1461 	if (end <= begin)
1462 		return;
1463 
1464 	area = find_area(&tee_pager_area_head, begin);
1465 	if (!area ||
1466 	    area != find_area(&tee_pager_area_head, end - SMALL_PAGE_SIZE))
1467 		panic();
1468 
1469 	exceptions = pager_lock();
1470 
1471 	for (va = begin; va < end; va += SMALL_PAGE_SIZE)
1472 		unmapped |= tee_pager_release_one_phys(area, va);
1473 
1474 	if (unmapped)
1475 		tlbi_all();
1476 
1477 	pager_unlock(exceptions);
1478 }
1479 KEEP_PAGER(tee_pager_release_phys);
1480 
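/*
 * tee_pager_alloc() - allocate a paged read/write core memory area of at
 * least @size bytes (rounded up to whole pages) from the vcore pool and
 * return its virtual address. Passing TEE_MATTR_LOCKED in @flags creates
 * a locked area whose pages stay resident once faulted in, until
 * released with tee_pager_release_phys().
 */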
1481 void *tee_pager_alloc(size_t size, uint32_t flags)
1482 {
1483 	tee_mm_entry_t *mm;
1484 	uint32_t f = TEE_MATTR_PW | TEE_MATTR_PR | (flags & TEE_MATTR_LOCKED);
1485 
1486 	if (!size)
1487 		return NULL;
1488 
1489 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1490 	if (!mm)
1491 		return NULL;
1492 
1493 	tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
1494 				f, NULL, NULL);
1495 
1496 	return (void *)tee_mm_get_smem(mm);
1497 }
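/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * allocate a locked, zero-initialized buffer from paged memory and
 * release its physical pages when done.
 *
 *	void *buf = tee_pager_alloc(2 * SMALL_PAGE_SIZE, TEE_MATTR_LOCKED);
 *
 *	if (buf) {
 *		use_buffer(buf);	// hypothetical consumer
 *		tee_pager_release_phys(buf, 2 * SMALL_PAGE_SIZE);
 *	}
 */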
1498