xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision 3078da83b083e234ed5aaac151b728eb9a304ba4)
1 /*
2  * Copyright (c) 2016, Linaro Limited
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  * this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  * this list of conditions and the following disclaimer in the documentation
14  * and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <arm.h>
30 #include <assert.h>
31 #include <keep.h>
32 #include <sys/queue.h>
33 #include <kernel/abort.h>
34 #include <kernel/panic.h>
35 #include <kernel/spinlock.h>
36 #include <kernel/tlb_helpers.h>
37 #include <kernel/tee_misc.h>
38 #include <kernel/tee_ta_manager.h>
39 #include <kernel/thread.h>
40 #include <mm/core_memprot.h>
41 #include <mm/tee_mm.h>
42 #include <mm/tee_pager.h>
43 #include <types_ext.h>
44 #include <stdlib.h>
45 #include <tee_api_defines.h>
46 #include <tee/tee_cryp_provider.h>
47 #include <trace.h>
48 #include <utee_defines.h>
49 #include <util.h>
50 
51 #include "pager_private.h"
52 
53 #define PAGER_AE_KEY_BITS	256
54 
55 struct pager_rw_pstate {
56 	uint64_t iv;
57 	uint8_t tag[PAGER_AES_GCM_TAG_LEN];
58 };
59 
60 enum area_type {
61 	AREA_TYPE_RO,
62 	AREA_TYPE_RW,
63 	AREA_TYPE_LOCK,
64 };
65 
66 struct tee_pager_area {
67 	union {
68 		const uint8_t *hashes;
69 		struct pager_rw_pstate *rwp;
70 	} u;
71 	uint8_t *store;
72 	enum area_type type;
73 	uint32_t flags;
74 	vaddr_t base;
75 	size_t size;
76 	struct pgt *pgt;
77 	TAILQ_ENTRY(tee_pager_area) link;
78 };
79 
80 TAILQ_HEAD(tee_pager_area_head, tee_pager_area);
81 
82 static struct tee_pager_area_head tee_pager_area_head =
83 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
84 
85 #define INVALID_PGIDX	UINT_MAX
86 
87 /*
88  * struct tee_pager_pmem - Represents a physical page used for paging.
89  *
90  * @pgidx	an index of the entry in area->ti.
91  * @va_alias	Virtual address where the physical page always is aliased.
92  *		Used during remapping of the page when the content need to
93  *		be updated before it's available at the new location.
94  * @area	a pointer to the pager area
95  */
96 struct tee_pager_pmem {
97 	unsigned pgidx;
98 	void *va_alias;
99 	struct tee_pager_area *area;
100 	TAILQ_ENTRY(tee_pager_pmem) link;
101 };
102 
103 /* The list of physical pages. The first page in the list is the oldest */
104 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
105 
106 static struct tee_pager_pmem_head tee_pager_pmem_head =
107 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
108 
109 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
110 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
111 
112 static uint8_t pager_ae_key[PAGER_AE_KEY_BITS / 8];
113 
114 /* number of pages hidden */
115 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
116 
117 /* Number of registered physical pages, used hiding pages. */
118 static size_t tee_pager_npages;
119 
120 #ifdef CFG_WITH_STATS
121 static struct tee_pager_stats pager_stats;
122 
123 static inline void incr_ro_hits(void)
124 {
125 	pager_stats.ro_hits++;
126 }
127 
128 static inline void incr_rw_hits(void)
129 {
130 	pager_stats.rw_hits++;
131 }
132 
133 static inline void incr_hidden_hits(void)
134 {
135 	pager_stats.hidden_hits++;
136 }
137 
138 static inline void incr_zi_released(void)
139 {
140 	pager_stats.zi_released++;
141 }
142 
143 static inline void incr_npages_all(void)
144 {
145 	pager_stats.npages_all++;
146 }
147 
148 static inline void set_npages(void)
149 {
150 	pager_stats.npages = tee_pager_npages;
151 }
152 
153 void tee_pager_get_stats(struct tee_pager_stats *stats)
154 {
155 	*stats = pager_stats;
156 
157 	pager_stats.hidden_hits = 0;
158 	pager_stats.ro_hits = 0;
159 	pager_stats.rw_hits = 0;
160 	pager_stats.zi_released = 0;
161 }
162 
163 #else /* CFG_WITH_STATS */
164 static inline void incr_ro_hits(void) { }
165 static inline void incr_rw_hits(void) { }
166 static inline void incr_hidden_hits(void) { }
167 static inline void incr_zi_released(void) { }
168 static inline void incr_npages_all(void) { }
169 static inline void set_npages(void) { }
170 
171 void tee_pager_get_stats(struct tee_pager_stats *stats)
172 {
173 	memset(stats, 0, sizeof(struct tee_pager_stats));
174 }
175 #endif /* CFG_WITH_STATS */
176 
177 static struct pgt pager_core_pgt;
178 struct core_mmu_table_info tee_pager_tbl_info;
179 static struct core_mmu_table_info pager_alias_tbl_info;
180 
181 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
182 
183 /* Defines the range of the alias area */
184 static tee_mm_entry_t *pager_alias_area;
185 /*
186  * Physical pages are added in a stack like fashion to the alias area,
187  * @pager_alias_next_free gives the address of next free entry if
188  * @pager_alias_next_free is != 0
189  */
190 static uintptr_t pager_alias_next_free;
191 
192 #ifdef CFG_TEE_CORE_DEBUG
193 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
194 
195 static uint32_t pager_lock_dldetect(const char *func, const int line,
196 				    struct abort_info *ai)
197 {
198 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
199 	unsigned int retries = 0;
200 	unsigned int reminder = 0;
201 
202 	while (!cpu_spin_trylock(&pager_spinlock)) {
203 		retries++;
204 		if (!retries) {
205 			/* wrapped, time to report */
206 			trace_printf(func, line, TRACE_ERROR, true,
207 				     "possible spinlock deadlock reminder %u",
208 				     reminder);
209 			if (reminder < UINT_MAX)
210 				reminder++;
211 			if (ai)
212 				abort_print(ai);
213 		}
214 	}
215 
216 	return exceptions;
217 }
218 #else
219 static uint32_t pager_lock(struct abort_info __unused *ai)
220 {
221 	return cpu_spin_lock_xsave(&pager_spinlock);
222 }
223 #endif
224 
225 static void pager_unlock(uint32_t exceptions)
226 {
227 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
228 }
229 
230 static void set_alias_area(tee_mm_entry_t *mm)
231 {
232 	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
233 	size_t tbl_va_size;
234 	unsigned idx;
235 	unsigned last_idx;
236 	vaddr_t smem = tee_mm_get_smem(mm);
237 	size_t nbytes = tee_mm_get_bytes(mm);
238 
239 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
240 
241 	if (pager_alias_area)
242 		panic("null pager_alias_area");
243 
244 	if (!ti->num_entries && !core_mmu_find_table(smem, UINT_MAX, ti))
245 		panic("Can't find translation table");
246 
247 	if ((1 << ti->shift) != SMALL_PAGE_SIZE)
248 		panic("Unsupported page size in translation table");
249 
250 	tbl_va_size = (1 << ti->shift) * ti->num_entries;
251 	if (!core_is_buffer_inside(smem, nbytes,
252 				   ti->va_base, tbl_va_size)) {
253 		EMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit it translation table 0x%" PRIxVA " len 0x%zx",
254 		     smem, nbytes, ti->va_base, tbl_va_size);
255 		panic();
256 	}
257 
258 	if (smem & SMALL_PAGE_MASK || nbytes & SMALL_PAGE_MASK)
259 		panic("invalid area alignment");
260 
261 	pager_alias_area = mm;
262 	pager_alias_next_free = smem;
263 
264 	/* Clear all mapping in the alias area */
265 	idx = core_mmu_va2idx(ti, smem);
266 	last_idx = core_mmu_va2idx(ti, smem + nbytes);
267 	for (; idx < last_idx; idx++)
268 		core_mmu_set_entry(ti, idx, 0, 0);
269 
270 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
271 }
272 
273 static void generate_ae_key(void)
274 {
275 	if (rng_generate(pager_ae_key, sizeof(pager_ae_key)) != TEE_SUCCESS)
276 		panic("failed to generate random");
277 }
278 
279 void tee_pager_init(tee_mm_entry_t *mm_alias)
280 {
281 	set_alias_area(mm_alias);
282 	generate_ae_key();
283 }
284 
285 static void *pager_add_alias_page(paddr_t pa)
286 {
287 	unsigned idx;
288 	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
289 	/* Alias pages mapped without write permission: runtime will care */
290 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
291 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
292 			TEE_MATTR_SECURE | TEE_MATTR_PR;
293 
294 	DMSG("0x%" PRIxPA, pa);
295 
296 	if (!pager_alias_next_free || !ti->num_entries)
297 		panic("invalid alias entry");
298 
299 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
300 	core_mmu_set_entry(ti, idx, pa, attr);
301 	pgt_inc_used_entries(&pager_core_pgt);
302 	pager_alias_next_free += SMALL_PAGE_SIZE;
303 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
304 				      tee_mm_get_bytes(pager_alias_area)))
305 		pager_alias_next_free = 0;
306 	return (void *)core_mmu_idx2va(ti, idx);
307 }
308 
309 static struct tee_pager_area *alloc_area(struct pgt *pgt,
310 					 vaddr_t base, size_t size,
311 					 uint32_t flags, const void *store,
312 					 const void *hashes)
313 {
314 	struct tee_pager_area *area = calloc(1, sizeof(*area));
315 	enum area_type at;
316 	tee_mm_entry_t *mm_store = NULL;
317 
318 	if (!area)
319 		return NULL;
320 
321 	if (flags & (TEE_MATTR_PW | TEE_MATTR_UW)) {
322 		if (flags & TEE_MATTR_LOCKED) {
323 			at = AREA_TYPE_LOCK;
324 			goto out;
325 		}
326 		mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size);
327 		if (!mm_store)
328 			goto bad;
329 		area->store = phys_to_virt(tee_mm_get_smem(mm_store),
330 					   MEM_AREA_TA_RAM);
331 		if (!area->store)
332 			goto bad;
333 		area->u.rwp = calloc(size / SMALL_PAGE_SIZE,
334 				     sizeof(struct pager_rw_pstate));
335 		if (!area->u.rwp)
336 			goto bad;
337 		at = AREA_TYPE_RW;
338 	} else {
339 		area->store = (void *)store;
340 		area->u.hashes = hashes;
341 		at = AREA_TYPE_RO;
342 	}
343 out:
344 	area->pgt = pgt;
345 	area->base = base;
346 	area->size = size;
347 	area->flags = flags;
348 	area->type = at;
349 	return area;
350 bad:
351 	tee_mm_free(mm_store);
352 	free(area->u.rwp);
353 	free(area);
354 	return NULL;
355 }
356 
357 static void area_insert_tail(struct tee_pager_area *area)
358 {
359 	uint32_t exceptions = pager_lock(NULL);
360 
361 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
362 
363 	pager_unlock(exceptions);
364 }
365 KEEP_PAGER(area_insert_tail);
366 
367 static size_t tbl_usage_count(struct pgt *pgt)
368 {
369 	size_t n;
370 	paddr_t pa;
371 	size_t usage = 0;
372 
373 	for (n = 0; n < tee_pager_tbl_info.num_entries; n++) {
374 		core_mmu_get_entry_primitive(pgt->tbl, tee_pager_tbl_info.level,
375 					     n, &pa, NULL);
376 		if (pa)
377 			usage++;
378 	}
379 	return usage;
380 }
381 
382 bool tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
383 			const void *store, const void *hashes)
384 {
385 	struct tee_pager_area *area;
386 	size_t tbl_va_size;
387 	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
388 
389 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
390 		base, base + size, flags, store, hashes);
391 
392 	if (base & SMALL_PAGE_MASK || size & SMALL_PAGE_MASK || !size) {
393 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, size);
394 		panic();
395 	}
396 
397 	if (!(flags & TEE_MATTR_PW) && (!store || !hashes))
398 		panic("write pages cannot provide store or hashes");
399 
400 	if ((flags & TEE_MATTR_PW) && (store || hashes))
401 		panic("non-write pages must provide store and hashes");
402 
403 	if (!pager_core_pgt.tbl) {
404 		pager_core_pgt.tbl = ti->table;
405 		pgt_set_used_entries(&pager_core_pgt,
406 				     tbl_usage_count(&pager_core_pgt));
407 	}
408 
409 	tbl_va_size = (1 << ti->shift) * ti->num_entries;
410 	if (!core_is_buffer_inside(base, size, ti->va_base, tbl_va_size)) {
411 		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit it translation table 0x%" PRIxVA " len 0x%zx",
412 			base, size, ti->va_base, tbl_va_size);
413 		return false;
414 	}
415 
416 	area = alloc_area(&pager_core_pgt, base, size, flags, store, hashes);
417 	if (!area)
418 		return false;
419 
420 	area_insert_tail(area);
421 	return true;
422 }
423 
424 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
425 					vaddr_t va)
426 {
427 	struct tee_pager_area *area;
428 
429 	if (!areas)
430 		return NULL;
431 
432 	TAILQ_FOREACH(area, areas, link) {
433 		if (core_is_buffer_inside(va, 1, area->base, area->size))
434 			return area;
435 	}
436 	return NULL;
437 }
438 
439 #ifdef CFG_PAGED_USER_TA
440 static struct tee_pager_area *find_uta_area(vaddr_t va)
441 {
442 	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
443 
444 	if (!ctx || !is_user_ta_ctx(ctx))
445 		return NULL;
446 	return find_area(to_user_ta_ctx(ctx)->areas, va);
447 }
448 #else
449 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
450 {
451 	return NULL;
452 }
453 #endif /*CFG_PAGED_USER_TA*/
454 
455 
456 static uint32_t get_area_mattr(uint32_t area_flags)
457 {
458 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
459 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
460 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
461 
462 	if (!(area_flags & (TEE_MATTR_UR | TEE_MATTR_UX | TEE_MATTR_UW)))
463 		attr |= TEE_MATTR_GLOBAL;
464 
465 	return attr;
466 }
467 
468 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
469 {
470 	paddr_t pa;
471 	unsigned idx;
472 
473 	idx = core_mmu_va2idx(&pager_alias_tbl_info, (vaddr_t)pmem->va_alias);
474 	core_mmu_get_entry(&pager_alias_tbl_info, idx, &pa, NULL);
475 	return pa;
476 }
477 
478 static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
479 			void *dst)
480 {
481 	struct pager_aes_gcm_iv iv = {
482 		{ (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }
483 	};
484 
485 	return pager_aes_gcm_decrypt(pager_ae_key, sizeof(pager_ae_key),
486 				     &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE);
487 }
488 
489 static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
490 {
491 	struct pager_aes_gcm_iv iv;
492 
493 	assert((rwp->iv + 1) > rwp->iv);
494 	rwp->iv++;
495 	/*
496 	 * IV is constructed as recommended in section "8.2.1 Deterministic
497 	 * Construction" of "Recommendation for Block Cipher Modes of
498 	 * Operation: Galois/Counter Mode (GCM) and GMAC",
499 	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
500 	 */
501 	iv.iv[0] = (vaddr_t)rwp;
502 	iv.iv[1] = rwp->iv >> 32;
503 	iv.iv[2] = rwp->iv;
504 
505 	if (!pager_aes_gcm_encrypt(pager_ae_key, sizeof(pager_ae_key),
506 				   &iv, rwp->tag,
507 				   src, dst, SMALL_PAGE_SIZE))
508 		panic("gcm failed");
509 }
510 
511 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
512 			void *va_alias)
513 {
514 	size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
515 	const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;
516 	struct core_mmu_table_info *ti;
517 	uint32_t attr_alias;
518 	paddr_t pa_alias;
519 	unsigned int idx_alias;
520 
521 	/* Insure we are allowed to write to aliased virtual page */
522 	ti = &pager_alias_tbl_info;
523 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
524 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
525 	if (!(attr_alias & TEE_MATTR_PW)) {
526 		attr_alias |= TEE_MATTR_PW;
527 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
528 		tlbi_mva_allasid((vaddr_t)va_alias);
529 	}
530 
531 	switch (area->type) {
532 	case AREA_TYPE_RO:
533 		{
534 			const void *hash = area->u.hashes +
535 					   idx * TEE_SHA256_HASH_SIZE;
536 
537 			memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
538 			incr_ro_hits();
539 
540 			if (hash_sha256_check(hash, va_alias,
541 					      SMALL_PAGE_SIZE) != TEE_SUCCESS) {
542 				EMSG("PH 0x%" PRIxVA " failed", page_va);
543 				panic();
544 			}
545 		}
546 		/* Forbid write to aliases for read-only (maybe exec) pages */
547 		attr_alias &= ~TEE_MATTR_PW;
548 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
549 		tlbi_mva_allasid((vaddr_t)va_alias);
550 		break;
551 	case AREA_TYPE_RW:
552 		FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64,
553 			va_alias, page_va, area->u.rwp[idx].iv);
554 		if (!area->u.rwp[idx].iv)
555 			memset(va_alias, 0, SMALL_PAGE_SIZE);
556 		else if (!decrypt_page(&area->u.rwp[idx], stored_page,
557 				       va_alias)) {
558 			EMSG("PH 0x%" PRIxVA " failed", page_va);
559 			panic();
560 		}
561 		incr_rw_hits();
562 		break;
563 	case AREA_TYPE_LOCK:
564 		FMSG("Zero init %p %#" PRIxVA, va_alias, page_va);
565 		memset(va_alias, 0, SMALL_PAGE_SIZE);
566 		break;
567 	default:
568 		panic();
569 	}
570 }
571 
572 static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
573 {
574 	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
575 				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;
576 
577 	if (pmem->area->type == AREA_TYPE_RW && (attr & dirty_bits)) {
578 		size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK;
579 		size_t idx = pmem->pgidx - (offs >> SMALL_PAGE_SHIFT);
580 		void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE;
581 
582 		assert(pmem->area->flags & (TEE_MATTR_PW | TEE_MATTR_UW));
583 		encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias,
584 			     stored_page);
585 		FMSG("Saved %#" PRIxVA " iv %#" PRIx64,
586 			pmem->area->base + idx * SMALL_PAGE_SIZE,
587 			pmem->area->u.rwp[idx].iv);
588 	}
589 }
590 
591 static void area_get_entry(struct tee_pager_area *area, size_t idx,
592 			   paddr_t *pa, uint32_t *attr)
593 {
594 	assert(area->pgt);
595 	assert(idx < tee_pager_tbl_info.num_entries);
596 	core_mmu_get_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,
597 				     idx, pa, attr);
598 }
599 
600 static void area_set_entry(struct tee_pager_area *area, size_t idx,
601 			   paddr_t pa, uint32_t attr)
602 {
603 	assert(area->pgt);
604 	assert(idx < tee_pager_tbl_info.num_entries);
605 	core_mmu_set_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,
606 				     idx, pa, attr);
607 }
608 
609 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
610 {
611 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
612 }
613 
614 static vaddr_t __maybe_unused area_idx2va(struct tee_pager_area *area,
615 					 size_t idx)
616 {
617 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
618 }
619 
620 #ifdef CFG_PAGED_USER_TA
621 static void free_area(struct tee_pager_area *area)
622 {
623 	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr,
624 				virt_to_phys(area->store)));
625 	if (area->type == AREA_TYPE_RW)
626 		free(area->u.rwp);
627 	free(area);
628 }
629 
630 static bool pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
631 			       size_t size)
632 {
633 	struct tee_pager_area *area;
634 	uint32_t flags;
635 	vaddr_t b = base;
636 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
637 
638 	if (!utc->areas) {
639 		utc->areas = malloc(sizeof(*utc->areas));
640 		if (!utc->areas)
641 			return false;
642 		TAILQ_INIT(utc->areas);
643 	}
644 
645 	flags = TEE_MATTR_PRW | TEE_MATTR_URWX;
646 
647 	while (s) {
648 		size_t s2;
649 
650 		if (find_area(utc->areas, b))
651 			return false;
652 
653 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
654 
655 		/* Table info will be set when the context is activated. */
656 		area = alloc_area(NULL, b, s2, flags, NULL, NULL);
657 		if (!area)
658 			return false;
659 		TAILQ_INSERT_TAIL(utc->areas, area, link);
660 		b += s2;
661 		s -= s2;
662 	}
663 
664 	return true;
665 }
666 
667 bool tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size)
668 {
669 	struct thread_specific_data *tsd = thread_get_tsd();
670 	struct tee_pager_area *area;
671 	struct core_mmu_table_info dir_info = { NULL };
672 
673 	if (&utc->ctx != tsd->ctx) {
674 		/*
675 		 * Changes are to an utc that isn't active. Just add the
676 		 * areas page tables will be dealt with later.
677 		 */
678 		return pager_add_uta_area(utc, base, size);
679 	}
680 
681 	/*
682 	 * Assign page tables before adding areas to be able to tell which
683 	 * are newly added and should be removed in case of failure.
684 	 */
685 	tee_pager_assign_uta_tables(utc);
686 	if (!pager_add_uta_area(utc, base, size)) {
687 		struct tee_pager_area *next_a;
688 
689 		/* Remove all added areas */
690 		TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
691 			if (!area->pgt) {
692 				TAILQ_REMOVE(utc->areas, area, link);
693 				free_area(area);
694 			}
695 		}
696 		return false;
697 	}
698 
699 	/*
700 	 * Assign page tables to the new areas and make sure that the page
701 	 * tables are registered in the upper table.
702 	 */
703 	tee_pager_assign_uta_tables(utc);
704 	core_mmu_get_user_pgdir(&dir_info);
705 	TAILQ_FOREACH(area, utc->areas, link) {
706 		paddr_t pa;
707 		size_t idx;
708 		uint32_t attr;
709 
710 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
711 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
712 
713 		/*
714 		 * Check if the page table already is used, if it is, it's
715 		 * already registered.
716 		 */
717 		if (area->pgt->num_used_entries) {
718 			assert(attr & TEE_MATTR_TABLE);
719 			assert(pa == virt_to_phys(area->pgt->tbl));
720 			continue;
721 		}
722 
723 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
724 		pa = virt_to_phys(area->pgt->tbl);
725 		assert(pa);
726 		/*
727 		 * Note that the update of the table entry is guaranteed to
728 		 * be atomic.
729 		 */
730 		core_mmu_set_entry(&dir_info, idx, pa, attr);
731 	}
732 
733 	return true;
734 }
735 
736 static void init_tbl_info_from_pgt(struct core_mmu_table_info *ti,
737 				   struct pgt *pgt)
738 {
739 	assert(pgt);
740 	ti->table = pgt->tbl;
741 	ti->va_base = pgt->vabase;
742 	ti->level = tee_pager_tbl_info.level;
743 	ti->shift = tee_pager_tbl_info.shift;
744 	ti->num_entries = tee_pager_tbl_info.num_entries;
745 }
746 
747 static void transpose_area(struct tee_pager_area *area, struct pgt *new_pgt,
748 			   vaddr_t new_base)
749 {
750 	uint32_t exceptions = pager_lock(NULL);
751 
752 	/*
753 	 * If there's no pgt assigned to the old area there's no pages to
754 	 * deal with either, just update with a new pgt and base.
755 	 */
756 	if (area->pgt) {
757 		struct core_mmu_table_info old_ti;
758 		struct core_mmu_table_info new_ti;
759 		struct tee_pager_pmem *pmem;
760 
761 		init_tbl_info_from_pgt(&old_ti, area->pgt);
762 		init_tbl_info_from_pgt(&new_ti, new_pgt);
763 
764 
765 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
766 			vaddr_t va;
767 			paddr_t pa;
768 			uint32_t attr;
769 
770 			if (pmem->area != area)
771 				continue;
772 			core_mmu_get_entry(&old_ti, pmem->pgidx, &pa, &attr);
773 			core_mmu_set_entry(&old_ti, pmem->pgidx, 0, 0);
774 
775 			assert(pa == get_pmem_pa(pmem));
776 			assert(attr);
777 			assert(area->pgt->num_used_entries);
778 			area->pgt->num_used_entries--;
779 
780 			va = core_mmu_idx2va(&old_ti, pmem->pgidx);
781 			va = va - area->base + new_base;
782 			pmem->pgidx = core_mmu_va2idx(&new_ti, va);
783 			core_mmu_set_entry(&new_ti, pmem->pgidx, pa, attr);
784 			new_pgt->num_used_entries++;
785 		}
786 	}
787 
788 	area->pgt = new_pgt;
789 	area->base = new_base;
790 	pager_unlock(exceptions);
791 }
792 KEEP_PAGER(transpose_area);
793 
794 void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
795 				   vaddr_t src_base,
796 				   struct user_ta_ctx *dst_utc,
797 				   vaddr_t dst_base, struct pgt **dst_pgt,
798 				   size_t size)
799 {
800 	struct tee_pager_area *area;
801 	struct tee_pager_area *next_a;
802 
803 	TAILQ_FOREACH_SAFE(area, src_utc->areas, link, next_a) {
804 		vaddr_t new_area_base;
805 		size_t new_idx;
806 
807 		if (!core_is_buffer_inside(area->base, area->size,
808 					  src_base, size))
809 			continue;
810 
811 		TAILQ_REMOVE(src_utc->areas, area, link);
812 
813 		new_area_base = dst_base + (src_base - area->base);
814 		new_idx = (new_area_base - dst_pgt[0]->vabase) /
815 			  CORE_MMU_PGDIR_SIZE;
816 		assert((new_area_base & ~CORE_MMU_PGDIR_MASK) ==
817 		       dst_pgt[new_idx]->vabase);
818 		transpose_area(area, dst_pgt[new_idx], new_area_base);
819 
820 		/*
821 		 * Assert that this will not cause any conflicts in the new
822 		 * utc.  This should already be guaranteed, but a bug here
823 		 * could be tricky to find.
824 		 */
825 		assert(!find_area(dst_utc->areas, area->base));
826 		TAILQ_INSERT_TAIL(dst_utc->areas, area, link);
827 	}
828 }
829 
830 static void rem_area(struct tee_pager_area_head *area_head,
831 		     struct tee_pager_area *area)
832 {
833 	struct tee_pager_pmem *pmem;
834 	uint32_t exceptions;
835 
836 	exceptions = pager_lock(NULL);
837 
838 	TAILQ_REMOVE(area_head, area, link);
839 
840 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
841 		if (pmem->area == area) {
842 			area_set_entry(area, pmem->pgidx, 0, 0);
843 			tlbi_mva_allasid(area_idx2va(area, pmem->pgidx));
844 			pgt_dec_used_entries(area->pgt);
845 			pmem->area = NULL;
846 			pmem->pgidx = INVALID_PGIDX;
847 		}
848 	}
849 
850 	pager_unlock(exceptions);
851 	free_area(area);
852 }
853 KEEP_PAGER(rem_area);
854 
855 void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
856 			      size_t size)
857 {
858 	struct tee_pager_area *area;
859 	struct tee_pager_area *next_a;
860 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
861 
862 	TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
863 		if (core_is_buffer_inside(area->base, area->size, base, s))
864 			rem_area(utc->areas, area);
865 	}
866 }
867 
868 void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
869 {
870 	struct tee_pager_area *area;
871 
872 	if (!utc->areas)
873 		return;
874 
875 	while (true) {
876 		area = TAILQ_FIRST(utc->areas);
877 		if (!area)
878 			break;
879 		TAILQ_REMOVE(utc->areas, area, link);
880 		free_area(area);
881 	}
882 
883 	free(utc->areas);
884 }
885 
886 bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
887 				 size_t size, uint32_t flags)
888 {
889 	bool ret;
890 	vaddr_t b = base;
891 	size_t s = size;
892 	size_t s2;
893 	struct tee_pager_area *area = find_area(utc->areas, b);
894 	uint32_t exceptions;
895 	struct tee_pager_pmem *pmem;
896 	paddr_t pa;
897 	uint32_t a;
898 	uint32_t f;
899 
900 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
901 	if (f & TEE_MATTR_UW)
902 		f |= TEE_MATTR_PW;
903 	f = get_area_mattr(f);
904 
905 	exceptions = pager_lock(NULL);
906 
907 	while (s) {
908 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
909 		if (!area || area->base != b || area->size != s2) {
910 			ret = false;
911 			goto out;
912 		}
913 		b += s2;
914 		s -= s2;
915 
916 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
917 			if (pmem->area != area)
918 				continue;
919 			area_get_entry(pmem->area, pmem->pgidx, &pa, &a);
920 			if (a & TEE_MATTR_VALID_BLOCK)
921 				assert(pa == get_pmem_pa(pmem));
922 			else
923 				pa = get_pmem_pa(pmem);
924 			if (a == f)
925 				continue;
926 			area_set_entry(pmem->area, pmem->pgidx, 0, 0);
927 			tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
928 			if (!(flags & TEE_MATTR_UW))
929 				tee_pager_save_page(pmem, a);
930 
931 			area_set_entry(pmem->area, pmem->pgidx, pa, f);
932 			/*
933 			 * Make sure the table update is visible before
934 			 * continuing.
935 			 */
936 			dsb_ishst();
937 
938 			if (flags & TEE_MATTR_UX) {
939 				void *va = (void *)area_idx2va(pmem->area,
940 							       pmem->pgidx);
941 
942 				cache_op_inner(DCACHE_AREA_CLEAN, va,
943 						SMALL_PAGE_SIZE);
944 				cache_op_inner(ICACHE_AREA_INVALIDATE, va,
945 						SMALL_PAGE_SIZE);
946 			}
947 		}
948 
949 		area->flags = f;
950 		area = TAILQ_NEXT(area, link);
951 	}
952 
953 	ret = true;
954 out:
955 	pager_unlock(exceptions);
956 	return ret;
957 }
958 KEEP_PAGER(tee_pager_set_uta_area_attr);
959 #endif /*CFG_PAGED_USER_TA*/
960 
961 static bool tee_pager_unhide_page(vaddr_t page_va)
962 {
963 	struct tee_pager_pmem *pmem;
964 
965 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
966 		paddr_t pa;
967 		uint32_t attr;
968 
969 		if (pmem->pgidx == INVALID_PGIDX)
970 			continue;
971 
972 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
973 
974 		if (!(attr &
975 		     (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
976 			continue;
977 
978 		if (area_va2idx(pmem->area, page_va) == pmem->pgidx) {
979 			uint32_t a = get_area_mattr(pmem->area->flags);
980 
981 			/* page is hidden, show and move to back */
982 			if (pa != get_pmem_pa(pmem))
983 				panic("unexpected pa");
984 
985 			/*
986 			 * If it's not a dirty block, then it should be
987 			 * read only.
988 			 */
989 			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
990 				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
991 			else
992 				FMSG("Unhide %#" PRIxVA, page_va);
993 
994 			if (page_va == 0x8000a000)
995 				FMSG("unhide %#" PRIxVA " a %#" PRIX32,
996 					page_va, a);
997 			area_set_entry(pmem->area, pmem->pgidx, pa, a);
998 
999 			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1000 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1001 
1002 			tlbi_mva_allasid(page_va);
1003 
1004 			incr_hidden_hits();
1005 			return true;
1006 		}
1007 	}
1008 
1009 	return false;
1010 }
1011 
1012 static void tee_pager_hide_pages(void)
1013 {
1014 	struct tee_pager_pmem *pmem;
1015 	size_t n = 0;
1016 
1017 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1018 		paddr_t pa;
1019 		uint32_t attr;
1020 		uint32_t a;
1021 
1022 		if (n >= TEE_PAGER_NHIDE)
1023 			break;
1024 		n++;
1025 
1026 		/* we cannot hide pages when pmem->area is not defined. */
1027 		if (!pmem->area)
1028 			continue;
1029 
1030 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
1031 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1032 			continue;
1033 
1034 		assert(pa == get_pmem_pa(pmem));
1035 		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)){
1036 			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
1037 			FMSG("Hide %#" PRIxVA,
1038 			     area_idx2va(pmem->area, pmem->pgidx));
1039 		} else
1040 			a = TEE_MATTR_HIDDEN_BLOCK;
1041 
1042 		area_set_entry(pmem->area, pmem->pgidx, pa, a);
1043 		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1044 	}
1045 }
1046 
1047 /*
1048  * Find mapped pmem, hide and move to pageble pmem.
1049  * Return false if page was not mapped, and true if page was mapped.
1050  */
1051 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1052 				       vaddr_t page_va)
1053 {
1054 	struct tee_pager_pmem *pmem;
1055 	unsigned pgidx;
1056 	paddr_t pa;
1057 	uint32_t attr;
1058 
1059 	pgidx = area_va2idx(area, page_va);
1060 	area_get_entry(area, pgidx, &pa, &attr);
1061 
1062 	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
1063 
1064 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1065 		if (pmem->area != area || pmem->pgidx != pgidx)
1066 			continue;
1067 
1068 		assert(pa == get_pmem_pa(pmem));
1069 		area_set_entry(area, pgidx, 0, 0);
1070 		pgt_dec_used_entries(area->pgt);
1071 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1072 		pmem->area = NULL;
1073 		pmem->pgidx = INVALID_PGIDX;
1074 		tee_pager_npages++;
1075 		set_npages();
1076 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1077 		incr_zi_released();
1078 		return true;
1079 	}
1080 
1081 	return false;
1082 }
1083 
1084 /* Finds the oldest page and unmats it from its old virtual address */
1085 static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
1086 {
1087 	struct tee_pager_pmem *pmem;
1088 
1089 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1090 	if (!pmem) {
1091 		EMSG("No pmem entries");
1092 		return NULL;
1093 	}
1094 	if (pmem->pgidx != INVALID_PGIDX) {
1095 		uint32_t a;
1096 
1097 		assert(pmem->area && pmem->area->pgt);
1098 		area_get_entry(pmem->area, pmem->pgidx, NULL, &a);
1099 		area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1100 		pgt_dec_used_entries(pmem->area->pgt);
1101 		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1102 		tee_pager_save_page(pmem, a);
1103 	}
1104 
1105 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1106 	pmem->pgidx = INVALID_PGIDX;
1107 	pmem->area = NULL;
1108 	if (area->type == AREA_TYPE_LOCK) {
1109 		/* Move page to lock list */
1110 		if (tee_pager_npages <= 0)
1111 			panic("running out of page");
1112 		tee_pager_npages--;
1113 		set_npages();
1114 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1115 	} else {
1116 		/* move page to back */
1117 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1118 	}
1119 
1120 	return pmem;
1121 }
1122 
1123 static bool pager_update_permissions(struct tee_pager_area *area,
1124 			struct abort_info *ai, bool *handled)
1125 {
1126 	unsigned int pgidx = area_va2idx(area, ai->va);
1127 	uint32_t attr;
1128 	paddr_t pa;
1129 
1130 	*handled = false;
1131 
1132 	area_get_entry(area, pgidx, &pa, &attr);
1133 
1134 	/* Not mapped */
1135 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1136 		return false;
1137 
1138 	/* Not readable, should not happen */
1139 	if (abort_is_user_exception(ai)) {
1140 		if (!(attr & TEE_MATTR_UR))
1141 			return true;
1142 	} else {
1143 		if (!(attr & TEE_MATTR_PR)) {
1144 			abort_print_error(ai);
1145 			panic();
1146 		}
1147 	}
1148 
1149 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1150 	case CORE_MMU_FAULT_TRANSLATION:
1151 	case CORE_MMU_FAULT_READ_PERMISSION:
1152 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1153 			/* Check attempting to execute from an NOX page */
1154 			if (abort_is_user_exception(ai)) {
1155 				if (!(attr & TEE_MATTR_UX))
1156 					return true;
1157 			} else {
1158 				if (!(attr & TEE_MATTR_PX)) {
1159 					abort_print_error(ai);
1160 					panic();
1161 				}
1162 			}
1163 		}
1164 		/* Since the page is mapped now it's OK */
1165 		break;
1166 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1167 		/* Check attempting to write to an RO page */
1168 		if (abort_is_user_exception(ai)) {
1169 			if (!(area->flags & TEE_MATTR_UW))
1170 				return true;
1171 			if (!(attr & TEE_MATTR_UW)) {
1172 				FMSG("Dirty %p",
1173 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1174 				area_set_entry(area, pgidx, pa,
1175 					       get_area_mattr(area->flags));
1176 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1177 			}
1178 
1179 		} else {
1180 			if (!(area->flags & TEE_MATTR_PW)) {
1181 				abort_print_error(ai);
1182 				panic();
1183 			}
1184 			if (!(attr & TEE_MATTR_PW)) {
1185 				FMSG("Dirty %p",
1186 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1187 				area_set_entry(area, pgidx, pa,
1188 					       get_area_mattr(area->flags));
1189 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1190 			}
1191 		}
1192 		/* Since permissions has been updated now it's OK */
1193 		break;
1194 	default:
1195 		/* Some fault we can't deal with */
1196 		if (abort_is_user_exception(ai))
1197 			return true;
1198 		abort_print_error(ai);
1199 		panic();
1200 	}
1201 	*handled = true;
1202 	return true;
1203 }
1204 
1205 #ifdef CFG_TEE_CORE_DEBUG
1206 static void stat_handle_fault(void)
1207 {
1208 	static size_t num_faults;
1209 	static size_t min_npages = SIZE_MAX;
1210 	static size_t total_min_npages = SIZE_MAX;
1211 
1212 	num_faults++;
1213 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1214 		DMSG("nfaults %zu npages %zu (min %zu)",
1215 		     num_faults, tee_pager_npages, min_npages);
1216 		min_npages = tee_pager_npages; /* reset */
1217 	}
1218 	if (tee_pager_npages < min_npages)
1219 		min_npages = tee_pager_npages;
1220 	if (tee_pager_npages < total_min_npages)
1221 		total_min_npages = tee_pager_npages;
1222 }
1223 #else
1224 static void stat_handle_fault(void)
1225 {
1226 }
1227 #endif
1228 
1229 bool tee_pager_handle_fault(struct abort_info *ai)
1230 {
1231 	struct tee_pager_area *area;
1232 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1233 	uint32_t exceptions;
1234 	bool ret;
1235 
1236 #ifdef TEE_PAGER_DEBUG_PRINT
1237 	abort_print(ai);
1238 #endif
1239 
1240 	/*
1241 	 * We're updating pages that can affect several active CPUs at a
1242 	 * time below. We end up here because a thread tries to access some
1243 	 * memory that isn't available. We have to be careful when making
1244 	 * that memory available as other threads may succeed in accessing
1245 	 * that address the moment after we've made it available.
1246 	 *
1247 	 * That means that we can't just map the memory and populate the
1248 	 * page, instead we use the aliased mapping to populate the page
1249 	 * and once everything is ready we map it.
1250 	 */
1251 	exceptions = pager_lock(ai);
1252 
1253 	stat_handle_fault();
1254 
1255 	/* check if the access is valid */
1256 	if (abort_is_user_exception(ai)) {
1257 		area = find_uta_area(ai->va);
1258 
1259 	} else {
1260 		area = find_area(&tee_pager_area_head, ai->va);
1261 		if (!area)
1262 			area = find_uta_area(ai->va);
1263 	}
1264 	if (!area || !area->pgt) {
1265 		ret = false;
1266 		goto out;
1267 	}
1268 
1269 	if (!tee_pager_unhide_page(page_va)) {
1270 		struct tee_pager_pmem *pmem = NULL;
1271 		uint32_t attr;
1272 
1273 		/*
1274 		 * The page wasn't hidden, but some other core may have
1275 		 * updated the table entry before we got here or we need
1276 		 * to make a read-only page read-write (dirty).
1277 		 */
1278 		if (pager_update_permissions(area, ai, &ret)) {
1279 			/*
1280 			 * Nothing more to do with the abort. The problem
1281 			 * could already have been dealt with from another
1282 			 * core or if ret is false the TA will be paniced.
1283 			 */
1284 			goto out;
1285 		}
1286 
1287 		pmem = tee_pager_get_page(area);
1288 		if (!pmem) {
1289 			abort_print(ai);
1290 			panic();
1291 		}
1292 
1293 		/* load page code & data */
1294 		tee_pager_load_page(area, page_va, pmem->va_alias);
1295 
1296 		/*
1297 		 * We've updated the page using the aliased mapping and
1298 		 * some cache maintenence is now needed if it's an
1299 		 * executable page.
1300 		 *
1301 		 * Since the d-cache is a Physically-indexed,
1302 		 * physically-tagged (PIPT) cache we can clean the aliased
1303 		 * address instead of the real virtual address.
1304 		 *
1305 		 * The i-cache can also be PIPT, but may be something else
1306 		 * to, to keep it simple we invalidate the entire i-cache.
1307 		 * As a future optimization we may invalidate only the
1308 		 * aliased area if it a PIPT cache else the entire cache.
1309 		 */
1310 		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1311 			/*
1312 			 * Doing these operations to LoUIS (Level of
1313 			 * unification, Inner Shareable) would be enough
1314 			 */
1315 			cache_op_inner(DCACHE_AREA_CLEAN, pmem->va_alias,
1316 					SMALL_PAGE_SIZE);
1317 			cache_op_inner(ICACHE_INVALIDATE, NULL, 0);
1318 		}
1319 
1320 		pmem->area = area;
1321 		pmem->pgidx = area_va2idx(area, ai->va);
1322 		attr = get_area_mattr(area->flags) &
1323 			~(TEE_MATTR_PW | TEE_MATTR_UW);
1324 		area_set_entry(area, pmem->pgidx, get_pmem_pa(pmem), attr);
1325 		/* No need to flush TLB for this entry, it was invalid */
1326 		pgt_inc_used_entries(area->pgt);
1327 
1328 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
1329 		     area_idx2va(area, pmem->pgidx), get_pmem_pa(pmem));
1330 
1331 	}
1332 
1333 	tee_pager_hide_pages();
1334 	ret = true;
1335 out:
1336 	pager_unlock(exceptions);
1337 	return ret;
1338 }
1339 
1340 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1341 {
1342 	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
1343 	size_t n;
1344 
1345 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1346 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1347 
1348 	/* setup memory */
1349 	for (n = 0; n < npages; n++) {
1350 		struct tee_pager_pmem *pmem;
1351 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1352 		unsigned pgidx = core_mmu_va2idx(ti, va);
1353 		paddr_t pa;
1354 		uint32_t attr;
1355 
1356 		/*
1357 		 * Note that we can only support adding pages in the
1358 		 * valid range of this table info, currently not a problem.
1359 		 */
1360 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1361 
1362 		/* Ignore unmapped pages/blocks */
1363 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1364 			continue;
1365 
1366 		pmem = malloc(sizeof(struct tee_pager_pmem));
1367 		if (!pmem)
1368 			panic("out of mem");
1369 
1370 		pmem->va_alias = pager_add_alias_page(pa);
1371 
1372 		if (unmap) {
1373 			pmem->area = NULL;
1374 			pmem->pgidx = INVALID_PGIDX;
1375 			core_mmu_set_entry(ti, pgidx, 0, 0);
1376 			pgt_dec_used_entries(&pager_core_pgt);
1377 		} else {
1378 			/*
1379 			 * The page is still mapped, let's assign the area
1380 			 * and update the protection bits accordingly.
1381 			 */
1382 			pmem->area = find_area(&tee_pager_area_head, va);
1383 			assert(pmem->area->pgt == &pager_core_pgt);
1384 			pmem->pgidx = pgidx;
1385 			assert(pa == get_pmem_pa(pmem));
1386 			area_set_entry(pmem->area, pgidx, pa,
1387 				       get_area_mattr(pmem->area->flags));
1388 		}
1389 
1390 		tee_pager_npages++;
1391 		incr_npages_all();
1392 		set_npages();
1393 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1394 	}
1395 
1396 	/*
1397 	 * As this is done at inits, invalidate all TLBs once instead of
1398 	 * targeting only the modified entries.
1399 	 */
1400 	tlbi_all();
1401 }
1402 
1403 #ifdef CFG_PAGED_USER_TA
1404 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1405 {
1406 	struct pgt *p = pgt;
1407 
1408 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1409 		p = SLIST_NEXT(p, link);
1410 	return p;
1411 }
1412 
1413 void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
1414 {
1415 	struct tee_pager_area *area;
1416 	struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1417 
1418 	TAILQ_FOREACH(area, utc->areas, link) {
1419 		if (!area->pgt)
1420 			area->pgt = find_pgt(pgt, area->base);
1421 		else
1422 			assert(area->pgt == find_pgt(pgt, area->base));
1423 		if (!area->pgt)
1424 			panic();
1425 	}
1426 }
1427 
1428 static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)
1429 {
1430 	uint32_t attr;
1431 
1432 	assert(pmem->area && pmem->area->pgt);
1433 
1434 	area_get_entry(pmem->area, pmem->pgidx, NULL, &attr);
1435 	area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1436 	tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1437 	tee_pager_save_page(pmem, attr);
1438 	assert(pmem->area->pgt->num_used_entries);
1439 	pmem->area->pgt->num_used_entries--;
1440 	pmem->pgidx = INVALID_PGIDX;
1441 	pmem->area = NULL;
1442 }
1443 
1444 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1445 {
1446 	struct tee_pager_pmem *pmem;
1447 	struct tee_pager_area *area;
1448 	uint32_t exceptions = pager_lock(NULL);
1449 
1450 	if (!pgt->num_used_entries)
1451 		goto out;
1452 
1453 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1454 		if (!pmem->area || pmem->pgidx == INVALID_PGIDX)
1455 			continue;
1456 		if (pmem->area->pgt == pgt)
1457 			pager_save_and_release_entry(pmem);
1458 	}
1459 	assert(!pgt->num_used_entries);
1460 
1461 out:
1462 	if (is_user_ta_ctx(pgt->ctx)) {
1463 		TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
1464 			if (area->pgt == pgt)
1465 				area->pgt = NULL;
1466 		}
1467 	}
1468 
1469 	pager_unlock(exceptions);
1470 }
1471 KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1472 #endif /*CFG_PAGED_USER_TA*/
1473 
1474 void tee_pager_release_phys(void *addr, size_t size)
1475 {
1476 	bool unmaped = false;
1477 	vaddr_t va = (vaddr_t)addr;
1478 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1479 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1480 	struct tee_pager_area *area;
1481 	uint32_t exceptions;
1482 
1483 	if (end <= begin)
1484 		return;
1485 
1486 	area = find_area(&tee_pager_area_head, begin);
1487 	if (!area ||
1488 	    area != find_area(&tee_pager_area_head, end - SMALL_PAGE_SIZE))
1489 		panic();
1490 
1491 	exceptions = pager_lock(NULL);
1492 
1493 	for (va = begin; va < end; va += SMALL_PAGE_SIZE)
1494 		unmaped |= tee_pager_release_one_phys(area, va);
1495 
1496 	if (unmaped)
1497 		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);
1498 
1499 	pager_unlock(exceptions);
1500 }
1501 KEEP_PAGER(tee_pager_release_phys);
1502 
1503 void *tee_pager_alloc(size_t size, uint32_t flags)
1504 {
1505 	tee_mm_entry_t *mm;
1506 	uint32_t f = TEE_MATTR_PW | TEE_MATTR_PR | (flags & TEE_MATTR_LOCKED);
1507 
1508 	if (!size)
1509 		return NULL;
1510 
1511 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1512 	if (!mm)
1513 		return NULL;
1514 
1515 	tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
1516 				f, NULL, NULL);
1517 
1518 	return (void *)tee_mm_get_smem(mm);
1519 }
1520