xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision e84e1feccbdbd9deae5ad2dea921f4f624e8ad6d)
1 /*
2  * Copyright (c) 2016, Linaro Limited
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  * this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  * this list of conditions and the following disclaimer in the documentation
14  * and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <arm.h>
30 #include <assert.h>
31 #include <io.h>
32 #include <keep.h>
33 #include <kernel/abort.h>
34 #include <kernel/panic.h>
35 #include <kernel/spinlock.h>
36 #include <kernel/tee_misc.h>
37 #include <kernel/tee_ta_manager.h>
38 #include <kernel/thread.h>
39 #include <kernel/tlb_helpers.h>
40 #include <mm/core_memprot.h>
41 #include <mm/tee_mm.h>
42 #include <mm/tee_pager.h>
43 #include <stdlib.h>
44 #include <sys/queue.h>
45 #include <tee_api_defines.h>
46 #include <tee/tee_cryp_provider.h>
47 #include <trace.h>
48 #include <types_ext.h>
49 #include <utee_defines.h>
50 #include <util.h>
51 
52 #include "pager_private.h"
53 
54 #define PAGER_AE_KEY_BITS	256
55 
56 struct pager_rw_pstate {
57 	uint64_t iv;
58 	uint8_t tag[PAGER_AES_GCM_TAG_LEN];
59 };
60 
61 enum area_type {
62 	AREA_TYPE_RO,
63 	AREA_TYPE_RW,
64 	AREA_TYPE_LOCK,
65 };
66 
67 struct tee_pager_area {
68 	union {
69 		const uint8_t *hashes;
70 		struct pager_rw_pstate *rwp;
71 	} u;
72 	uint8_t *store;
73 	enum area_type type;
74 	uint32_t flags;
75 	vaddr_t base;
76 	size_t size;
77 	struct pgt *pgt;
78 	TAILQ_ENTRY(tee_pager_area) link;
79 };
80 
81 TAILQ_HEAD(tee_pager_area_head, tee_pager_area);
82 
83 static struct tee_pager_area_head tee_pager_area_head =
84 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
85 
86 #define INVALID_PGIDX	UINT_MAX
87 
88 /*
89  * struct tee_pager_pmem - Represents a physical page used for paging.
90  *
91  * @pgidx	an index of the entry in area->ti.
92  * @va_alias	Virtual address where the physical page always is aliased.
93  *		Used during remapping of the page when the content need to
94  *		be updated before it's available at the new location.
95  * @area	a pointer to the pager area
96  */
97 struct tee_pager_pmem {
98 	unsigned pgidx;
99 	void *va_alias;
100 	struct tee_pager_area *area;
101 	TAILQ_ENTRY(tee_pager_pmem) link;
102 };
103 
104 /* The list of physical pages. The first page in the list is the oldest */
105 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
106 
107 static struct tee_pager_pmem_head tee_pager_pmem_head =
108 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
109 
110 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
111 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
112 
113 static uint8_t pager_ae_key[PAGER_AE_KEY_BITS / 8];
114 
115 /* number of pages hidden */
116 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
117 
118 /* Number of registered physical pages, used hiding pages. */
119 static size_t tee_pager_npages;
120 
121 #ifdef CFG_WITH_STATS
122 static struct tee_pager_stats pager_stats;
123 
124 static inline void incr_ro_hits(void)
125 {
126 	pager_stats.ro_hits++;
127 }
128 
129 static inline void incr_rw_hits(void)
130 {
131 	pager_stats.rw_hits++;
132 }
133 
134 static inline void incr_hidden_hits(void)
135 {
136 	pager_stats.hidden_hits++;
137 }
138 
139 static inline void incr_zi_released(void)
140 {
141 	pager_stats.zi_released++;
142 }
143 
144 static inline void incr_npages_all(void)
145 {
146 	pager_stats.npages_all++;
147 }
148 
149 static inline void set_npages(void)
150 {
151 	pager_stats.npages = tee_pager_npages;
152 }
153 
154 void tee_pager_get_stats(struct tee_pager_stats *stats)
155 {
156 	*stats = pager_stats;
157 
158 	pager_stats.hidden_hits = 0;
159 	pager_stats.ro_hits = 0;
160 	pager_stats.rw_hits = 0;
161 	pager_stats.zi_released = 0;
162 }
163 
164 #else /* CFG_WITH_STATS */
165 static inline void incr_ro_hits(void) { }
166 static inline void incr_rw_hits(void) { }
167 static inline void incr_hidden_hits(void) { }
168 static inline void incr_zi_released(void) { }
169 static inline void incr_npages_all(void) { }
170 static inline void set_npages(void) { }
171 
172 void tee_pager_get_stats(struct tee_pager_stats *stats)
173 {
174 	memset(stats, 0, sizeof(struct tee_pager_stats));
175 }
176 #endif /* CFG_WITH_STATS */
177 
178 static struct pgt pager_core_pgt;
179 struct core_mmu_table_info tee_pager_tbl_info;
180 static struct core_mmu_table_info pager_alias_tbl_info;
181 
182 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
183 
184 /* Defines the range of the alias area */
185 static tee_mm_entry_t *pager_alias_area;
186 /*
187  * Physical pages are added in a stack like fashion to the alias area,
188  * @pager_alias_next_free gives the address of next free entry if
189  * @pager_alias_next_free is != 0
190  */
191 static uintptr_t pager_alias_next_free;
192 
193 #ifdef CFG_TEE_CORE_DEBUG
194 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
195 
196 static uint32_t pager_lock_dldetect(const char *func, const int line,
197 				    struct abort_info *ai)
198 {
199 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
200 	unsigned int retries = 0;
201 	unsigned int reminder = 0;
202 
203 	while (!cpu_spin_trylock(&pager_spinlock)) {
204 		retries++;
205 		if (!retries) {
206 			/* wrapped, time to report */
207 			trace_printf(func, line, TRACE_ERROR, true,
208 				     "possible spinlock deadlock reminder %u",
209 				     reminder);
210 			if (reminder < UINT_MAX)
211 				reminder++;
212 			if (ai)
213 				abort_print(ai);
214 		}
215 	}
216 
217 	return exceptions;
218 }
219 #else
220 static uint32_t pager_lock(struct abort_info __unused *ai)
221 {
222 	return cpu_spin_lock_xsave(&pager_spinlock);
223 }
224 #endif
225 
226 static uint32_t pager_lock_check_stack(size_t stack_size)
227 {
228 	if (stack_size) {
229 		int8_t buf[stack_size];
230 		size_t n;
231 
232 		/*
233 		 * Make sure to touch all pages of the stack that we expect
234 		 * to use with this lock held. We need to take eventual
235 		 * page faults before the lock is taken or we'll deadlock
236 		 * the pager. The pages that are populated in this way will
237 		 * eventually be released at certain save transitions of
238 		 * the thread.
239 		 */
240 		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
241 			write8(1, (vaddr_t)buf + n);
242 		write8(1, (vaddr_t)buf + stack_size - 1);
243 	}
244 
245 	return pager_lock(NULL);
246 }
247 
248 static void pager_unlock(uint32_t exceptions)
249 {
250 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
251 }
252 
253 static void set_alias_area(tee_mm_entry_t *mm)
254 {
255 	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
256 	size_t tbl_va_size;
257 	unsigned idx;
258 	unsigned last_idx;
259 	vaddr_t smem = tee_mm_get_smem(mm);
260 	size_t nbytes = tee_mm_get_bytes(mm);
261 
262 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
263 
264 	if (pager_alias_area)
265 		panic("null pager_alias_area");
266 
267 	if (!ti->num_entries && !core_mmu_find_table(smem, UINT_MAX, ti))
268 		panic("Can't find translation table");
269 
270 	if ((1 << ti->shift) != SMALL_PAGE_SIZE)
271 		panic("Unsupported page size in translation table");
272 
273 	tbl_va_size = (1 << ti->shift) * ti->num_entries;
274 	if (!core_is_buffer_inside(smem, nbytes,
275 				   ti->va_base, tbl_va_size)) {
276 		EMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit it translation table 0x%" PRIxVA " len 0x%zx",
277 		     smem, nbytes, ti->va_base, tbl_va_size);
278 		panic();
279 	}
280 
281 	if (smem & SMALL_PAGE_MASK || nbytes & SMALL_PAGE_MASK)
282 		panic("invalid area alignment");
283 
284 	pager_alias_area = mm;
285 	pager_alias_next_free = smem;
286 
287 	/* Clear all mapping in the alias area */
288 	idx = core_mmu_va2idx(ti, smem);
289 	last_idx = core_mmu_va2idx(ti, smem + nbytes);
290 	for (; idx < last_idx; idx++)
291 		core_mmu_set_entry(ti, idx, 0, 0);
292 
293 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
294 }
295 
296 static void generate_ae_key(void)
297 {
298 	if (rng_generate(pager_ae_key, sizeof(pager_ae_key)) != TEE_SUCCESS)
299 		panic("failed to generate random");
300 }
301 
302 void tee_pager_init(tee_mm_entry_t *mm_alias)
303 {
304 	set_alias_area(mm_alias);
305 	generate_ae_key();
306 }
307 
308 static void *pager_add_alias_page(paddr_t pa)
309 {
310 	unsigned idx;
311 	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
312 	/* Alias pages mapped without write permission: runtime will care */
313 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
314 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
315 			TEE_MATTR_SECURE | TEE_MATTR_PR;
316 
317 	DMSG("0x%" PRIxPA, pa);
318 
319 	if (!pager_alias_next_free || !ti->num_entries)
320 		panic("invalid alias entry");
321 
322 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
323 	core_mmu_set_entry(ti, idx, pa, attr);
324 	pgt_inc_used_entries(&pager_core_pgt);
325 	pager_alias_next_free += SMALL_PAGE_SIZE;
326 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
327 				      tee_mm_get_bytes(pager_alias_area)))
328 		pager_alias_next_free = 0;
329 	return (void *)core_mmu_idx2va(ti, idx);
330 }
331 
332 static struct tee_pager_area *alloc_area(struct pgt *pgt,
333 					 vaddr_t base, size_t size,
334 					 uint32_t flags, const void *store,
335 					 const void *hashes)
336 {
337 	struct tee_pager_area *area = calloc(1, sizeof(*area));
338 	enum area_type at;
339 	tee_mm_entry_t *mm_store = NULL;
340 
341 	if (!area)
342 		return NULL;
343 
344 	if (flags & (TEE_MATTR_PW | TEE_MATTR_UW)) {
345 		if (flags & TEE_MATTR_LOCKED) {
346 			at = AREA_TYPE_LOCK;
347 			goto out;
348 		}
349 		mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size);
350 		if (!mm_store)
351 			goto bad;
352 		area->store = phys_to_virt(tee_mm_get_smem(mm_store),
353 					   MEM_AREA_TA_RAM);
354 		if (!area->store)
355 			goto bad;
356 		area->u.rwp = calloc(size / SMALL_PAGE_SIZE,
357 				     sizeof(struct pager_rw_pstate));
358 		if (!area->u.rwp)
359 			goto bad;
360 		at = AREA_TYPE_RW;
361 	} else {
362 		area->store = (void *)store;
363 		area->u.hashes = hashes;
364 		at = AREA_TYPE_RO;
365 	}
366 out:
367 	area->pgt = pgt;
368 	area->base = base;
369 	area->size = size;
370 	area->flags = flags;
371 	area->type = at;
372 	return area;
373 bad:
374 	tee_mm_free(mm_store);
375 	free(area->u.rwp);
376 	free(area);
377 	return NULL;
378 }
379 
380 static void area_insert_tail(struct tee_pager_area *area)
381 {
382 	uint32_t exceptions = pager_lock_check_stack(8);
383 
384 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
385 
386 	pager_unlock(exceptions);
387 }
388 KEEP_PAGER(area_insert_tail);
389 
390 static size_t tbl_usage_count(struct pgt *pgt)
391 {
392 	size_t n;
393 	paddr_t pa;
394 	size_t usage = 0;
395 
396 	for (n = 0; n < tee_pager_tbl_info.num_entries; n++) {
397 		core_mmu_get_entry_primitive(pgt->tbl, tee_pager_tbl_info.level,
398 					     n, &pa, NULL);
399 		if (pa)
400 			usage++;
401 	}
402 	return usage;
403 }
404 
405 bool tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
406 			const void *store, const void *hashes)
407 {
408 	struct tee_pager_area *area;
409 	size_t tbl_va_size;
410 	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
411 
412 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
413 		base, base + size, flags, store, hashes);
414 
415 	if (base & SMALL_PAGE_MASK || size & SMALL_PAGE_MASK || !size) {
416 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, size);
417 		panic();
418 	}
419 
420 	if (!(flags & TEE_MATTR_PW) && (!store || !hashes))
421 		panic("write pages cannot provide store or hashes");
422 
423 	if ((flags & TEE_MATTR_PW) && (store || hashes))
424 		panic("non-write pages must provide store and hashes");
425 
426 	if (!pager_core_pgt.tbl) {
427 		pager_core_pgt.tbl = ti->table;
428 		pgt_set_used_entries(&pager_core_pgt,
429 				     tbl_usage_count(&pager_core_pgt));
430 	}
431 
432 	tbl_va_size = (1 << ti->shift) * ti->num_entries;
433 	if (!core_is_buffer_inside(base, size, ti->va_base, tbl_va_size)) {
434 		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit it translation table 0x%" PRIxVA " len 0x%zx",
435 			base, size, ti->va_base, tbl_va_size);
436 		return false;
437 	}
438 
439 	area = alloc_area(&pager_core_pgt, base, size, flags, store, hashes);
440 	if (!area)
441 		return false;
442 
443 	area_insert_tail(area);
444 	return true;
445 }
446 
447 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
448 					vaddr_t va)
449 {
450 	struct tee_pager_area *area;
451 
452 	if (!areas)
453 		return NULL;
454 
455 	TAILQ_FOREACH(area, areas, link) {
456 		if (core_is_buffer_inside(va, 1, area->base, area->size))
457 			return area;
458 	}
459 	return NULL;
460 }
461 
462 #ifdef CFG_PAGED_USER_TA
463 static struct tee_pager_area *find_uta_area(vaddr_t va)
464 {
465 	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
466 
467 	if (!ctx || !is_user_ta_ctx(ctx))
468 		return NULL;
469 	return find_area(to_user_ta_ctx(ctx)->areas, va);
470 }
471 #else
472 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
473 {
474 	return NULL;
475 }
476 #endif /*CFG_PAGED_USER_TA*/
477 
478 
479 static uint32_t get_area_mattr(uint32_t area_flags)
480 {
481 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
482 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
483 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
484 
485 	if (!(area_flags & (TEE_MATTR_UR | TEE_MATTR_UX | TEE_MATTR_UW)))
486 		attr |= TEE_MATTR_GLOBAL;
487 
488 	return attr;
489 }
490 
491 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
492 {
493 	paddr_t pa;
494 	unsigned idx;
495 
496 	idx = core_mmu_va2idx(&pager_alias_tbl_info, (vaddr_t)pmem->va_alias);
497 	core_mmu_get_entry(&pager_alias_tbl_info, idx, &pa, NULL);
498 	return pa;
499 }
500 
501 static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
502 			void *dst)
503 {
504 	struct pager_aes_gcm_iv iv = {
505 		{ (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }
506 	};
507 
508 	return pager_aes_gcm_decrypt(pager_ae_key, sizeof(pager_ae_key),
509 				     &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE);
510 }
511 
512 static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
513 {
514 	struct pager_aes_gcm_iv iv;
515 
516 	assert((rwp->iv + 1) > rwp->iv);
517 	rwp->iv++;
518 	/*
519 	 * IV is constructed as recommended in section "8.2.1 Deterministic
520 	 * Construction" of "Recommendation for Block Cipher Modes of
521 	 * Operation: Galois/Counter Mode (GCM) and GMAC",
522 	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
523 	 */
524 	iv.iv[0] = (vaddr_t)rwp;
525 	iv.iv[1] = rwp->iv >> 32;
526 	iv.iv[2] = rwp->iv;
527 
528 	if (!pager_aes_gcm_encrypt(pager_ae_key, sizeof(pager_ae_key),
529 				   &iv, rwp->tag,
530 				   src, dst, SMALL_PAGE_SIZE))
531 		panic("gcm failed");
532 }
533 
534 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
535 			void *va_alias)
536 {
537 	size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
538 	const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;
539 	struct core_mmu_table_info *ti;
540 	uint32_t attr_alias;
541 	paddr_t pa_alias;
542 	unsigned int idx_alias;
543 
544 	/* Insure we are allowed to write to aliased virtual page */
545 	ti = &pager_alias_tbl_info;
546 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
547 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
548 	if (!(attr_alias & TEE_MATTR_PW)) {
549 		attr_alias |= TEE_MATTR_PW;
550 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
551 		tlbi_mva_allasid((vaddr_t)va_alias);
552 	}
553 
554 	switch (area->type) {
555 	case AREA_TYPE_RO:
556 		{
557 			const void *hash = area->u.hashes +
558 					   idx * TEE_SHA256_HASH_SIZE;
559 
560 			memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
561 			incr_ro_hits();
562 
563 			if (hash_sha256_check(hash, va_alias,
564 					      SMALL_PAGE_SIZE) != TEE_SUCCESS) {
565 				EMSG("PH 0x%" PRIxVA " failed", page_va);
566 				panic();
567 			}
568 		}
569 		/* Forbid write to aliases for read-only (maybe exec) pages */
570 		attr_alias &= ~TEE_MATTR_PW;
571 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
572 		tlbi_mva_allasid((vaddr_t)va_alias);
573 		break;
574 	case AREA_TYPE_RW:
575 		FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64,
576 			va_alias, page_va, area->u.rwp[idx].iv);
577 		if (!area->u.rwp[idx].iv)
578 			memset(va_alias, 0, SMALL_PAGE_SIZE);
579 		else if (!decrypt_page(&area->u.rwp[idx], stored_page,
580 				       va_alias)) {
581 			EMSG("PH 0x%" PRIxVA " failed", page_va);
582 			panic();
583 		}
584 		incr_rw_hits();
585 		break;
586 	case AREA_TYPE_LOCK:
587 		FMSG("Zero init %p %#" PRIxVA, va_alias, page_va);
588 		memset(va_alias, 0, SMALL_PAGE_SIZE);
589 		break;
590 	default:
591 		panic();
592 	}
593 }
594 
595 static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
596 {
597 	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
598 				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;
599 
600 	if (pmem->area->type == AREA_TYPE_RW && (attr & dirty_bits)) {
601 		size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK;
602 		size_t idx = pmem->pgidx - (offs >> SMALL_PAGE_SHIFT);
603 		void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE;
604 
605 		assert(pmem->area->flags & (TEE_MATTR_PW | TEE_MATTR_UW));
606 		encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias,
607 			     stored_page);
608 		FMSG("Saved %#" PRIxVA " iv %#" PRIx64,
609 			pmem->area->base + idx * SMALL_PAGE_SIZE,
610 			pmem->area->u.rwp[idx].iv);
611 	}
612 }
613 
614 static void area_get_entry(struct tee_pager_area *area, size_t idx,
615 			   paddr_t *pa, uint32_t *attr)
616 {
617 	assert(area->pgt);
618 	assert(idx < tee_pager_tbl_info.num_entries);
619 	core_mmu_get_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,
620 				     idx, pa, attr);
621 }
622 
623 static void area_set_entry(struct tee_pager_area *area, size_t idx,
624 			   paddr_t pa, uint32_t attr)
625 {
626 	assert(area->pgt);
627 	assert(idx < tee_pager_tbl_info.num_entries);
628 	core_mmu_set_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,
629 				     idx, pa, attr);
630 }
631 
632 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
633 {
634 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
635 }
636 
637 static vaddr_t __maybe_unused area_idx2va(struct tee_pager_area *area,
638 					 size_t idx)
639 {
640 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
641 }
642 
643 #ifdef CFG_PAGED_USER_TA
644 static void free_area(struct tee_pager_area *area)
645 {
646 	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr,
647 				virt_to_phys(area->store)));
648 	if (area->type == AREA_TYPE_RW)
649 		free(area->u.rwp);
650 	free(area);
651 }
652 
653 static bool pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
654 			       size_t size)
655 {
656 	struct tee_pager_area *area;
657 	uint32_t flags;
658 	vaddr_t b = base;
659 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
660 
661 	if (!utc->areas) {
662 		utc->areas = malloc(sizeof(*utc->areas));
663 		if (!utc->areas)
664 			return false;
665 		TAILQ_INIT(utc->areas);
666 	}
667 
668 	flags = TEE_MATTR_PRW | TEE_MATTR_URWX;
669 
670 	while (s) {
671 		size_t s2;
672 
673 		if (find_area(utc->areas, b))
674 			return false;
675 
676 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
677 
678 		/* Table info will be set when the context is activated. */
679 		area = alloc_area(NULL, b, s2, flags, NULL, NULL);
680 		if (!area)
681 			return false;
682 		TAILQ_INSERT_TAIL(utc->areas, area, link);
683 		b += s2;
684 		s -= s2;
685 	}
686 
687 	return true;
688 }
689 
690 bool tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size)
691 {
692 	struct thread_specific_data *tsd = thread_get_tsd();
693 	struct tee_pager_area *area;
694 	struct core_mmu_table_info dir_info = { NULL };
695 
696 	if (&utc->ctx != tsd->ctx) {
697 		/*
698 		 * Changes are to an utc that isn't active. Just add the
699 		 * areas page tables will be dealt with later.
700 		 */
701 		return pager_add_uta_area(utc, base, size);
702 	}
703 
704 	/*
705 	 * Assign page tables before adding areas to be able to tell which
706 	 * are newly added and should be removed in case of failure.
707 	 */
708 	tee_pager_assign_uta_tables(utc);
709 	if (!pager_add_uta_area(utc, base, size)) {
710 		struct tee_pager_area *next_a;
711 
712 		/* Remove all added areas */
713 		TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
714 			if (!area->pgt) {
715 				TAILQ_REMOVE(utc->areas, area, link);
716 				free_area(area);
717 			}
718 		}
719 		return false;
720 	}
721 
722 	/*
723 	 * Assign page tables to the new areas and make sure that the page
724 	 * tables are registered in the upper table.
725 	 */
726 	tee_pager_assign_uta_tables(utc);
727 	core_mmu_get_user_pgdir(&dir_info);
728 	TAILQ_FOREACH(area, utc->areas, link) {
729 		paddr_t pa;
730 		size_t idx;
731 		uint32_t attr;
732 
733 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
734 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
735 
736 		/*
737 		 * Check if the page table already is used, if it is, it's
738 		 * already registered.
739 		 */
740 		if (area->pgt->num_used_entries) {
741 			assert(attr & TEE_MATTR_TABLE);
742 			assert(pa == virt_to_phys(area->pgt->tbl));
743 			continue;
744 		}
745 
746 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
747 		pa = virt_to_phys(area->pgt->tbl);
748 		assert(pa);
749 		/*
750 		 * Note that the update of the table entry is guaranteed to
751 		 * be atomic.
752 		 */
753 		core_mmu_set_entry(&dir_info, idx, pa, attr);
754 	}
755 
756 	return true;
757 }
758 
759 static void init_tbl_info_from_pgt(struct core_mmu_table_info *ti,
760 				   struct pgt *pgt)
761 {
762 	assert(pgt);
763 	ti->table = pgt->tbl;
764 	ti->va_base = pgt->vabase;
765 	ti->level = tee_pager_tbl_info.level;
766 	ti->shift = tee_pager_tbl_info.shift;
767 	ti->num_entries = tee_pager_tbl_info.num_entries;
768 }
769 
770 static void transpose_area(struct tee_pager_area *area, struct pgt *new_pgt,
771 			   vaddr_t new_base)
772 {
773 	uint32_t exceptions = pager_lock_check_stack(64);
774 
775 	/*
776 	 * If there's no pgt assigned to the old area there's no pages to
777 	 * deal with either, just update with a new pgt and base.
778 	 */
779 	if (area->pgt) {
780 		struct core_mmu_table_info old_ti;
781 		struct core_mmu_table_info new_ti;
782 		struct tee_pager_pmem *pmem;
783 
784 		init_tbl_info_from_pgt(&old_ti, area->pgt);
785 		init_tbl_info_from_pgt(&new_ti, new_pgt);
786 
787 
788 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
789 			vaddr_t va;
790 			paddr_t pa;
791 			uint32_t attr;
792 
793 			if (pmem->area != area)
794 				continue;
795 			core_mmu_get_entry(&old_ti, pmem->pgidx, &pa, &attr);
796 			core_mmu_set_entry(&old_ti, pmem->pgidx, 0, 0);
797 
798 			assert(pa == get_pmem_pa(pmem));
799 			assert(attr);
800 			assert(area->pgt->num_used_entries);
801 			area->pgt->num_used_entries--;
802 
803 			va = core_mmu_idx2va(&old_ti, pmem->pgidx);
804 			va = va - area->base + new_base;
805 			pmem->pgidx = core_mmu_va2idx(&new_ti, va);
806 			core_mmu_set_entry(&new_ti, pmem->pgidx, pa, attr);
807 			new_pgt->num_used_entries++;
808 		}
809 	}
810 
811 	area->pgt = new_pgt;
812 	area->base = new_base;
813 	pager_unlock(exceptions);
814 }
815 KEEP_PAGER(transpose_area);
816 
817 void tee_pager_transfer_uta_region(struct user_ta_ctx *src_utc,
818 				   vaddr_t src_base,
819 				   struct user_ta_ctx *dst_utc,
820 				   vaddr_t dst_base, struct pgt **dst_pgt,
821 				   size_t size)
822 {
823 	struct tee_pager_area *area;
824 	struct tee_pager_area *next_a;
825 
826 	TAILQ_FOREACH_SAFE(area, src_utc->areas, link, next_a) {
827 		vaddr_t new_area_base;
828 		size_t new_idx;
829 
830 		if (!core_is_buffer_inside(area->base, area->size,
831 					  src_base, size))
832 			continue;
833 
834 		TAILQ_REMOVE(src_utc->areas, area, link);
835 
836 		new_area_base = dst_base + (src_base - area->base);
837 		new_idx = (new_area_base - dst_pgt[0]->vabase) /
838 			  CORE_MMU_PGDIR_SIZE;
839 		assert((new_area_base & ~CORE_MMU_PGDIR_MASK) ==
840 		       dst_pgt[new_idx]->vabase);
841 		transpose_area(area, dst_pgt[new_idx], new_area_base);
842 
843 		/*
844 		 * Assert that this will not cause any conflicts in the new
845 		 * utc.  This should already be guaranteed, but a bug here
846 		 * could be tricky to find.
847 		 */
848 		assert(!find_area(dst_utc->areas, area->base));
849 		TAILQ_INSERT_TAIL(dst_utc->areas, area, link);
850 	}
851 }
852 
853 static void rem_area(struct tee_pager_area_head *area_head,
854 		     struct tee_pager_area *area)
855 {
856 	struct tee_pager_pmem *pmem;
857 	uint32_t exceptions;
858 
859 	exceptions = pager_lock_check_stack(64);
860 
861 	TAILQ_REMOVE(area_head, area, link);
862 
863 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
864 		if (pmem->area == area) {
865 			area_set_entry(area, pmem->pgidx, 0, 0);
866 			tlbi_mva_allasid(area_idx2va(area, pmem->pgidx));
867 			pgt_dec_used_entries(area->pgt);
868 			pmem->area = NULL;
869 			pmem->pgidx = INVALID_PGIDX;
870 		}
871 	}
872 
873 	pager_unlock(exceptions);
874 	free_area(area);
875 }
876 KEEP_PAGER(rem_area);
877 
878 void tee_pager_rem_uta_region(struct user_ta_ctx *utc, vaddr_t base,
879 			      size_t size)
880 {
881 	struct tee_pager_area *area;
882 	struct tee_pager_area *next_a;
883 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
884 
885 	TAILQ_FOREACH_SAFE(area, utc->areas, link, next_a) {
886 		if (core_is_buffer_inside(area->base, area->size, base, s))
887 			rem_area(utc->areas, area);
888 	}
889 }
890 
891 void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
892 {
893 	struct tee_pager_area *area;
894 
895 	if (!utc->areas)
896 		return;
897 
898 	while (true) {
899 		area = TAILQ_FIRST(utc->areas);
900 		if (!area)
901 			break;
902 		TAILQ_REMOVE(utc->areas, area, link);
903 		free_area(area);
904 	}
905 
906 	free(utc->areas);
907 }
908 
909 bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
910 				 size_t size, uint32_t flags)
911 {
912 	bool ret;
913 	vaddr_t b = base;
914 	size_t s = size;
915 	size_t s2;
916 	struct tee_pager_area *area = find_area(utc->areas, b);
917 	uint32_t exceptions;
918 	struct tee_pager_pmem *pmem;
919 	paddr_t pa;
920 	uint32_t a;
921 	uint32_t f;
922 
923 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
924 	if (f & TEE_MATTR_UW)
925 		f |= TEE_MATTR_PW;
926 	f = get_area_mattr(f);
927 
928 	exceptions = pager_lock_check_stack(64);
929 
930 	while (s) {
931 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
932 		if (!area || area->base != b || area->size != s2) {
933 			ret = false;
934 			goto out;
935 		}
936 		b += s2;
937 		s -= s2;
938 
939 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
940 			if (pmem->area != area)
941 				continue;
942 			area_get_entry(pmem->area, pmem->pgidx, &pa, &a);
943 			if (a & TEE_MATTR_VALID_BLOCK)
944 				assert(pa == get_pmem_pa(pmem));
945 			else
946 				pa = get_pmem_pa(pmem);
947 			if (a == f)
948 				continue;
949 			area_set_entry(pmem->area, pmem->pgidx, 0, 0);
950 			tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
951 			if (!(flags & TEE_MATTR_UW))
952 				tee_pager_save_page(pmem, a);
953 
954 			area_set_entry(pmem->area, pmem->pgidx, pa, f);
955 			/*
956 			 * Make sure the table update is visible before
957 			 * continuing.
958 			 */
959 			dsb_ishst();
960 
961 			if (flags & TEE_MATTR_UX) {
962 				void *va = (void *)area_idx2va(pmem->area,
963 							       pmem->pgidx);
964 
965 				cache_op_inner(DCACHE_AREA_CLEAN, va,
966 						SMALL_PAGE_SIZE);
967 				cache_op_inner(ICACHE_AREA_INVALIDATE, va,
968 						SMALL_PAGE_SIZE);
969 			}
970 		}
971 
972 		area->flags = f;
973 		area = TAILQ_NEXT(area, link);
974 	}
975 
976 	ret = true;
977 out:
978 	pager_unlock(exceptions);
979 	return ret;
980 }
981 KEEP_PAGER(tee_pager_set_uta_area_attr);
982 #endif /*CFG_PAGED_USER_TA*/
983 
984 static bool tee_pager_unhide_page(vaddr_t page_va)
985 {
986 	struct tee_pager_pmem *pmem;
987 
988 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
989 		paddr_t pa;
990 		uint32_t attr;
991 
992 		if (pmem->pgidx == INVALID_PGIDX)
993 			continue;
994 
995 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
996 
997 		if (!(attr &
998 		     (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
999 			continue;
1000 
1001 		if (area_va2idx(pmem->area, page_va) == pmem->pgidx) {
1002 			uint32_t a = get_area_mattr(pmem->area->flags);
1003 
1004 			/* page is hidden, show and move to back */
1005 			if (pa != get_pmem_pa(pmem))
1006 				panic("unexpected pa");
1007 
1008 			/*
1009 			 * If it's not a dirty block, then it should be
1010 			 * read only.
1011 			 */
1012 			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
1013 				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1014 			else
1015 				FMSG("Unhide %#" PRIxVA, page_va);
1016 
1017 			if (page_va == 0x8000a000)
1018 				FMSG("unhide %#" PRIxVA " a %#" PRIX32,
1019 					page_va, a);
1020 			area_set_entry(pmem->area, pmem->pgidx, pa, a);
1021 			/*
1022 			 * Note that TLB invalidation isn't needed since
1023 			 * there wasn't a valid mapping before. We should
1024 			 * use a barrier though, to make sure that the
1025 			 * change is visible.
1026 			 */
1027 			dsb_ishst();
1028 
1029 			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1030 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1031 			incr_hidden_hits();
1032 			return true;
1033 		}
1034 	}
1035 
1036 	return false;
1037 }
1038 
1039 static void tee_pager_hide_pages(void)
1040 {
1041 	struct tee_pager_pmem *pmem;
1042 	size_t n = 0;
1043 
1044 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1045 		paddr_t pa;
1046 		uint32_t attr;
1047 		uint32_t a;
1048 
1049 		if (n >= TEE_PAGER_NHIDE)
1050 			break;
1051 		n++;
1052 
1053 		/* we cannot hide pages when pmem->area is not defined. */
1054 		if (!pmem->area)
1055 			continue;
1056 
1057 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
1058 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1059 			continue;
1060 
1061 		assert(pa == get_pmem_pa(pmem));
1062 		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)){
1063 			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
1064 			FMSG("Hide %#" PRIxVA,
1065 			     area_idx2va(pmem->area, pmem->pgidx));
1066 		} else
1067 			a = TEE_MATTR_HIDDEN_BLOCK;
1068 
1069 		area_set_entry(pmem->area, pmem->pgidx, pa, a);
1070 		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1071 	}
1072 }
1073 
1074 /*
1075  * Find mapped pmem, hide and move to pageble pmem.
1076  * Return false if page was not mapped, and true if page was mapped.
1077  */
1078 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1079 				       vaddr_t page_va)
1080 {
1081 	struct tee_pager_pmem *pmem;
1082 	unsigned pgidx;
1083 	paddr_t pa;
1084 	uint32_t attr;
1085 
1086 	pgidx = area_va2idx(area, page_va);
1087 	area_get_entry(area, pgidx, &pa, &attr);
1088 
1089 	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
1090 
1091 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1092 		if (pmem->area != area || pmem->pgidx != pgidx)
1093 			continue;
1094 
1095 		assert(pa == get_pmem_pa(pmem));
1096 		area_set_entry(area, pgidx, 0, 0);
1097 		pgt_dec_used_entries(area->pgt);
1098 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1099 		pmem->area = NULL;
1100 		pmem->pgidx = INVALID_PGIDX;
1101 		tee_pager_npages++;
1102 		set_npages();
1103 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1104 		incr_zi_released();
1105 		return true;
1106 	}
1107 
1108 	return false;
1109 }
1110 
1111 /* Finds the oldest page and unmats it from its old virtual address */
1112 static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
1113 {
1114 	struct tee_pager_pmem *pmem;
1115 
1116 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1117 	if (!pmem) {
1118 		EMSG("No pmem entries");
1119 		return NULL;
1120 	}
1121 	if (pmem->pgidx != INVALID_PGIDX) {
1122 		uint32_t a;
1123 
1124 		assert(pmem->area && pmem->area->pgt);
1125 		area_get_entry(pmem->area, pmem->pgidx, NULL, &a);
1126 		area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1127 		pgt_dec_used_entries(pmem->area->pgt);
1128 		tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1129 		tee_pager_save_page(pmem, a);
1130 	}
1131 
1132 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1133 	pmem->pgidx = INVALID_PGIDX;
1134 	pmem->area = NULL;
1135 	if (area->type == AREA_TYPE_LOCK) {
1136 		/* Move page to lock list */
1137 		if (tee_pager_npages <= 0)
1138 			panic("running out of page");
1139 		tee_pager_npages--;
1140 		set_npages();
1141 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1142 	} else {
1143 		/* move page to back */
1144 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1145 	}
1146 
1147 	return pmem;
1148 }
1149 
1150 static bool pager_update_permissions(struct tee_pager_area *area,
1151 			struct abort_info *ai, bool *handled)
1152 {
1153 	unsigned int pgidx = area_va2idx(area, ai->va);
1154 	uint32_t attr;
1155 	paddr_t pa;
1156 
1157 	*handled = false;
1158 
1159 	area_get_entry(area, pgidx, &pa, &attr);
1160 
1161 	/* Not mapped */
1162 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1163 		return false;
1164 
1165 	/* Not readable, should not happen */
1166 	if (abort_is_user_exception(ai)) {
1167 		if (!(attr & TEE_MATTR_UR))
1168 			return true;
1169 	} else {
1170 		if (!(attr & TEE_MATTR_PR)) {
1171 			abort_print_error(ai);
1172 			panic();
1173 		}
1174 	}
1175 
1176 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1177 	case CORE_MMU_FAULT_TRANSLATION:
1178 	case CORE_MMU_FAULT_READ_PERMISSION:
1179 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1180 			/* Check attempting to execute from an NOX page */
1181 			if (abort_is_user_exception(ai)) {
1182 				if (!(attr & TEE_MATTR_UX))
1183 					return true;
1184 			} else {
1185 				if (!(attr & TEE_MATTR_PX)) {
1186 					abort_print_error(ai);
1187 					panic();
1188 				}
1189 			}
1190 		}
1191 		/* Since the page is mapped now it's OK */
1192 		break;
1193 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1194 		/* Check attempting to write to an RO page */
1195 		if (abort_is_user_exception(ai)) {
1196 			if (!(area->flags & TEE_MATTR_UW))
1197 				return true;
1198 			if (!(attr & TEE_MATTR_UW)) {
1199 				FMSG("Dirty %p",
1200 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1201 				area_set_entry(area, pgidx, pa,
1202 					       get_area_mattr(area->flags));
1203 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1204 			}
1205 
1206 		} else {
1207 			if (!(area->flags & TEE_MATTR_PW)) {
1208 				abort_print_error(ai);
1209 				panic();
1210 			}
1211 			if (!(attr & TEE_MATTR_PW)) {
1212 				FMSG("Dirty %p",
1213 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
1214 				area_set_entry(area, pgidx, pa,
1215 					       get_area_mattr(area->flags));
1216 				tlbi_mva_allasid(ai->va & ~SMALL_PAGE_MASK);
1217 			}
1218 		}
1219 		/* Since permissions has been updated now it's OK */
1220 		break;
1221 	default:
1222 		/* Some fault we can't deal with */
1223 		if (abort_is_user_exception(ai))
1224 			return true;
1225 		abort_print_error(ai);
1226 		panic();
1227 	}
1228 	*handled = true;
1229 	return true;
1230 }
1231 
1232 #ifdef CFG_TEE_CORE_DEBUG
1233 static void stat_handle_fault(void)
1234 {
1235 	static size_t num_faults;
1236 	static size_t min_npages = SIZE_MAX;
1237 	static size_t total_min_npages = SIZE_MAX;
1238 
1239 	num_faults++;
1240 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1241 		DMSG("nfaults %zu npages %zu (min %zu)",
1242 		     num_faults, tee_pager_npages, min_npages);
1243 		min_npages = tee_pager_npages; /* reset */
1244 	}
1245 	if (tee_pager_npages < min_npages)
1246 		min_npages = tee_pager_npages;
1247 	if (tee_pager_npages < total_min_npages)
1248 		total_min_npages = tee_pager_npages;
1249 }
1250 #else
1251 static void stat_handle_fault(void)
1252 {
1253 }
1254 #endif
1255 
1256 bool tee_pager_handle_fault(struct abort_info *ai)
1257 {
1258 	struct tee_pager_area *area;
1259 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1260 	uint32_t exceptions;
1261 	bool ret;
1262 
1263 #ifdef TEE_PAGER_DEBUG_PRINT
1264 	abort_print(ai);
1265 #endif
1266 
1267 	/*
1268 	 * We're updating pages that can affect several active CPUs at a
1269 	 * time below. We end up here because a thread tries to access some
1270 	 * memory that isn't available. We have to be careful when making
1271 	 * that memory available as other threads may succeed in accessing
1272 	 * that address the moment after we've made it available.
1273 	 *
1274 	 * That means that we can't just map the memory and populate the
1275 	 * page, instead we use the aliased mapping to populate the page
1276 	 * and once everything is ready we map it.
1277 	 */
1278 	exceptions = pager_lock(ai);
1279 
1280 	stat_handle_fault();
1281 
1282 	/* check if the access is valid */
1283 	if (abort_is_user_exception(ai)) {
1284 		area = find_uta_area(ai->va);
1285 
1286 	} else {
1287 		area = find_area(&tee_pager_area_head, ai->va);
1288 		if (!area)
1289 			area = find_uta_area(ai->va);
1290 	}
1291 	if (!area || !area->pgt) {
1292 		ret = false;
1293 		goto out;
1294 	}
1295 
1296 	if (!tee_pager_unhide_page(page_va)) {
1297 		struct tee_pager_pmem *pmem = NULL;
1298 		uint32_t attr;
1299 		paddr_t pa;
1300 
1301 		/*
1302 		 * The page wasn't hidden, but some other core may have
1303 		 * updated the table entry before we got here or we need
1304 		 * to make a read-only page read-write (dirty).
1305 		 */
1306 		if (pager_update_permissions(area, ai, &ret)) {
1307 			/*
1308 			 * Nothing more to do with the abort. The problem
1309 			 * could already have been dealt with from another
1310 			 * core or if ret is false the TA will be paniced.
1311 			 */
1312 			goto out;
1313 		}
1314 
1315 		pmem = tee_pager_get_page(area);
1316 		if (!pmem) {
1317 			abort_print(ai);
1318 			panic();
1319 		}
1320 
1321 		/* load page code & data */
1322 		tee_pager_load_page(area, page_va, pmem->va_alias);
1323 
1324 
1325 		pmem->area = area;
1326 		pmem->pgidx = area_va2idx(area, ai->va);
1327 		attr = get_area_mattr(area->flags) &
1328 			~(TEE_MATTR_PW | TEE_MATTR_UW);
1329 		pa = get_pmem_pa(pmem);
1330 
1331 		/*
1332 		 * We've updated the page using the aliased mapping and
1333 		 * some cache maintenence is now needed if it's an
1334 		 * executable page.
1335 		 *
1336 		 * Since the d-cache is a Physically-indexed,
1337 		 * physically-tagged (PIPT) cache we can clean either the
1338 		 * aliased address or the real virtual address. In this
1339 		 * case we choose the real virtual address.
1340 		 *
1341 		 * The i-cache can also be PIPT, but may be something else
1342 		 * too like VIPT. The current code requires the caches to
1343 		 * implement the IVIPT extension, that is:
1344 		 * "instruction cache maintenance is required only after
1345 		 * writing new data to a physical address that holds an
1346 		 * instruction."
1347 		 *
1348 		 * To portably invalidate the icache the page has to
1349 		 * be mapped at the final virtual address but not
1350 		 * executable.
1351 		 */
1352 		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1353 			uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
1354 					TEE_MATTR_PW | TEE_MATTR_UW;
1355 
1356 			/* Set a temporary read-only mapping */
1357 			area_set_entry(pmem->area, pmem->pgidx, pa,
1358 				       attr & ~mask);
1359 			tlbi_mva_allasid(page_va);
1360 
1361 			/*
1362 			 * Doing these operations to LoUIS (Level of
1363 			 * unification, Inner Shareable) would be enough
1364 			 */
1365 			cache_op_inner(DCACHE_AREA_CLEAN, (void *)page_va,
1366 				       SMALL_PAGE_SIZE);
1367 			cache_op_inner(ICACHE_AREA_INVALIDATE, (void *)page_va,
1368 				       SMALL_PAGE_SIZE);
1369 
1370 			/* Set the final mapping */
1371 			area_set_entry(area, pmem->pgidx, pa, attr);
1372 			tlbi_mva_allasid(page_va);
1373 		} else {
1374 			area_set_entry(area, pmem->pgidx, pa, attr);
1375 			/*
1376 			 * No need to flush TLB for this entry, it was
1377 			 * invalid. We should use a barrier though, to make
1378 			 * sure that the change is visible.
1379 			 */
1380 			dsb_ishst();
1381 		}
1382 		pgt_inc_used_entries(area->pgt);
1383 
1384 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
1385 
1386 	}
1387 
1388 	tee_pager_hide_pages();
1389 	ret = true;
1390 out:
1391 	pager_unlock(exceptions);
1392 	return ret;
1393 }
1394 
1395 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1396 {
1397 	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
1398 	size_t n;
1399 
1400 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1401 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1402 
1403 	/* setup memory */
1404 	for (n = 0; n < npages; n++) {
1405 		struct tee_pager_pmem *pmem;
1406 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1407 		unsigned pgidx = core_mmu_va2idx(ti, va);
1408 		paddr_t pa;
1409 		uint32_t attr;
1410 
1411 		/*
1412 		 * Note that we can only support adding pages in the
1413 		 * valid range of this table info, currently not a problem.
1414 		 */
1415 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1416 
1417 		/* Ignore unmapped pages/blocks */
1418 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1419 			continue;
1420 
1421 		pmem = malloc(sizeof(struct tee_pager_pmem));
1422 		if (!pmem)
1423 			panic("out of mem");
1424 
1425 		pmem->va_alias = pager_add_alias_page(pa);
1426 
1427 		if (unmap) {
1428 			pmem->area = NULL;
1429 			pmem->pgidx = INVALID_PGIDX;
1430 			core_mmu_set_entry(ti, pgidx, 0, 0);
1431 			pgt_dec_used_entries(&pager_core_pgt);
1432 		} else {
1433 			/*
1434 			 * The page is still mapped, let's assign the area
1435 			 * and update the protection bits accordingly.
1436 			 */
1437 			pmem->area = find_area(&tee_pager_area_head, va);
1438 			assert(pmem->area->pgt == &pager_core_pgt);
1439 			pmem->pgidx = pgidx;
1440 			assert(pa == get_pmem_pa(pmem));
1441 			area_set_entry(pmem->area, pgidx, pa,
1442 				       get_area_mattr(pmem->area->flags));
1443 		}
1444 
1445 		tee_pager_npages++;
1446 		incr_npages_all();
1447 		set_npages();
1448 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1449 	}
1450 
1451 	/*
1452 	 * As this is done at inits, invalidate all TLBs once instead of
1453 	 * targeting only the modified entries.
1454 	 */
1455 	tlbi_all();
1456 }
1457 
1458 #ifdef CFG_PAGED_USER_TA
1459 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1460 {
1461 	struct pgt *p = pgt;
1462 
1463 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1464 		p = SLIST_NEXT(p, link);
1465 	return p;
1466 }
1467 
1468 void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
1469 {
1470 	struct tee_pager_area *area;
1471 	struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1472 
1473 	TAILQ_FOREACH(area, utc->areas, link) {
1474 		if (!area->pgt)
1475 			area->pgt = find_pgt(pgt, area->base);
1476 		else
1477 			assert(area->pgt == find_pgt(pgt, area->base));
1478 		if (!area->pgt)
1479 			panic();
1480 	}
1481 }
1482 
1483 static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)
1484 {
1485 	uint32_t attr;
1486 
1487 	assert(pmem->area && pmem->area->pgt);
1488 
1489 	area_get_entry(pmem->area, pmem->pgidx, NULL, &attr);
1490 	area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1491 	tlbi_mva_allasid(area_idx2va(pmem->area, pmem->pgidx));
1492 	tee_pager_save_page(pmem, attr);
1493 	assert(pmem->area->pgt->num_used_entries);
1494 	pmem->area->pgt->num_used_entries--;
1495 	pmem->pgidx = INVALID_PGIDX;
1496 	pmem->area = NULL;
1497 }
1498 
1499 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1500 {
1501 	struct tee_pager_pmem *pmem;
1502 	struct tee_pager_area *area;
1503 	uint32_t exceptions = pager_lock_check_stack(2048);
1504 
1505 	if (!pgt->num_used_entries)
1506 		goto out;
1507 
1508 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1509 		if (!pmem->area || pmem->pgidx == INVALID_PGIDX)
1510 			continue;
1511 		if (pmem->area->pgt == pgt)
1512 			pager_save_and_release_entry(pmem);
1513 	}
1514 	assert(!pgt->num_used_entries);
1515 
1516 out:
1517 	if (is_user_ta_ctx(pgt->ctx)) {
1518 		TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
1519 			if (area->pgt == pgt)
1520 				area->pgt = NULL;
1521 		}
1522 	}
1523 
1524 	pager_unlock(exceptions);
1525 }
1526 KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1527 #endif /*CFG_PAGED_USER_TA*/
1528 
1529 void tee_pager_release_phys(void *addr, size_t size)
1530 {
1531 	bool unmaped = false;
1532 	vaddr_t va = (vaddr_t)addr;
1533 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1534 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1535 	struct tee_pager_area *area;
1536 	uint32_t exceptions;
1537 
1538 	if (end <= begin)
1539 		return;
1540 
1541 	area = find_area(&tee_pager_area_head, begin);
1542 	if (!area ||
1543 	    area != find_area(&tee_pager_area_head, end - SMALL_PAGE_SIZE))
1544 		panic();
1545 
1546 	exceptions = pager_lock_check_stack(128);
1547 
1548 	for (va = begin; va < end; va += SMALL_PAGE_SIZE)
1549 		unmaped |= tee_pager_release_one_phys(area, va);
1550 
1551 	if (unmaped)
1552 		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);
1553 
1554 	pager_unlock(exceptions);
1555 }
1556 KEEP_PAGER(tee_pager_release_phys);
1557 
1558 void *tee_pager_alloc(size_t size, uint32_t flags)
1559 {
1560 	tee_mm_entry_t *mm;
1561 	uint32_t f = TEE_MATTR_PW | TEE_MATTR_PR | (flags & TEE_MATTR_LOCKED);
1562 
1563 	if (!size)
1564 		return NULL;
1565 
1566 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1567 	if (!mm)
1568 		return NULL;
1569 
1570 	tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
1571 				f, NULL, NULL);
1572 
1573 	return (void *)tee_mm_get_smem(mm);
1574 }
1575