xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision a50cb361d9e5735f197ccc87beb0d24af8315369)
1 /*
2  * Copyright (c) 2016, Linaro Limited
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  * this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  * this list of conditions and the following disclaimer in the documentation
14  * and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <assert.h>
30 #include <keep.h>
31 #include <sys/queue.h>
32 #include <kernel/abort.h>
33 #include <kernel/panic.h>
34 #include <kernel/tee_misc.h>
35 #include <kernel/tee_ta_manager.h>
36 #include <kernel/thread.h>
37 #include <kernel/tz_proc.h>
38 #include <mm/core_memprot.h>
39 #include <mm/tee_mm.h>
40 #include <mm/tee_mmu_defs.h>
41 #include <mm/tee_pager.h>
42 #include <types_ext.h>
43 #include <stdlib.h>
44 #include <tee_api_defines.h>
45 #include <tee/tee_cryp_provider.h>
46 #include <trace.h>
47 #include <utee_defines.h>
48 #include <util.h>
49 
50 #include "pager_private.h"
51 
52 #define PAGER_AE_KEY_BITS	256
53 
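/*
 * struct pager_rw_pstate - Per-page state for an encrypted writable page.
 *
 * @iv		increased by one each time the page is encrypted, an IV of 0
 *		means the page has never been saved to the backing store
 * @tag		AES-GCM authentication tag of the stored page
 */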
54 struct pager_rw_pstate {
55 	uint64_t iv;
56 	uint8_t tag[PAGER_AES_GCM_TAG_LEN];
57 };
58 
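/*
 * struct tee_pager_area - Represents a virtual address range handled by
 * the pager.
 *
 * @u.hashes	SHA-256 hashes of the stored pages (read-only areas)
 * @u.rwp	per-page encryption state (writable areas)
 * @store	backing store the pages are loaded from and saved to
 * @flags	TEE_MATTR_* flags describing the permissions of the area
 * @base	first virtual address covered by the area
 * @size	size of the area in bytes
 */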
59 struct tee_pager_area {
60 	union {
61 		const uint8_t *hashes;
62 		struct pager_rw_pstate *rwp;
63 	} u;
64 	uint8_t *store;
65 	uint32_t flags;
66 	vaddr_t base;
67 	size_t size;
68 	TAILQ_ENTRY(tee_pager_area) link;
69 };
70 
71 static TAILQ_HEAD(tee_pager_area_head, tee_pager_area) tee_pager_area_head =
72 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
73 
74 #define INVALID_PGIDX	UINT_MAX
75 
76 /*
77  * struct tee_pager_pmem - Represents a physical page used for paging.
78  *
79  * @pgidx	index of the entry in tee_pager_tbl_info
80  * @va_alias	virtual address where the physical page is always aliased.
81  *		Used during remapping of the page when the content needs to
82  *		be updated before it's available at the new location.
83  * @area	pointer to the pager area the page currently belongs to
84  */
85 struct tee_pager_pmem {
86 	unsigned pgidx;
87 	void *va_alias;
88 	struct tee_pager_area *area;
89 	TAILQ_ENTRY(tee_pager_pmem) link;
90 };
91 
92 /* The list of physical pages. The first page in the list is the oldest */
93 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
94 
95 static struct tee_pager_pmem_head tee_pager_pmem_head =
96 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
97 
98 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
99 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
100 
101 static uint8_t pager_ae_key[PAGER_AE_KEY_BITS / 8];
102 
103 /* Maximum number of pages to hide at a time */
104 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
105 
106 /* Number of registered physical pages, used when hiding pages. */
107 static size_t tee_pager_npages;
108 
109 #ifdef CFG_WITH_STATS
110 static struct tee_pager_stats pager_stats;
111 
112 static inline void incr_ro_hits(void)
113 {
114 	pager_stats.ro_hits++;
115 }
116 
117 static inline void incr_rw_hits(void)
118 {
119 	pager_stats.rw_hits++;
120 }
121 
122 static inline void incr_hidden_hits(void)
123 {
124 	pager_stats.hidden_hits++;
125 }
126 
127 static inline void incr_zi_released(void)
128 {
129 	pager_stats.zi_released++;
130 }
131 
132 static inline void incr_npages_all(void)
133 {
134 	pager_stats.npages_all++;
135 }
136 
137 static inline void set_npages(void)
138 {
139 	pager_stats.npages = tee_pager_npages;
140 }
141 
142 void tee_pager_get_stats(struct tee_pager_stats *stats)
143 {
144 	*stats = pager_stats;
145 
146 	pager_stats.hidden_hits = 0;
147 	pager_stats.ro_hits = 0;
148 	pager_stats.rw_hits = 0;
149 	pager_stats.zi_released = 0;
150 }
151 
152 #else /* CFG_WITH_STATS */
153 static inline void incr_ro_hits(void) { }
154 static inline void incr_rw_hits(void) { }
155 static inline void incr_hidden_hits(void) { }
156 static inline void incr_zi_released(void) { }
157 static inline void incr_npages_all(void) { }
158 static inline void set_npages(void) { }
159 
160 void tee_pager_get_stats(struct tee_pager_stats *stats)
161 {
162 	memset(stats, 0, sizeof(struct tee_pager_stats));
163 }
164 #endif /* CFG_WITH_STATS */
165 
166 struct core_mmu_table_info tee_pager_tbl_info;
167 static struct core_mmu_table_info pager_alias_tbl_info;
168 
169 static unsigned pager_lock = SPINLOCK_UNLOCK;
170 
171 /* Defines the range of the alias area */
172 static tee_mm_entry_t *pager_alias_area;
173 /*
174  * Physical pages are added to the alias area in a stack-like fashion.
175  * @pager_alias_next_free gives the address of the next free entry; it is
176  * 0 when the alias area is full.
177  */
178 static uintptr_t pager_alias_next_free;
179 
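/*
 * Registers the alias area used when populating physical pages. Checks
 * that the area is page aligned and covered by a small-page translation
 * table, clears any existing mappings in it and invalidates the TLB.
 */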
180 static void set_alias_area(tee_mm_entry_t *mm)
181 {
182 	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
183 	size_t tbl_va_size;
184 	unsigned idx;
185 	unsigned last_idx;
186 	vaddr_t smem = tee_mm_get_smem(mm);
187 	size_t nbytes = tee_mm_get_bytes(mm);
188 
189 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
190 
191 	if (pager_alias_area)
192 		panic("pager_alias_area already set");
193 
194 	if (!ti->num_entries && !core_mmu_find_table(smem, UINT_MAX, ti))
195 		panic("Can't find translation table");
196 
197 	if ((1 << ti->shift) != SMALL_PAGE_SIZE)
198 		panic("Unsupported page size in translation table");
199 
200 	tbl_va_size = (1 << ti->shift) * ti->num_entries;
201 	if (!core_is_buffer_inside(smem, nbytes,
202 				   ti->va_base, tbl_va_size)) {
203 		EMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
204 		     smem, nbytes, ti->va_base, tbl_va_size);
205 		panic();
206 	}
207 
208 	if (smem & SMALL_PAGE_MASK || nbytes & SMALL_PAGE_MASK)
209 		panic("invalid area alignment");
210 
211 	pager_alias_area = mm;
212 	pager_alias_next_free = smem;
213 
214 	/* Clear all mappings in the alias area */
215 	idx = core_mmu_va2idx(ti, smem);
216 	last_idx = core_mmu_va2idx(ti, smem + nbytes);
217 	for (; idx < last_idx; idx++)
218 		core_mmu_set_entry(ti, idx, 0, 0);
219 
220 	/* TODO only invalidate entries touched above */
221 	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
222 }
223 
224 static void generate_ae_key(void)
225 {
226 	if (rng_generate(pager_ae_key, sizeof(pager_ae_key)) != TEE_SUCCESS)
227 		panic("failed to generate random");
228 }
229 
230 void tee_pager_init(tee_mm_entry_t *mm_alias)
231 {
232 	set_alias_area(mm_alias);
233 	generate_ae_key();
234 }
235 
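/*
 * Maps the physical page at the next free slot in the alias area with a
 * cached, secure, read/write mapping and returns the alias virtual address.
 */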
236 static void *pager_add_alias_page(paddr_t pa)
237 {
238 	unsigned idx;
239 	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
240 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
241 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
242 			TEE_MATTR_SECURE | TEE_MATTR_PRW;
243 
244 	DMSG("0x%" PRIxPA, pa);
245 
246 	if (!pager_alias_next_free || !ti->num_entries)
247 		panic("invalid alias entry");
248 
249 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
250 	core_mmu_set_entry(ti, idx, pa, attr);
251 	pager_alias_next_free += SMALL_PAGE_SIZE;
252 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
253 				      tee_mm_get_bytes(pager_alias_area)))
254 		pager_alias_next_free = 0;
255 	return (void *)core_mmu_idx2va(ti, idx);
256 }
257 
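/*
 * Allocates and initializes a struct tee_pager_area. Writable areas that
 * aren't locked also get a backing store in secure DDR and per-page
 * encryption state, read-only areas reference the supplied store and
 * hashes directly. Locked areas have no backing store at all.
 */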
258 static struct tee_pager_area *alloc_area(vaddr_t base, size_t size,
259 			uint32_t flags, const void *store, const void *hashes)
260 {
261 	struct tee_pager_area *area = calloc(1, sizeof(*area));
262 	tee_mm_entry_t *mm_store = NULL;
263 
264 	if (!area)
265 		return NULL;
266 
267 	if (flags & TEE_MATTR_PW) {
268 		if (flags & TEE_MATTR_LOCKED)
269 			goto out;
270 		mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size);
271 		if (!mm_store)
272 			goto bad;
273 		area->store = phys_to_virt(tee_mm_get_smem(mm_store),
274 					   MEM_AREA_TA_RAM);
275 		if (!area->store)
276 			goto bad;
277 		area->u.rwp = calloc(size / SMALL_PAGE_SIZE,
278 				     sizeof(struct pager_rw_pstate));
279 		if (!area->u.rwp)
280 			goto bad;
281 	} else {
282 		area->store = (void *)store;
283 		area->u.hashes = hashes;
284 	}
285 out:
286 	area->base = base;
287 	area->size = size;
288 	area->flags = flags;
289 	return area;
290 bad:
291 	tee_mm_free(mm_store);
292 	free(area->u.rwp);
293 	free(area);
294 	return NULL;
295 }
296 
297 static void area_insert_tail(struct tee_pager_area *area)
298 {
299 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
300 
301 	cpu_spin_lock(&pager_lock);
302 
303 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
304 
305 	cpu_spin_unlock(&pager_lock);
306 	thread_set_exceptions(exceptions);
307 }
308 KEEP_PAGER(area_insert_tail);
309 
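/*
 * Registers [base, base + size) as a pager area. The range must be page
 * aligned and covered by the pager translation table. Read-only areas
 * must supply a store and hashes, writable areas must not.
 */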
310 bool tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
311 			const void *store, const void *hashes)
312 {
313 	struct tee_pager_area *area;
314 	size_t tbl_va_size;
315 	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
316 
317 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
318 		base, base + size, flags, store, hashes);
319 
320 	if (base & SMALL_PAGE_MASK || size & SMALL_PAGE_MASK || !size) {
321 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, size);
322 		panic();
323 	}
324 
325 	if (!(flags & TEE_MATTR_PW) && (!store || !hashes))
326 		panic("read-only pages must provide store and hashes");
327 
328 	if ((flags & TEE_MATTR_PW) && (store || hashes))
329 		panic("writable pages must not provide store or hashes");
330 
331 	tbl_va_size = (1 << ti->shift) * ti->num_entries;
332 	if (!core_is_buffer_inside(base, size, ti->va_base, tbl_va_size)) {
333 		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
334 			base, size, ti->va_base, tbl_va_size);
335 		return false;
336 	}
337 
338 	area = alloc_area(base, size, flags, store, hashes);
339 	if (!area)
340 		return false;
341 
342 	area_insert_tail(area);
343 	return true;
344 }
345 
346 static struct tee_pager_area *tee_pager_find_area(vaddr_t va)
347 {
348 	struct tee_pager_area *area;
349 
350 	TAILQ_FOREACH(area, &tee_pager_area_head, link) {
351 		if (core_is_buffer_inside(va, 1, area->base, area->size))
352 			return area;
353 	}
354 	return NULL;
355 }
356 
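/* Returns the mapping attributes used for valid pages in the area */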
357 static uint32_t get_area_mattr(struct tee_pager_area *area)
358 {
359 	return TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
360 	       TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
361 	       TEE_MATTR_SECURE | TEE_MATTR_PR |
362 	       (area->flags & TEE_MATTR_PRWX);
363 }
364 
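/* Returns the physical address of a pmem entry via its alias mapping */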
365 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
366 {
367 	paddr_t pa;
368 	unsigned idx;
369 
370 	idx = core_mmu_va2idx(&pager_alias_tbl_info, (vaddr_t)pmem->va_alias);
371 	core_mmu_get_entry(&pager_alias_tbl_info, idx, &pa, NULL);
372 	return pa;
373 }
374 
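/*
 * Writable pages are encrypted with AES-GCM when paged out. The IV is
 * derived from the address of the per-page state and a counter that is
 * increased on every save, the tag is stored in struct pager_rw_pstate
 * and verified when the page is restored.
 */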
375 static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
376 			void *dst)
377 {
378 	struct pager_aes_gcm_iv iv = {
379 		{ (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }
380 	};
381 
382 	return pager_aes_gcm_decrypt(pager_ae_key, sizeof(pager_ae_key),
383 				     &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE);
384 }
385 
386 static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
387 {
388 	struct pager_aes_gcm_iv iv;
389 
390 	assert((rwp->iv + 1) > rwp->iv);
391 	rwp->iv++;
392 	/*
393 	 * IV is constructed as recommended in section "8.2.1 Deterministic
394 	 * Construction" of "Recommendation for Block Cipher Modes of
395 	 * Operation: Galois/Counter Mode (GCM) and GMAC",
396 	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
397 	 */
398 	iv.iv[0] = (vaddr_t)rwp;
399 	iv.iv[1] = rwp->iv >> 32;
400 	iv.iv[2] = rwp->iv;
401 
402 	if (!pager_aes_gcm_encrypt(pager_ae_key, sizeof(pager_ae_key),
403 				   &iv, rwp->tag,
404 				   src, dst, SMALL_PAGE_SIZE))
405 		panic("gcm failed");
406 }
407 
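/*
 * Populates the alias page with the content of the page at page_va.
 * Read-only pages are copied from the store and verified against their
 * hash, locked pages and writable pages that have never been saved are
 * zero initialized, other writable pages are decrypted from the store.
 */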
408 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
409 			void *va_alias)
410 {
411 	size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
412 	const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;
413 
414 	if (!(area->flags & TEE_MATTR_PW)) {
415 		const void *hash = area->u.hashes + idx * TEE_SHA256_HASH_SIZE;
416 
417 		memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
418 		incr_ro_hits();
419 
420 		if (hash_sha256_check(hash, va_alias, SMALL_PAGE_SIZE) !=
421 				TEE_SUCCESS) {
422 			EMSG("PH 0x%" PRIxVA " failed", page_va);
423 			panic();
424 		}
425 	} else if (area->flags & TEE_MATTR_LOCKED) {
426 		FMSG("Zero init %p %#" PRIxVA, va_alias, page_va);
427 		memset(va_alias, 0, SMALL_PAGE_SIZE);
428 	} else {
429 		FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64,
430 			va_alias, page_va, area->u.rwp[idx].iv);
431 		if (!area->u.rwp[idx].iv)
432 			memset(va_alias, 0, SMALL_PAGE_SIZE);
433 		else if (!decrypt_page(&area->u.rwp[idx], stored_page,
434 				       va_alias)) {
435 			EMSG("PH 0x%" PRIxVA " failed", page_va);
436 			panic();
437 		}
438 		incr_rw_hits();
439 	}
440 }
441 
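/*
 * Saves a page that is about to be unmapped. If the mapping was writable
 * or marked hidden dirty, the page is encrypted from the alias mapping
 * into the backing store, updating its IV and tag.
 */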
442 static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
443 {
444 	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
445 	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
446 				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;
447 
448 	assert(!(pmem->area->flags & TEE_MATTR_LOCKED));
449 
450 	if (attr & dirty_bits) {
451 		size_t idx = pmem->pgidx - core_mmu_va2idx(ti,
452 							   pmem->area->base);
453 		void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE;
454 
455 		assert(pmem->area->flags & TEE_MATTR_PW);
456 		encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias,
457 			     stored_page);
458 		FMSG("Saved %#" PRIxVA " iv %#" PRIx64,
459 			core_mmu_idx2va(ti, pmem->pgidx),
460 			pmem->area->u.rwp[idx].iv);
461 	}
462 }
463 
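/*
 * If page_va refers to a hidden page, makes it visible again (read-only
 * unless it was hidden dirty), moves the pmem entry to the back of the
 * list and returns true. Returns false otherwise.
 */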
464 static bool tee_pager_unhide_page(vaddr_t page_va)
465 {
466 	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
467 	struct tee_pager_pmem *pmem;
468 
469 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
470 		paddr_t pa;
471 		uint32_t attr;
472 
473 		if (pmem->pgidx == INVALID_PGIDX)
474 			continue;
475 
476 		core_mmu_get_entry(ti, pmem->pgidx,
477 				   &pa, &attr);
478 
479 		if (!(attr &
480 		     (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
481 			continue;
482 
483 		if (core_mmu_va2idx(ti, page_va) == pmem->pgidx) {
484 			uint32_t a = get_area_mattr(pmem->area);
485 
486 			/* The page is hidden, unhide it and move it to the back */
487 			if (pa != get_pmem_pa(pmem))
488 				panic("unexpected pa");
489 
490 			/*
491 			 * If it's not a dirty block, then it should be
492 			 * read only.
493 			 */
494 			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
495 				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
496 			else
497 				FMSG("Unhide %#" PRIxVA, page_va);
498 			core_mmu_set_entry(ti, pmem->pgidx, pa, a);
499 
500 			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
501 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
502 
503 			/* TODO only invalidate entry touched above */
504 			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
505 
506 			incr_hidden_hits();
507 			return true;
508 		}
509 	}
510 
511 	return false;
512 }
513 
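/*
 * Hides up to TEE_PAGER_NHIDE of the oldest mapped pages by marking their
 * entries as (dirty) hidden blocks, so that the next access faults and
 * the page can be moved to the back of the list.
 */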
514 static void tee_pager_hide_pages(void)
515 {
516 	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
517 	struct tee_pager_pmem *pmem;
518 	size_t n = 0;
519 
520 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
521 		paddr_t pa;
522 		uint32_t attr;
523 		uint32_t a;
524 
525 		if (n >= TEE_PAGER_NHIDE)
526 			break;
527 		n++;
528 
529 		/*
530 		 * We cannot hide pages when pmem->area is not defined, as
531 		 * unhiding requires pmem->area to be defined.
532 		 */
533 		if (!pmem->area)
534 			continue;
535 
536 		core_mmu_get_entry(ti, pmem->pgidx, &pa, &attr);
537 		if (!(attr & TEE_MATTR_VALID_BLOCK))
538 			continue;
539 
540 		assert(pa == get_pmem_pa(pmem));
541 		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)){
542 			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
543 			FMSG("Hide %#" PRIxVA,
544 			     ti->va_base + pmem->pgidx * SMALL_PAGE_SIZE);
545 		} else
546 			a = TEE_MATTR_HIDDEN_BLOCK;
547 		core_mmu_set_entry(ti, pmem->pgidx, pa, a);
548 
549 	}
550 
551 	/* TODO only invalidate entries touched above */
552 	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
553 }
554 
555 /*
556  * Find the locked pmem entry mapping page_va, unmap it and move it back
557  * to the pageable list. Return false if the page was not mapped, else true.
558  */
559 static bool tee_pager_release_one_phys(vaddr_t page_va)
560 {
561 	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
562 	struct tee_pager_pmem *pmem;
563 	unsigned pgidx;
564 	paddr_t pa;
565 	uint32_t attr;
566 
567 	pgidx = core_mmu_va2idx(ti, page_va);
568 	core_mmu_get_entry(ti, pgidx, &pa, &attr);
569 
570 	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
571 
572 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
573 		if (pmem->pgidx != pgidx)
574 			continue;
575 
576 		assert(pa == get_pmem_pa(pmem));
577 		core_mmu_set_entry(ti, pgidx, 0, 0);
578 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
579 		pmem->area = NULL;
580 		pmem->pgidx = INVALID_PGIDX;
581 		tee_pager_npages++;
582 		set_npages();
583 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
584 		incr_zi_released();
585 		return true;
586 	}
587 
588 	return false;
589 }
590 
591 /* Finds the oldest page, saves it if dirty and unmaps it from its old VA */
592 static struct tee_pager_pmem *tee_pager_get_page(uint32_t next_area_flags)
593 {
594 	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
595 	struct tee_pager_pmem *pmem;
596 
597 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
598 	if (!pmem) {
599 		EMSG("No pmem entries");
600 		return NULL;
601 	}
602 	if (pmem->pgidx != INVALID_PGIDX) {
603 		uint32_t a;
604 
605 		core_mmu_get_entry(ti, pmem->pgidx, NULL, &a);
606 		core_mmu_set_entry(ti, pmem->pgidx, 0, 0);
607 		/* TODO only invalidate entries touched above */
608 		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
609 		tee_pager_save_page(pmem, a);
610 	}
611 
612 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
613 	pmem->pgidx = INVALID_PGIDX;
614 	pmem->area = NULL;
615 	if (next_area_flags & TEE_MATTR_LOCKED) {
616 		/* Move page to lock list */
617 		if (tee_pager_npages <= 0)
618 			panic("running out of pages");
619 		tee_pager_npages--;
620 		set_npages();
621 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
622 	} else {
623 		/* move page to back */
624 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
625 	}
626 
627 	return pmem;
628 }
629 
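/*
 * Handles aborts on pages that are already mapped. Returns true if the
 * access is fine with the current mapping or the entry was upgraded to
 * writable (dirty), false if the page isn't mapped, and panics on real
 * permission violations.
 */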
630 static bool pager_update_permissions(struct tee_pager_area *area,
631 			struct abort_info *ai)
632 {
633 	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
634 	unsigned pgidx = core_mmu_va2idx(ti, ai->va);
635 	uint32_t attr;
636 	paddr_t pa;
637 
638 	core_mmu_get_entry(ti, pgidx, &pa, &attr);
639 
640 	/* Not mapped */
641 	if (!(attr & TEE_MATTR_VALID_BLOCK))
642 		return false;
643 
644 	/* Not readable, should not happen */
645 	if (!(attr & TEE_MATTR_PR)) {
646 		abort_print_error(ai);
647 		panic();
648 	}
649 
650 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
651 	case CORE_MMU_FAULT_TRANSLATION:
652 	case CORE_MMU_FAULT_READ_PERMISSION:
653 		if (ai->abort_type == ABORT_TYPE_PREFETCH &&
654 		    !(attr & TEE_MATTR_PX)) {
655 			/* Attempting to execute from a non-executable page */
656 			abort_print_error(ai);
657 			panic();
658 		}
659 		/* Since the page is mapped now it's OK */
660 		return true;
661 	case CORE_MMU_FAULT_WRITE_PERMISSION:
662 		if (!(area->flags & TEE_MATTR_PW)) {
663 			/* Attempting to write to an RO page */
664 			abort_print_error(ai);
665 			panic();
666 		}
667 		if (!(attr & TEE_MATTR_PW)) {
668 			FMSG("Dirty %p", (void *)(ai->va & ~SMALL_PAGE_MASK));
669 			core_mmu_set_entry(ti, pgidx, pa, attr | TEE_MATTR_PW);
670 			/* TODO only invalidate entry above */
671 			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
672 		}
673 		/* Since the permissions have been updated it's OK now */
674 		return true;
675 	default:
676 		/* Some fault we can't deal with */
677 		abort_print_error(ai);
678 		panic();
679 	}
680 }
681 
682 #ifdef CFG_TEE_CORE_DEBUG
683 static void stat_handle_fault(void)
684 {
685 	static size_t num_faults;
686 	static size_t min_npages = SIZE_MAX;
687 	static size_t total_min_npages = SIZE_MAX;
688 
689 	num_faults++;
690 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
691 		DMSG("nfaults %zu npages %zu (min %zu)",
692 		     num_faults, tee_pager_npages, min_npages);
693 		min_npages = tee_pager_npages; /* reset */
694 	}
695 	if (tee_pager_npages < min_npages)
696 		min_npages = tee_pager_npages;
697 	if (tee_pager_npages < total_min_npages)
698 		total_min_npages = tee_pager_npages;
699 }
700 #else
701 static void stat_handle_fault(void)
702 {
703 }
704 #endif
705 
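/*
 * Resolves an abort in a pager area: unhides the page, updates the
 * permissions of an already mapped page, or evicts the oldest page and
 * loads the faulting one through the alias mapping. Returns false if the
 * address isn't handled by the pager.
 */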
706 bool tee_pager_handle_fault(struct abort_info *ai)
707 {
708 	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
709 	struct tee_pager_area *area;
710 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
711 	uint32_t exceptions;
712 	bool ret;
713 
714 #ifdef TEE_PAGER_DEBUG_PRINT
715 	abort_print(ai);
716 #endif
717 
718 	/*
719 	 * We're updating pages that can affect several active CPUs at a
720 	 * time below. We end up here because a thread tries to access some
721 	 * memory that isn't available. We have to be careful when making
722 	 * that memory available as other threads may succeed in accessing
723 	 * that address the moment after we've made it available.
724 	 *
725 	 * That means that we can't just map the memory and populate the
726 	 * page; instead we use the aliased mapping to populate the page
727 	 * and once everything is ready we map it.
728 	 */
729 	exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
730 	cpu_spin_lock(&pager_lock);
731 
732 	stat_handle_fault();
733 
734 	/* check if the access is valid */
735 	area = tee_pager_find_area(ai->va);
736 	if (!area) {
737 		EMSG("Invalid addr 0x%" PRIxVA, ai->va);
738 		ret = false;
739 		goto out;
740 	}
741 
742 	if (!tee_pager_unhide_page(page_va)) {
743 		struct tee_pager_pmem *pmem = NULL;
744 		uint32_t attr;
745 
746 		/*
747 		 * The page wasn't hidden, but some other core may have
748 		 * updated the table entry before we got here or we need
749 		 * to make a read-only page read-write (dirty).
750 		 */
751 		if (pager_update_permissions(area, ai)) {
752 			/*
753 			 * Kind of access is OK with the mapping, we're
754 			 * done here because the fault has already been
755 			 * dealt with by another core.
756 			 */
757 			ret = true;
758 			goto out;
759 		}
760 
761 		pmem = tee_pager_get_page(area->flags);
762 		if (!pmem) {
763 			abort_print(ai);
764 			panic();
765 		}
766 
767 		/* load page code & data */
768 		tee_pager_load_page(area, page_va, pmem->va_alias);
769 
770 		/*
771 		 * We've updated the page using the aliased mapping and
772 		 * some cache maintenance is now needed if it's an
773 		 * executable page.
774 		 *
775 		 * Since the d-cache is a Physically-indexed,
776 		 * physically-tagged (PIPT) cache we can clean the aliased
777 		 * address instead of the real virtual address.
778 		 *
779 		 * The i-cache can also be PIPT, but may be something else
780 		 * too, so to keep it simple we invalidate the entire i-cache.
781 		 * As a future optimization we may invalidate only the
782 		 * aliased area if it is a PIPT cache, else the entire cache.
783 		 */
784 		if (area->flags & TEE_MATTR_PX) {
785 			/*
786 			 * Doing these operations to LoUIS (Level of
787 			 * unification, Inner Shareable) would be enough
788 			 */
789 			cache_maintenance_l1(DCACHE_AREA_CLEAN,
790 				pmem->va_alias, SMALL_PAGE_SIZE);
791 
792 			cache_maintenance_l1(ICACHE_INVALIDATE, NULL, 0);
793 		}
794 
795 		pmem->area = area;
796 		pmem->pgidx = core_mmu_va2idx(ti, ai->va);
797 		attr = get_area_mattr(area) & ~(TEE_MATTR_PW | TEE_MATTR_UW);
798 		core_mmu_set_entry(ti, pmem->pgidx, get_pmem_pa(pmem), attr);
799 
800 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
801 		     core_mmu_idx2va(ti, pmem->pgidx), get_pmem_pa(pmem));
802 
803 	}
804 
805 	tee_pager_hide_pages();
806 	ret = true;
807 out:
808 	cpu_spin_unlock(&pager_lock);
809 	thread_unmask_exceptions(exceptions);
810 	return ret;
811 }
812 
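/*
 * Adds npages physical pages, currently mapped at vaddr, to the pager's
 * pool. Each page gets an alias mapping. With unmap the pages are
 * unmapped and immediately available for paging, otherwise they remain
 * mapped with the attributes of their area.
 */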
813 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
814 {
815 	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
816 	size_t n;
817 
818 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
819 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
820 
821 	/* setup memory */
822 	for (n = 0; n < npages; n++) {
823 		struct tee_pager_pmem *pmem;
824 		tee_vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
825 		unsigned pgidx = core_mmu_va2idx(ti, va);
826 		paddr_t pa;
827 		uint32_t attr;
828 
829 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
830 
831 		/* Ignore unmapped pages/blocks */
832 		if (!(attr & TEE_MATTR_VALID_BLOCK))
833 			continue;
834 
835 		pmem = malloc(sizeof(struct tee_pager_pmem));
836 		if (!pmem)
837 			panic("out of mem");
838 
839 		pmem->va_alias = pager_add_alias_page(pa);
840 
841 		if (unmap) {
842 			pmem->area = NULL;
843 			pmem->pgidx = INVALID_PGIDX;
844 			core_mmu_set_entry(ti, pgidx, 0, 0);
845 		} else {
846 			/*
847 			 * The page is still mapped, let's assign the area
848 			 * and update the protection bits accordingly.
849 			 */
850 			pmem->area = tee_pager_find_area(va);
851 			pmem->pgidx = pgidx;
852 			assert(pa == get_pmem_pa(pmem));
853 			core_mmu_set_entry(ti, pgidx, pa,
854 					   get_area_mattr(pmem->area));
855 		}
856 
857 		tee_pager_npages++;
858 		incr_npages_all();
859 		set_npages();
860 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
861 	}
862 
863 	/* Invalidate secure TLB */
864 	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
865 }
866 
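/*
 * Releases the locked physical pages fully contained in [addr, addr + size)
 * back to the pool of pageable pages.
 */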
867 void tee_pager_release_phys(void *addr, size_t size)
868 {
869 	bool unmapped = false;
870 	vaddr_t va = (vaddr_t)addr;
871 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
872 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
873 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
874 
875 	cpu_spin_lock(&pager_lock);
876 
877 	for (va = begin; va < end; va += SMALL_PAGE_SIZE)
878 		unmapped |= tee_pager_release_one_phys(va);
879 
880 	/* Invalidate secure TLB */
881 	if (unmapped)
882 		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
883 
884 	cpu_spin_unlock(&pager_lock);
885 	thread_set_exceptions(exceptions);
886 }
887 KEEP_PAGER(tee_pager_release_phys);
888 
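/*
 * Allocates pager backed read/write memory from the core virtual address
 * space. Pages are zero initialized on first access; with TEE_MATTR_LOCKED
 * they stay mapped until released with tee_pager_release_phys().
 */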
889 void *tee_pager_alloc(size_t size, uint32_t flags)
890 {
891 	tee_mm_entry_t *mm;
892 	uint32_t f = TEE_MATTR_PRW | (flags & TEE_MATTR_LOCKED);
893 
894 	if (!size)
895 		return NULL;
896 
897 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
898 	if (!mm)
899 		return NULL;
900 
901 	tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
902 				f, NULL, NULL);
903 
904 	return (void *)tee_mm_get_smem(mm);
905 }
906