xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision 5e7638a863f42f6b400e0707fcb790057916451f)
1 /*
2  * Copyright (c) 2016, Linaro Limited
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  * this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  * this list of conditions and the following disclaimer in the documentation
14  * and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/queue.h>
30 #include <kernel/abort.h>
31 #include <kernel/panic.h>
32 #include <kernel/tee_misc.h>
33 #include <kernel/tee_ta_manager.h>
34 #include <kernel/thread.h>
35 #include <kernel/tz_proc.h>
36 #include <mm/core_memprot.h>
37 #include <mm/tee_mm.h>
38 #include <mm/tee_mmu_defs.h>
39 #include <mm/tee_pager.h>
40 #include <types_ext.h>
41 #include <stdlib.h>
42 #include <tee_api_defines.h>
43 #include <tee/tee_cryp_provider.h>
44 #include <trace.h>
45 #include <utee_defines.h>
46 #include <util.h>
47 #include <keep.h>
48 #include "pager_private.h"
49 
50 #define PAGER_AE_KEY_BITS	256
51 
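/*
 * struct pager_rw_pstate - Per-page paging state for a read/write page.
 *
 * @iv	Counter used to build the AES-GCM IV when the page is encrypted to
 *	its backing store; 0 means the page has never been saved and is
 *	zero-initialized when loaded.
 * @tag	AES-GCM authentication tag of the encrypted copy of the page.
 */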
52 struct pager_rw_pstate {
53 	uint64_t iv;
54 	uint8_t tag[PAGER_AES_GCM_TAG_LEN];
55 };
56 
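/*
 * struct tee_pager_area - A virtual address range managed by the pager.
 *
 * @u.hashes	Per-page SHA-256 hashes, used for read-only areas.
 * @u.rwp	Per-page IV/tag state, used for read/write areas.
 * @store	Backing store pages are loaded from and, for read/write
 *		areas, encrypted back to.
 * @flags	TEE_PAGER_AREA_* flags describing the area.
 * @base	Virtual base address of the area.
 * @size	Size of the area in bytes, a multiple of SMALL_PAGE_SIZE.
 */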
57 struct tee_pager_area {
58 	union {
59 		const uint8_t *hashes;
60 		struct pager_rw_pstate *rwp;
61 	} u;
62 	uint8_t *store;
63 	uint32_t flags;
64 	vaddr_t base;
65 	size_t size;
66 	TAILQ_ENTRY(tee_pager_area) link;
67 };
68 
69 static TAILQ_HEAD(tee_pager_area_head, tee_pager_area) tee_pager_area_head =
70 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
71 
72 #define INVALID_PGIDX	UINT_MAX
73 
74 /*
75  * struct tee_pager_pmem - Represents a physical page used for paging.
76  *
77  * @pgidx	Index of the entry in tee_pager_tbl_info.
78  * @va_alias	Virtual address where the physical page is always aliased.
79  *		Used during remapping of the page when the content needs to
80  *		be updated before it's available at the new location.
81  * @area	Pointer to the pager area the page belongs to, or NULL
82  */
83 struct tee_pager_pmem {
84 	unsigned pgidx;
85 	void *va_alias;
86 	struct tee_pager_area *area;
87 	TAILQ_ENTRY(tee_pager_pmem) link;
88 };
89 
90 /* The list of physical pages. The first page in the list is the oldest */
91 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
92 
93 static struct tee_pager_pmem_head tee_pager_pmem_head =
94 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
95 
96 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
97 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
98 
99 static uint8_t pager_ae_key[PAGER_AE_KEY_BITS / 8];
100 
101 /* Number of pages hidden at a time */
102 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
103 
104 /* Number of registered physical pages, used when hiding pages. */
105 static size_t tee_pager_npages;
106 
107 #ifdef CFG_WITH_STATS
108 static struct tee_pager_stats pager_stats;
109 
110 static inline void incr_ro_hits(void)
111 {
112 	pager_stats.ro_hits++;
113 }
114 
115 static inline void incr_rw_hits(void)
116 {
117 	pager_stats.rw_hits++;
118 }
119 
120 static inline void incr_hidden_hits(void)
121 {
122 	pager_stats.hidden_hits++;
123 }
124 
125 static inline void incr_zi_released(void)
126 {
127 	pager_stats.zi_released++;
128 }
129 
130 static inline void incr_npages_all(void)
131 {
132 	pager_stats.npages_all++;
133 }
134 
135 static inline void set_npages(void)
136 {
137 	pager_stats.npages = tee_pager_npages;
138 }
139 
140 void tee_pager_get_stats(struct tee_pager_stats *stats)
141 {
142 	*stats = pager_stats;
143 
144 	pager_stats.hidden_hits = 0;
145 	pager_stats.ro_hits = 0;
146 	pager_stats.rw_hits = 0;
147 	pager_stats.zi_released = 0;
148 }
149 
150 #else /* CFG_WITH_STATS */
151 static inline void incr_ro_hits(void) { }
152 static inline void incr_rw_hits(void) { }
153 static inline void incr_hidden_hits(void) { }
154 static inline void incr_zi_released(void) { }
155 static inline void incr_npages_all(void) { }
156 static inline void set_npages(void) { }
157 
158 void tee_pager_get_stats(struct tee_pager_stats *stats)
159 {
160 	memset(stats, 0, sizeof(struct tee_pager_stats));
161 }
162 #endif /* CFG_WITH_STATS */
163 
164 struct core_mmu_table_info tee_pager_tbl_info;
165 static struct core_mmu_table_info pager_alias_tbl_info;
166 
167 static unsigned pager_lock = SPINLOCK_UNLOCK;
168 
169 /* Defines the range of the alias area */
170 static tee_mm_entry_t *pager_alias_area;
171 /*
172  * Physical pages are added in a stack-like fashion to the alias area.
173  * @pager_alias_next_free gives the address of the next free entry if
174  * @pager_alias_next_free is != 0
175  */
176 static uintptr_t pager_alias_next_free;
177 
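/* Records @mm as the alias area and clears any existing mappings in it */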
178 static void set_alias_area(tee_mm_entry_t *mm)
179 {
180 	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
181 	size_t tbl_va_size;
182 	unsigned idx;
183 	unsigned last_idx;
184 	vaddr_t smem = tee_mm_get_smem(mm);
185 	size_t nbytes = tee_mm_get_bytes(mm);
186 
187 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
188 
189 	TEE_ASSERT(!pager_alias_area);
190 	if (!ti->num_entries && !core_mmu_find_table(smem, UINT_MAX, ti)) {
191 		DMSG("Can't find translation table");
192 		panic();
193 	}
194 	if ((1 << ti->shift) != SMALL_PAGE_SIZE) {
195 		DMSG("Unsupported page size in translation table %u",
196 		     1 << ti->shift);
197 		panic();
198 	}
199 
200 	tbl_va_size = (1 << ti->shift) * ti->num_entries;
201 	if (!core_is_buffer_inside(smem, nbytes,
202 				   ti->va_base, tbl_va_size)) {
203 		DMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
204 			smem, nbytes, ti->va_base, tbl_va_size);
205 		panic();
206 	}
207 
208 	TEE_ASSERT(!(smem & SMALL_PAGE_MASK));
209 	TEE_ASSERT(!(nbytes & SMALL_PAGE_MASK));
210 
211 	pager_alias_area = mm;
212 	pager_alias_next_free = smem;
213 
214 	/* Clear all mapping in the alias area */
215 	idx = core_mmu_va2idx(ti, smem);
216 	last_idx = core_mmu_va2idx(ti, smem + nbytes);
217 	for (; idx < last_idx; idx++)
218 		core_mmu_set_entry(ti, idx, 0, 0);
219 
220 	/* TODO only invalidate entries touched above */
221 	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
222 }
223 
224 static void generate_ae_key(void)
225 {
226 	TEE_Result res;
227 
228 	res = rng_generate(pager_ae_key, sizeof(pager_ae_key));
229 	TEE_ASSERT(res == TEE_SUCCESS);
230 }
231 
232 void tee_pager_init(tee_mm_entry_t *mm_alias)
233 {
234 	set_alias_area(mm_alias);
235 	generate_ae_key();
236 }
237 
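/*
 * Maps the physical page @pa read/write at the next free slot in the
 * alias area and returns the aliased virtual address.
 */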
238 static void *pager_add_alias_page(paddr_t pa)
239 {
240 	unsigned idx;
241 	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
242 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
243 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
244 			TEE_MATTR_SECURE | TEE_MATTR_PRW;
245 
246 	DMSG("0x%" PRIxPA, pa);
247 
248 	TEE_ASSERT(pager_alias_next_free && ti->num_entries);
249 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
250 	core_mmu_set_entry(ti, idx, pa, attr);
251 	pager_alias_next_free += SMALL_PAGE_SIZE;
252 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
253 				      tee_mm_get_bytes(pager_alias_area)))
254 		pager_alias_next_free = 0;
255 	return (void *)core_mmu_idx2va(ti, idx);
256 }
257 
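/*
 * Allocates and initializes a pager area. Read/write areas (unless
 * locked) get a backing store in secure DDR and per-page crypto state,
 * while read-only areas reference the supplied @store and @hashes
 * directly.
 */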
258 static struct tee_pager_area *alloc_area(vaddr_t base, size_t size,
259 			uint32_t flags, const void *store, const void *hashes)
260 {
261 	struct tee_pager_area *area = calloc(1, sizeof(*area));
262 	tee_mm_entry_t *mm_store = NULL;
263 
264 	if (!area)
265 		return NULL;
266 
267 	if (flags & TEE_PAGER_AREA_RW) {
268 		if (flags & TEE_PAGER_AREA_LOCK)
269 			goto out;
270 		mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size);
271 		if (!mm_store)
272 			goto bad;
273 		area->store = phys_to_virt(tee_mm_get_smem(mm_store),
274 					   MEM_AREA_TA_RAM);
275 		if (!area->store)
276 			goto bad;
277 		area->u.rwp = calloc(size / SMALL_PAGE_SIZE,
278 				     sizeof(struct pager_rw_pstate));
279 		if (!area->u.rwp)
280 			goto bad;
281 	} else {
282 		area->store = (void *)store;
283 		area->u.hashes = hashes;
284 	}
285 out:
286 	area->base = base;
287 	area->size = size;
288 	area->flags = flags;
289 	return area;
290 bad:
291 	tee_mm_free(mm_store);
292 	free(area->u.rwp);
293 	free(area);
294 	return NULL;
295 }
296 
297 static void area_insert_tail(struct tee_pager_area *area)
298 {
299 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
300 
301 	cpu_spin_lock(&pager_lock);
302 
303 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
304 
305 	cpu_spin_unlock(&pager_lock);
306 	thread_set_exceptions(exceptions);
307 }
308 KEEP_PAGER(area_insert_tail);
309 
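/*
 * Registers a page-aligned virtual range with the pager. Read-only areas
 * take a backing @store and per-page SHA-256 @hashes, read/write areas
 * must pass NULL for both. Returns false if the range doesn't fit in the
 * pager translation table or if allocation fails.
 *
 * Illustrative (hypothetical) registration of an executable read-only
 * area, where va, size, stored_pages and page_hashes are placeholders:
 *
 *   tee_pager_add_core_area(va, size,
 *                           TEE_PAGER_AREA_RO | TEE_PAGER_AREA_X,
 *                           stored_pages, page_hashes);
 */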
310 bool tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
311 			const void *store, const void *hashes)
312 {
313 	struct tee_pager_area *area;
314 	size_t tbl_va_size;
315 
316 
317 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
318 		base, base + size, flags, store, hashes);
319 
320 	TEE_ASSERT(!(base & SMALL_PAGE_MASK) &&
321 			size && !(size & SMALL_PAGE_MASK));
322 
323 	if (flags & TEE_PAGER_AREA_RO)
324 		TEE_ASSERT(store && hashes);
325 	else if (flags & TEE_PAGER_AREA_RW)
326 		TEE_ASSERT(!store && !hashes);
327 	else
328 		panic();
329 
330 	if (!tee_pager_tbl_info.num_entries) {
331 		if (!core_mmu_find_table(base, UINT_MAX, &tee_pager_tbl_info))
332 			return false;
333 		if ((1 << tee_pager_tbl_info.shift) != SMALL_PAGE_SIZE) {
334 			DMSG("Unsupported page size in translation table %u",
335 			     1 << tee_pager_tbl_info.shift);
336 			return false;
337 		}
338 	}
339 
340 	tbl_va_size = (1 << tee_pager_tbl_info.shift) *
341 			tee_pager_tbl_info.num_entries;
342 	if (!core_is_buffer_inside(base, size, tee_pager_tbl_info.va_base,
343 				   tbl_va_size)) {
344 		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
345 			base, size, tee_pager_tbl_info.va_base, tbl_va_size);
346 		return false;
347 	}
348 
349 	area = alloc_area(base, size, flags, store, hashes);
350 	if (!area)
351 		return false;
352 
353 	area_insert_tail(area);
354 	return true;
355 }
356 
357 static struct tee_pager_area *tee_pager_find_area(vaddr_t va)
358 {
359 	struct tee_pager_area *area;
360 
361 	TAILQ_FOREACH(area, &tee_pager_area_head, link) {
362 		if (core_is_buffer_inside(va, 1, area->base, area->size))
363 			return area;
364 	}
365 	return NULL;
366 }
367 
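/* Returns the mapping attributes to use for a fully mapped page in @area */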
368 static uint32_t get_area_mattr(struct tee_pager_area *area)
369 {
370 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
371 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
372 			TEE_MATTR_SECURE | TEE_MATTR_PR;
373 
374 	if (!(area->flags & TEE_PAGER_AREA_RO))
375 		attr |= TEE_MATTR_PW;
376 	if (area->flags & TEE_PAGER_AREA_X)
377 		attr |= TEE_MATTR_PX;
378 	if (area->flags & TEE_PAGER_AREA_RW)
379 		attr |= TEE_MATTR_PW;
380 
381 	return attr;
382 }
383 
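/* Returns the physical address of @pmem, looked up via its alias mapping */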
384 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
385 {
386 	paddr_t pa;
387 	unsigned idx;
388 
389 	idx = core_mmu_va2idx(&pager_alias_tbl_info, (vaddr_t)pmem->va_alias);
390 	core_mmu_get_entry(&pager_alias_tbl_info, idx, &pa, NULL);
391 	return pa;
392 }
393 
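/*
 * Decrypts and authenticates one stored page into @dst using the IV and
 * tag recorded in @rwp. Returns false if authentication fails.
 */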
394 static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
395 			void *dst)
396 {
397 	struct pager_aes_gcm_iv iv = {
398 		{ (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }
399 	};
400 
401 	return pager_aes_gcm_decrypt(pager_ae_key, sizeof(pager_ae_key),
402 				     &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE);
403 }
404 
405 static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
406 {
407 	struct pager_aes_gcm_iv iv;
408 
409 	assert((rwp->iv + 1) > rwp->iv);
410 	rwp->iv++;
411 	/*
412 	 * IV is constructed as recommended in section "8.2.1 Deterministic
413 	 * Construction" of "Recommendation for Block Cipher Modes of
414 	 * Operation: Galois/Counter Mode (GCM) and GMAC",
415 	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
416 	 */
417 	iv.iv[0] = (vaddr_t)rwp;
418 	iv.iv[1] = rwp->iv >> 32;
419 	iv.iv[2] = rwp->iv;
420 
421 	if (!pager_aes_gcm_encrypt(pager_ae_key, sizeof(pager_ae_key),
422 				   &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE))
423 		panic();
424 }
425 
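/*
 * Populates the page covering @page_va into the aliased page @va_alias:
 * read-only pages are copied from the store and checked against their
 * SHA-256 hash, locked pages are zero-initialized, and read/write pages
 * are either zero-initialized (never saved, iv == 0) or decrypted from
 * the store.
 */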
426 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
427 			void *va_alias)
428 {
429 	size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
430 	const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;
431 
432 	if (area->flags & TEE_PAGER_AREA_RO) {
433 		const void *hash = area->u.hashes + idx * TEE_SHA256_HASH_SIZE;
434 
435 		memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
436 		incr_ro_hits();
437 
438 		if (hash_sha256_check(hash, va_alias, SMALL_PAGE_SIZE) !=
439 				TEE_SUCCESS) {
440 			EMSG("PH 0x%" PRIxVA " failed", page_va);
441 			panic();
442 		}
443 	} else if (area->flags & TEE_PAGER_AREA_LOCK) {
444 		FMSG("Zero init %p %#" PRIxVA, va_alias, page_va);
445 		memset(va_alias, 0, SMALL_PAGE_SIZE);
446 	} else {
447 		FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64,
448 			va_alias, page_va, area->u.rwp[idx].iv);
449 		if (!area->u.rwp[idx].iv)
450 			memset(va_alias, 0, SMALL_PAGE_SIZE);
451 		else if (!decrypt_page(&area->u.rwp[idx], stored_page,
452 				       va_alias)) {
453 			EMSG("PH 0x%" PRIxVA " failed", page_va);
454 			panic();
455 		}
456 		incr_rw_hits();
457 	}
458 }
459 
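/*
 * Saves a page before its physical page is reused: dirty read/write
 * pages are encrypted back to their backing store, clean pages are
 * simply dropped.
 */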
460 static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
461 {
462 	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
463 				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;
464 
465 	assert(!(pmem->area->flags & TEE_PAGER_AREA_LOCK));
466 
467 	if (attr & dirty_bits) {
468 		size_t idx = pmem->pgidx - core_mmu_va2idx(&tee_pager_tbl_info,
469 							   pmem->area->base);
470 		void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE;
471 
472 		assert(pmem->area->flags & TEE_PAGER_AREA_RW);
473 		encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias,
474 			     stored_page);
475 		FMSG("Saved %#" PRIxVA " iv %#" PRIx64,
476 			core_mmu_idx2va(&tee_pager_tbl_info, pmem->pgidx),
477 			pmem->area->u.rwp[idx].iv);
478 	}
479 }
480 
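/*
 * If the page at @page_va is currently hidden, makes it accessible again
 * (read-only unless it was hidden dirty), moves it to the back of the
 * pmem list and returns true. Returns false if the page wasn't hidden.
 */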
481 static bool tee_pager_unhide_page(vaddr_t page_va)
482 {
483 	struct tee_pager_pmem *pmem;
484 
485 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
486 		paddr_t pa;
487 		uint32_t attr;
488 
489 		if (pmem->pgidx == INVALID_PGIDX)
490 			continue;
491 
492 		core_mmu_get_entry(&tee_pager_tbl_info, pmem->pgidx,
493 				   &pa, &attr);
494 
495 		if (!(attr &
496 		     (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
497 			continue;
498 
499 		if (core_mmu_va2idx(&tee_pager_tbl_info, page_va) ==
500 		    pmem->pgidx) {
501 			uint32_t a = get_area_mattr(pmem->area);
502 
503 			/* page is hidden, show and move to back */
504 			assert(pa == get_pmem_pa(pmem));
505 			/*
506 			 * If it's not a dirty block, then it should be
507 			 * read only.
508 			 */
509 			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
510 				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
511 			else
512 				FMSG("Unhide %#" PRIxVA, page_va);
513 			core_mmu_set_entry(&tee_pager_tbl_info, pmem->pgidx, pa,
514 					   a);
515 
516 			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
517 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
518 
519 			/* TODO only invalidate entry touched above */
520 			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
521 
522 			incr_hidden_hits();
523 			return true;
524 		}
525 	}
526 
527 	return false;
528 }
529 
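/*
 * Hides up to TEE_PAGER_NHIDE of the oldest mapped pages so that the
 * next access to them faults and tee_pager_unhide_page() can move the
 * still used pages to the back of the list.
 */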
530 static void tee_pager_hide_pages(void)
531 {
532 	struct tee_pager_pmem *pmem;
533 	size_t n = 0;
534 
535 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
536 		paddr_t pa;
537 		uint32_t attr;
538 		uint32_t a;
539 
540 		if (n >= TEE_PAGER_NHIDE)
541 			break;
542 		n++;
543 
544 		/*
545 		 * We cannot hide pages when pmem->area is not defined, as
546 		 * unhide requires pmem->area to be defined
547 		 */
548 		if (!pmem->area)
549 			continue;
550 
551 		core_mmu_get_entry(&tee_pager_tbl_info, pmem->pgidx,
552 				   &pa, &attr);
553 		if (!(attr & TEE_MATTR_VALID_BLOCK))
554 			continue;
555 
556 		assert(pa == get_pmem_pa(pmem));
557 		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)){
558 			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
559 			FMSG("Hide %#" PRIxVA,
560 			     tee_pager_tbl_info.va_base +
561 			     pmem->pgidx * SMALL_PAGE_SIZE);
562 		} else
563 			a = TEE_MATTR_HIDDEN_BLOCK;
564 		core_mmu_set_entry(&tee_pager_tbl_info, pmem->pgidx, pa, a);
565 
566 	}
567 
568 	/* TODO only invalidate entries touched above */
569 	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
570 }
571 
572 /*
573  * Find mapped pmem, unmap it and move it to the pageable pmem list.
574  * Return false if page was not mapped, and true if page was mapped.
575  */
576 static bool tee_pager_release_one_phys(vaddr_t page_va)
577 {
578 	struct tee_pager_pmem *pmem;
579 	unsigned pgidx;
580 	paddr_t pa;
581 	uint32_t attr;
582 
583 	pgidx = core_mmu_va2idx(&tee_pager_tbl_info, page_va);
584 	core_mmu_get_entry(&tee_pager_tbl_info, pgidx, &pa, &attr);
585 
586 	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
587 
588 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
589 		if (pmem->pgidx != pgidx)
590 			continue;
591 
592 		assert(pa == get_pmem_pa(pmem));
593 		core_mmu_set_entry(&tee_pager_tbl_info, pgidx, 0, 0);
594 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
595 		pmem->area = NULL;
596 		pmem->pgidx = INVALID_PGIDX;
597 		tee_pager_npages++;
598 		set_npages();
599 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
600 		incr_zi_released();
601 		return true;
602 	}
603 
604 	return false;
605 }
606 
607 /* Finds the oldest page and unmaps it from its old virtual address */
608 static struct tee_pager_pmem *tee_pager_get_page(uint32_t next_area_flags)
609 {
610 	struct tee_pager_pmem *pmem;
611 
612 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
613 	if (!pmem) {
614 		EMSG("No pmem entries");
615 		return NULL;
616 	}
617 	if (pmem->pgidx != INVALID_PGIDX) {
618 		uint32_t a;
619 
620 		core_mmu_get_entry(&tee_pager_tbl_info, pmem->pgidx, NULL, &a);
621 		core_mmu_set_entry(&tee_pager_tbl_info, pmem->pgidx, 0, 0);
622 		/* TODO only invalidate entries touched above */
623 		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
624 		tee_pager_save_page(pmem, a);
625 	}
626 
627 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
628 	pmem->pgidx = INVALID_PGIDX;
629 	pmem->area = NULL;
630 	if (next_area_flags & TEE_PAGER_AREA_LOCK) {
631 		/* Move page to lock list */
632 		TEE_ASSERT(tee_pager_npages > 0);
633 		tee_pager_npages--;
634 		set_npages();
635 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
636 	} else {
637 		/* move page to back */
638 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
639 	}
640 
641 	return pmem;
642 }
643 
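/*
 * Handles a fault on a page that is already mapped. Returns true if the
 * fault is resolved by the existing mapping (for instance another core
 * already mapped the page) or by granting write access to a read/write
 * page, and false if the entry isn't mapped at all. Panics on genuine
 * permission violations.
 */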
644 static bool pager_update_permissions(struct tee_pager_area *area,
645 			struct abort_info *ai)
646 {
647 	unsigned pgidx = core_mmu_va2idx(&tee_pager_tbl_info, ai->va);
648 	uint32_t attr;
649 	paddr_t pa;
650 
651 	core_mmu_get_entry(&tee_pager_tbl_info, pgidx, &pa, &attr);
652 
653 	/* Not mapped */
654 	if (!(attr & TEE_MATTR_VALID_BLOCK))
655 		return false;
656 
657 	/* Not readable, should not happen */
658 	if (!(attr & TEE_MATTR_PR)) {
659 		abort_print_error(ai);
660 		panic();
661 	}
662 
663 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
664 	case CORE_MMU_FAULT_TRANSLATION:
665 	case CORE_MMU_FAULT_READ_PERMISSION:
666 		if (ai->abort_type == ABORT_TYPE_PREFETCH &&
667 		    !(attr & TEE_MATTR_PX)) {
668 			/* Attempting to execute from a non-executable page */
669 			abort_print_error(ai);
670 			panic();
671 		}
672 		/* Since the page is mapped now it's OK */
673 		return true;
674 	case CORE_MMU_FAULT_WRITE_PERMISSION:
675 		if (!(area->flags & TEE_PAGER_AREA_RW)) {
676 			/* Attempting to write to an RO page */
677 			abort_print_error(ai);
678 			panic();
679 		}
680 		if (!(attr & TEE_MATTR_PW)) {
681 			FMSG("Dirty %p", (void *)(ai->va & ~SMALL_PAGE_MASK));
682 			core_mmu_set_entry(&tee_pager_tbl_info, pgidx,
683 					   pa, attr | TEE_MATTR_PW);
684 			/* TODO only invalidate entry above */
685 			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
686 		}
687 		/* Since the permissions have been updated it's OK now */
688 		return true;
689 	default:
690 		/* Some fault we can't deal with */
691 		abort_print_error(ai);
692 		panic();
693 	}
694 
695 }
696 
697 #ifdef CFG_TEE_CORE_DEBUG
698 static void stat_handle_fault(void)
699 {
700 	static size_t num_faults;
701 	static size_t min_npages = SIZE_MAX;
702 	static size_t total_min_npages = SIZE_MAX;
703 
704 	num_faults++;
705 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
706 		DMSG("nfaults %zu npages %zu (min %zu)",
707 		     num_faults, tee_pager_npages, min_npages);
708 		min_npages = tee_pager_npages; /* reset */
709 	}
710 	if (tee_pager_npages < min_npages)
711 		min_npages = tee_pager_npages;
712 	if (tee_pager_npages < total_min_npages)
713 		total_min_npages = tee_pager_npages;
714 }
715 #else
716 static void stat_handle_fault(void)
717 {
718 }
719 #endif
720 
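/*
 * Main pager fault handler: resolves an abort on a paged address by
 * unhiding the page, updating its permissions or paging it in via the
 * alias mapping, then hides some of the oldest pages again.
 */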
721 void tee_pager_handle_fault(struct abort_info *ai)
722 {
723 	struct tee_pager_area *area;
724 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
725 	uint32_t exceptions;
726 
727 #ifdef TEE_PAGER_DEBUG_PRINT
728 	abort_print(ai);
729 #endif
730 
731 	/*
732 	 * We're updating pages that can affect several active CPUs at a
733 	 * time below. We end up here because a thread tries to access some
734 	 * memory that isn't available. We have to be careful when making
735 	 * that memory available as other threads may succeed in accessing
736 	 * that address the moment after we've made it available.
737 	 *
738 	 * That means that we can't just map the memory and populate the
739 	 * page; instead we use the aliased mapping to populate the page
740 	 * and once everything is ready we map it.
741 	 */
742 	exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
743 	cpu_spin_lock(&pager_lock);
744 
745 	stat_handle_fault();
746 
747 	/* check if the access is valid */
748 	area = tee_pager_find_area(ai->va);
749 	if (!area) {
750 		abort_print_error(ai);
751 		EMSG("Invalid addr 0x%" PRIxVA, ai->va);
752 		panic();
753 	}
754 
755 	if (!tee_pager_unhide_page(page_va)) {
756 		struct tee_pager_pmem *pmem = NULL;
757 		uint32_t attr;
758 
759 		/*
760 		 * The page wasn't hidden, but some other core may have
761 		 * updated the table entry before we got here or we need
762 		 * to make a read-only page read-write (dirty).
763 		 */
764 		if (pager_update_permissions(area, ai)) {
765 			/*
766 			 * This kind of access is OK with the mapping; we're
767 			 * done here because the fault has already been
768 			 * dealt with by another core.
769 			 */
770 			goto out;
771 		}
772 
773 		pmem = tee_pager_get_page(area->flags);
774 		if (!pmem) {
775 			abort_print(ai);
776 			panic();
777 		}
778 
779 		/* load page code & data */
780 		tee_pager_load_page(area, page_va, pmem->va_alias);
781 
782 		/*
783 		 * We've updated the page using the aliased mapping and
784 		 * some cache maintenance is now needed if it's an
785 		 * executable page.
786 		 *
787 		 * Since the d-cache is a Physically-indexed,
788 		 * physically-tagged (PIPT) cache we can clean the aliased
789 		 * address instead of the real virtual address.
790 		 *
791 		 * The i-cache can also be PIPT, but may be something else
792 		 * too; to keep it simple we invalidate the entire i-cache.
793 		 * As a future optimization we may invalidate only the
794 		 * aliased area if it is a PIPT cache, else the entire cache.
795 		 */
796 		if (area->flags & TEE_PAGER_AREA_X) {
797 			/*
798 			 * Doing these operations to LoUIS (Level of
799 			 * unification, Inner Shareable) would be enough
800 			 */
801 			cache_maintenance_l1(DCACHE_AREA_CLEAN,
802 				pmem->va_alias, SMALL_PAGE_SIZE);
803 
804 			cache_maintenance_l1(ICACHE_INVALIDATE, NULL, 0);
805 		}
806 
807 		pmem->area = area;
808 		pmem->pgidx = core_mmu_va2idx(&tee_pager_tbl_info, ai->va);
809 		attr = get_area_mattr(area) & ~(TEE_MATTR_PW | TEE_MATTR_UW);
810 		core_mmu_set_entry(&tee_pager_tbl_info, pmem->pgidx,
811 				   get_pmem_pa(pmem), attr);
812 
813 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
814 		     core_mmu_idx2va(&tee_pager_tbl_info, pmem->pgidx),
815 				     get_pmem_pa(pmem));
816 
817 	}
818 
819 	tee_pager_hide_pages();
820 out:
821 	cpu_spin_unlock(&pager_lock);
822 	thread_unmask_exceptions(exceptions);
823 }
824 
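/*
 * Registers @npages physical pages starting at @vaddr as pageable. With
 * @unmap the pages are unmapped and become immediately available to the
 * pager, otherwise they keep their current mapping and are assigned to
 * the pager area covering them.
 */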
825 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
826 {
827 	size_t n;
828 
829 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
830 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
831 
832 	/* setup memory */
833 	for (n = 0; n < npages; n++) {
834 		struct tee_pager_pmem *pmem;
835 		tee_vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
836 		unsigned pgidx = core_mmu_va2idx(&tee_pager_tbl_info, va);
837 		paddr_t pa;
838 		uint32_t attr;
839 
840 		core_mmu_get_entry(&tee_pager_tbl_info, pgidx, &pa, &attr);
841 
842 		/* Ignore unmapped pages/blocks */
843 		if (!(attr & TEE_MATTR_VALID_BLOCK))
844 			continue;
845 
846 		pmem = malloc(sizeof(struct tee_pager_pmem));
847 		if (pmem == NULL) {
848 			EMSG("Can't allocate memory");
849 			panic();
850 		}
851 
852 		pmem->va_alias = pager_add_alias_page(pa);
853 
854 		if (unmap) {
855 			pmem->area = NULL;
856 			pmem->pgidx = INVALID_PGIDX;
857 			core_mmu_set_entry(&tee_pager_tbl_info, pgidx, 0, 0);
858 		} else {
859 			/*
860 			 * The page is still mapped, let's assign the area
861 			 * and update the protection bits accordingly.
862 			 */
863 			pmem->area = tee_pager_find_area(va);
864 			pmem->pgidx = pgidx;
865 			assert(pa == get_pmem_pa(pmem));
866 			core_mmu_set_entry(&tee_pager_tbl_info, pgidx, pa,
867 					   get_area_mattr(pmem->area));
868 		}
869 
870 		tee_pager_npages++;
871 		incr_npages_all();
872 		set_npages();
873 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
874 	}
875 
876 	/* Invalidate secure TLB */
877 	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
878 }
879 
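/*
 * Releases the locked physical pages backing the page-aligned part of
 * [addr, addr + size) back to the pool of pageable pages.
 */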
880 void tee_pager_release_phys(void *addr, size_t size)
881 {
882 	bool unmapped = false;
883 	vaddr_t va = (vaddr_t)addr;
884 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
885 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
886 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
887 
888 	cpu_spin_lock(&pager_lock);
889 
890 	for (va = begin; va < end; va += SMALL_PAGE_SIZE)
891 		unmapped |= tee_pager_release_one_phys(va);
892 
893 	/* Invalidate secure TLB */
894 	if (unmapped)
895 		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
896 
897 	cpu_spin_unlock(&pager_lock);
898 	thread_set_exceptions(exceptions);
899 }
900 KEEP_PAGER(tee_pager_release_phys);
901 
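/*
 * Allocates pager-backed virtual memory and registers it as a read/write
 * (optionally locked) area. Illustrative use, assuming two pages of
 * zero-initialized pageable memory are wanted:
 *
 *   void *buf = tee_pager_alloc(2 * SMALL_PAGE_SIZE, 0);
 *
 * Physical pages are only assigned when the memory is actually accessed.
 */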
902 void *tee_pager_alloc(size_t size, uint32_t flags)
903 {
904 	tee_mm_entry_t *mm;
905 	uint32_t f = TEE_PAGER_AREA_RW | (flags & TEE_PAGER_AREA_LOCK);
906 
907 	if (!size)
908 		return NULL;
909 
910 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
911 	if (!mm)
912 		return NULL;
913 
914 	tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
915 				f, NULL, NULL);
916 
917 	return (void *)tee_mm_get_smem(mm);
918 }
919