xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision 8ddf5a4e3ce277adee040d90758ec08b429e9e4f)
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <keep.h>
#include <sys/queue.h>
#include <kernel/abort.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/tz_proc.h>
#include <mm/core_memprot.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu_defs.h>
#include <mm/tee_pager.h>
#include <types_ext.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_defines.h>
#include <tee/tee_cryp_provider.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>

#include "pager_private.h"
#define PAGER_AE_KEY_BITS	256

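/*
 * struct pager_rw_pstate - Per-page crypto state for a paged read/write page
 * @iv		Counter part of the AES-GCM IV, incremented for each encryption
 * @tag		Authentication tag of the latest saved version of the page
 */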
struct pager_rw_pstate {
	uint64_t iv;
	uint8_t tag[PAGER_AES_GCM_TAG_LEN];
};

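/*
 * struct tee_pager_area - Represents a paged memory area
 * @u.hashes	SHA-256 hashes, one per page, for read-only areas
 * @u.rwp	Per-page crypto state for read/write areas
 * @store	Backing store of the area
 * @flags	TEE_MATTR_* flags describing the access rights of the area
 * @base	Virtual base address of the area
 * @size	Size of the area, a multiple of SMALL_PAGE_SIZE
 */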
struct tee_pager_area {
	union {
		const uint8_t *hashes;
		struct pager_rw_pstate *rwp;
	} u;
	uint8_t *store;
	uint32_t flags;
	vaddr_t base;
	size_t size;
	TAILQ_ENTRY(tee_pager_area) link;
};

static TAILQ_HEAD(tee_pager_area_head, tee_pager_area) tee_pager_area_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);

#define INVALID_PGIDX	UINT_MAX

/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @pgidx	An index of the entry in tee_pager_tbl_info.
 * @va_alias	Virtual address where the physical page always is aliased.
 *		Used during remapping of the page when the content needs to
 *		be updated before it's available at the new location.
 * @area	A pointer to the pager area
 */
struct tee_pager_pmem {
	unsigned pgidx;
	void *va_alias;
	struct tee_pager_area *area;
	TAILQ_ENTRY(tee_pager_pmem) link;
};

/* The list of physical pages. The first page in the list is the oldest */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);

static struct tee_pager_pmem_head tee_pager_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);

static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);

static uint8_t pager_ae_key[PAGER_AE_KEY_BITS / 8];

/* Maximum number of pages to hide at a time */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)

/* Number of registered physical pages, used for hiding pages. */
static size_t tee_pager_npages;

#ifdef CFG_WITH_STATS
static struct tee_pager_stats pager_stats;

static inline void incr_ro_hits(void)
{
	pager_stats.ro_hits++;
}

static inline void incr_rw_hits(void)
{
	pager_stats.rw_hits++;
}

static inline void incr_hidden_hits(void)
{
	pager_stats.hidden_hits++;
}

static inline void incr_zi_released(void)
{
	pager_stats.zi_released++;
}

static inline void incr_npages_all(void)
{
	pager_stats.npages_all++;
}

static inline void set_npages(void)
{
	pager_stats.npages = tee_pager_npages;
}

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	*stats = pager_stats;

	pager_stats.hidden_hits = 0;
	pager_stats.ro_hits = 0;
	pager_stats.rw_hits = 0;
	pager_stats.zi_released = 0;
}

#else /* CFG_WITH_STATS */
static inline void incr_ro_hits(void) { }
static inline void incr_rw_hits(void) { }
static inline void incr_hidden_hits(void) { }
static inline void incr_zi_released(void) { }
static inline void incr_npages_all(void) { }
static inline void set_npages(void) { }

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	memset(stats, 0, sizeof(struct tee_pager_stats));
}
#endif /* CFG_WITH_STATS */

struct core_mmu_table_info tee_pager_tbl_info;
static struct core_mmu_table_info pager_alias_tbl_info;

static unsigned pager_lock = SPINLOCK_UNLOCK;

/* Defines the range of the alias area */
static tee_mm_entry_t *pager_alias_area;
/*
 * Physical pages are added in a stack-like fashion to the alias area.
 * @pager_alias_next_free gives the address of the next free entry if
 * @pager_alias_next_free is != 0
 */
static uintptr_t pager_alias_next_free;

static void set_alias_area(tee_mm_entry_t *mm)
{
	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
	size_t tbl_va_size;
	unsigned idx;
	unsigned last_idx;
	vaddr_t smem = tee_mm_get_smem(mm);
	size_t nbytes = tee_mm_get_bytes(mm);

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);

	TEE_ASSERT(!pager_alias_area);
	if (!ti->num_entries && !core_mmu_find_table(smem, UINT_MAX, ti)) {
		DMSG("Can't find translation table");
		panic();
	}
	if ((1 << ti->shift) != SMALL_PAGE_SIZE) {
		DMSG("Unsupported page size in translation table %u",
		     1 << ti->shift);
		panic();
	}

	tbl_va_size = (1 << ti->shift) * ti->num_entries;
	if (!core_is_buffer_inside(smem, nbytes,
				   ti->va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
			smem, nbytes, ti->va_base, tbl_va_size);
		panic();
	}

	TEE_ASSERT(!(smem & SMALL_PAGE_MASK));
	TEE_ASSERT(!(nbytes & SMALL_PAGE_MASK));

	pager_alias_area = mm;
	pager_alias_next_free = smem;

	/* Clear all mappings in the alias area */
	idx = core_mmu_va2idx(ti, smem);
	last_idx = core_mmu_va2idx(ti, smem + nbytes);
	for (; idx < last_idx; idx++)
		core_mmu_set_entry(ti, idx, 0, 0);

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

static void generate_ae_key(void)
{
	TEE_Result res;

	res = rng_generate(pager_ae_key, sizeof(pager_ae_key));
	TEE_ASSERT(res == TEE_SUCCESS);
}

void tee_pager_init(tee_mm_entry_t *mm_alias)
{
	set_alias_area(mm_alias);
	generate_ae_key();
}

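/*
 * Maps the physical page @pa at the next free slot in the alias area and
 * returns the aliased virtual address. The alias area is consumed in a
 * stack-like fashion; once it's exhausted, pager_alias_next_free is set
 * to 0.
 */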
static void *pager_add_alias_page(paddr_t pa)
{
	unsigned idx;
	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
			TEE_MATTR_SECURE | TEE_MATTR_PRW;

	DMSG("0x%" PRIxPA, pa);

	TEE_ASSERT(pager_alias_next_free && ti->num_entries);
	idx = core_mmu_va2idx(ti, pager_alias_next_free);
	core_mmu_set_entry(ti, idx, pa, attr);
	pager_alias_next_free += SMALL_PAGE_SIZE;
	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
				      tee_mm_get_bytes(pager_alias_area)))
		pager_alias_next_free = 0;
	return (void *)core_mmu_idx2va(ti, idx);
}

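/*
 * Allocates and initializes a struct tee_pager_area. Read/write areas get
 * a backing store allocated from tee_mm_sec_ddr and one pager_rw_pstate per
 * page, except for locked areas which are never paged out. Read-only areas
 * reuse the supplied @store and @hashes.
 */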
static struct tee_pager_area *alloc_area(vaddr_t base, size_t size,
			uint32_t flags, const void *store, const void *hashes)
{
	struct tee_pager_area *area = calloc(1, sizeof(*area));
	tee_mm_entry_t *mm_store = NULL;

	if (!area)
		return NULL;

	if (flags & TEE_MATTR_PW) {
		if (flags & TEE_MATTR_LOCKED)
			goto out;
		mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size);
		if (!mm_store)
			goto bad;
		area->store = phys_to_virt(tee_mm_get_smem(mm_store),
					   MEM_AREA_TA_RAM);
		if (!area->store)
			goto bad;
		area->u.rwp = calloc(size / SMALL_PAGE_SIZE,
				     sizeof(struct pager_rw_pstate));
		if (!area->u.rwp)
			goto bad;
	} else {
		area->store = (void *)store;
		area->u.hashes = hashes;
	}
out:
	area->base = base;
	area->size = size;
	area->flags = flags;
	return area;
bad:
	tee_mm_free(mm_store);
	free(area->u.rwp);
	free(area);
	return NULL;
}

static void area_insert_tail(struct tee_pager_area *area)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	cpu_spin_lock(&pager_lock);

	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);

	cpu_spin_unlock(&pager_lock);
	thread_set_exceptions(exceptions);
}
KEEP_PAGER(area_insert_tail);

bool tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
			const void *store, const void *hashes)
{
	struct tee_pager_area *area;
	size_t tbl_va_size;
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;

	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
		base, base + size, flags, store, hashes);

	TEE_ASSERT(!(base & SMALL_PAGE_MASK) &&
			size && !(size & SMALL_PAGE_MASK));

	if (flags & TEE_MATTR_PW)
		TEE_ASSERT(!store && !hashes);
	else
		TEE_ASSERT(store && hashes);

	tbl_va_size = (1 << ti->shift) * ti->num_entries;
	if (!core_is_buffer_inside(base, size, ti->va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
			base, size, ti->va_base, tbl_va_size);
		return false;
	}

	area = alloc_area(base, size, flags, store, hashes);
	if (!area)
		return false;

	area_insert_tail(area);
	return true;
}

static struct tee_pager_area *tee_pager_find_area(vaddr_t va)
{
	struct tee_pager_area *area;

	TAILQ_FOREACH(area, &tee_pager_area_head, link) {
		if (core_is_buffer_inside(va, 1, area->base, area->size))
			return area;
	}
	return NULL;
}

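/*
 * Builds the full mapping attributes for a page in @area: a valid, global,
 * cached, secure and readable small-page entry combined with the access
 * rights requested for the area.
 */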
static uint32_t get_area_mattr(struct tee_pager_area *area)
{
	return TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
	       TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
	       TEE_MATTR_SECURE | TEE_MATTR_PR |
	       (area->flags & TEE_MATTR_PRWX);
}

static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
{
	paddr_t pa;
	unsigned idx;

	idx = core_mmu_va2idx(&pager_alias_tbl_info, (vaddr_t)pmem->va_alias);
	core_mmu_get_entry(&pager_alias_tbl_info, idx, &pa, NULL);
	return pa;
}

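/*
 * Decrypts one stored page into @dst. The IV is reconstructed the same way
 * encrypt_page() built it, from the address of the per-page state and its
 * 64-bit counter, so a page can only be successfully decrypted against the
 * state that was used when it was last saved.
 */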
static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
			void *dst)
{
	struct pager_aes_gcm_iv iv = {
		{ (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }
	};

	return pager_aes_gcm_decrypt(pager_ae_key, sizeof(pager_ae_key),
				     &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE);
}

static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
{
	struct pager_aes_gcm_iv iv;

	assert((rwp->iv + 1) > rwp->iv);
	rwp->iv++;
	/*
	 * IV is constructed as recommended in section "8.2.1 Deterministic
	 * Construction" of "Recommendation for Block Cipher Modes of
	 * Operation: Galois/Counter Mode (GCM) and GMAC",
	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
	 */
	iv.iv[0] = (vaddr_t)rwp;
	iv.iv[1] = rwp->iv >> 32;
	iv.iv[2] = rwp->iv;

	if (!pager_aes_gcm_encrypt(pager_ae_key, sizeof(pager_ae_key),
				   &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE))
		panic();
}

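/*
 * Populates the aliased page @va_alias with the content belonging at
 * @page_va in @area: read-only pages are copied from the store and verified
 * against their SHA-256 hash, locked pages are zero initialized, and
 * read/write pages are either zero initialized (never saved, iv == 0) or
 * decrypted from the store.
 */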
static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
			void *va_alias)
{
	size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
	const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;

	if (!(area->flags & TEE_MATTR_PW)) {
		const void *hash = area->u.hashes + idx * TEE_SHA256_HASH_SIZE;

		memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
		incr_ro_hits();

		if (hash_sha256_check(hash, va_alias, SMALL_PAGE_SIZE) !=
				TEE_SUCCESS) {
			EMSG("PH 0x%" PRIxVA " failed", page_va);
			panic();
		}
	} else if (area->flags & TEE_MATTR_LOCKED) {
		FMSG("Zero init %p %#" PRIxVA, va_alias, page_va);
		memset(va_alias, 0, SMALL_PAGE_SIZE);
	} else {
		FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64,
			va_alias, page_va, area->u.rwp[idx].iv);
		if (!area->u.rwp[idx].iv)
			memset(va_alias, 0, SMALL_PAGE_SIZE);
		else if (!decrypt_page(&area->u.rwp[idx], stored_page,
				       va_alias)) {
			EMSG("PH 0x%" PRIxVA " failed", page_va);
			panic();
		}
		incr_rw_hits();
	}
}

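/*
 * Writes a page back to its store before it's unmapped. Only pages that are
 * (or were, while hidden) writable are saved; the content is encrypted from
 * the aliased mapping into the store with a fresh IV and authentication tag.
 */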
static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;

	assert(!(pmem->area->flags & TEE_MATTR_LOCKED));

	if (attr & dirty_bits) {
		size_t idx = pmem->pgidx - core_mmu_va2idx(ti,
							   pmem->area->base);
		void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE;

		assert(pmem->area->flags & TEE_MATTR_PW);
		encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias,
			     stored_page);
		FMSG("Saved %#" PRIxVA " iv %#" PRIx64,
			core_mmu_idx2va(ti, pmem->pgidx),
			pmem->area->u.rwp[idx].iv);
	}
}

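/*
 * If @page_va refers to a hidden page, makes it accessible again (read-only
 * unless it was hidden dirty) and moves it to the back of the pmem list.
 * Returns true if a hidden page was found, false otherwise.
 */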
static bool tee_pager_unhide_page(vaddr_t page_va)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	struct tee_pager_pmem *pmem;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		if (pmem->pgidx == INVALID_PGIDX)
			continue;

		core_mmu_get_entry(ti, pmem->pgidx,
				   &pa, &attr);

		if (!(attr &
		     (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
			continue;

		if (core_mmu_va2idx(ti, page_va) == pmem->pgidx) {
			uint32_t a = get_area_mattr(pmem->area);

			/* page is hidden, show and move to back */
			TEE_ASSERT(pa == get_pmem_pa(pmem));
			/*
			 * If it's not a dirty block, then it should be
			 * read only.
			 */
			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
			else
				FMSG("Unhide %#" PRIxVA, page_va);
			core_mmu_set_entry(ti, pmem->pgidx, pa, a);

			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);

			/* TODO only invalidate entry touched above */
			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

			incr_hidden_hits();
			return true;
		}
	}

	return false;
}

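/*
 * Hides up to TEE_PAGER_NHIDE of the oldest mapped pages. A later access to
 * a hidden page faults, which lets the pager move recently used pages to
 * the back of the list. Writable pages are marked hidden dirty so that
 * their content is saved if the page is later reused.
 */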
static void tee_pager_hide_pages(void)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	struct tee_pager_pmem *pmem;
	size_t n = 0;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;
		uint32_t a;

		if (n >= TEE_PAGER_NHIDE)
			break;
		n++;

		/*
		 * We cannot hide pages when pmem->area is not defined, as
		 * unhiding requires pmem->area to be defined.
		 */
		if (!pmem->area)
			continue;

		core_mmu_get_entry(ti, pmem->pgidx, &pa, &attr);
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		assert(pa == get_pmem_pa(pmem));
		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)) {
			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
			FMSG("Hide %#" PRIxVA,
			     ti->va_base + pmem->pgidx * SMALL_PAGE_SIZE);
		} else
			a = TEE_MATTR_HIDDEN_BLOCK;
		core_mmu_set_entry(ti, pmem->pgidx, pa, a);
	}

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

/*
 * Finds the mapped pmem, unmaps it and moves it to the pageable pmem list.
 * Returns false if the page was not mapped, and true if it was.
 */
static bool tee_pager_release_one_phys(vaddr_t page_va)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	struct tee_pager_pmem *pmem;
	unsigned pgidx;
	paddr_t pa;
	uint32_t attr;

	pgidx = core_mmu_va2idx(ti, page_va);
	core_mmu_get_entry(ti, pgidx, &pa, &attr);

	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);

	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
		if (pmem->pgidx != pgidx)
			continue;

		assert(pa == get_pmem_pa(pmem));
		core_mmu_set_entry(ti, pgidx, 0, 0);
		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
		pmem->area = NULL;
		pmem->pgidx = INVALID_PGIDX;
		tee_pager_npages++;
		set_npages();
		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
		incr_zi_released();
		return true;
	}

	return false;
}

/* Finds the oldest page and unmaps it from its old virtual address */
static struct tee_pager_pmem *tee_pager_get_page(uint32_t next_area_flags)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	struct tee_pager_pmem *pmem;

	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
	if (!pmem) {
		EMSG("No pmem entries");
		return NULL;
	}
	if (pmem->pgidx != INVALID_PGIDX) {
		uint32_t a;

		core_mmu_get_entry(ti, pmem->pgidx, NULL, &a);
		core_mmu_set_entry(ti, pmem->pgidx, 0, 0);
		/* TODO only invalidate entries touched above */
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
		tee_pager_save_page(pmem, a);
	}

	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
	pmem->pgidx = INVALID_PGIDX;
	pmem->area = NULL;
	if (next_area_flags & TEE_MATTR_LOCKED) {
		/* Move page to lock list */
		TEE_ASSERT(tee_pager_npages > 0);
		tee_pager_npages--;
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
	} else {
		/* Move page to the back */
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	return pmem;
}

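/*
 * Handles a fault on an already mapped page: either another core mapped it
 * while we were taking the fault, or a write hit a page that is mapped
 * read-only and needs to be made writable (dirty). Returns true if the
 * fault was resolved this way, false if a page still needs to be paged in.
 */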
static bool pager_update_permissions(struct tee_pager_area *area,
			struct abort_info *ai)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	unsigned pgidx = core_mmu_va2idx(ti, ai->va);
	uint32_t attr;
	paddr_t pa;

	core_mmu_get_entry(ti, pgidx, &pa, &attr);

	/* Not mapped */
	if (!(attr & TEE_MATTR_VALID_BLOCK))
		return false;

	/* Not readable, should not happen */
	if (!(attr & TEE_MATTR_PR)) {
		abort_print_error(ai);
		panic();
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		if (ai->abort_type == ABORT_TYPE_PREFETCH &&
		    !(attr & TEE_MATTR_PX)) {
			/* Attempting to execute from an NOX page */
			abort_print_error(ai);
			panic();
		}
		/* Since the page is mapped now it's OK */
		return true;
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		if (!(area->flags & TEE_MATTR_PW)) {
			/* Attempting to write to an RO page */
			abort_print_error(ai);
			panic();
		}
		if (!(attr & TEE_MATTR_PW)) {
			FMSG("Dirty %p", (void *)(ai->va & ~SMALL_PAGE_MASK));
			core_mmu_set_entry(ti, pgidx, pa, attr | TEE_MATTR_PW);
			/* TODO only invalidate entry above */
			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
		}
		/* Since permissions have been updated it's OK now */
		return true;
	default:
		/* Some fault we can't deal with */
		abort_print_error(ai);
		panic();
	}
}

#ifdef CFG_TEE_CORE_DEBUG
static void stat_handle_fault(void)
{
	static size_t num_faults;
	static size_t min_npages = SIZE_MAX;
	static size_t total_min_npages = SIZE_MAX;

	num_faults++;
	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
		DMSG("nfaults %zu npages %zu (min %zu)",
		     num_faults, tee_pager_npages, min_npages);
		min_npages = tee_pager_npages; /* reset */
	}
	if (tee_pager_npages < min_npages)
		min_npages = tee_pager_npages;
	if (tee_pager_npages < total_min_npages)
		total_min_npages = tee_pager_npages;
}
#else
static void stat_handle_fault(void)
{
}
#endif

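/*
 * Main pager fault handler, called from the abort handler with the faulting
 * address in @ai->va. Returns true if the fault was handled (the page was
 * unhidden, its permissions were updated or it was paged in), false if the
 * address doesn't belong to any pager area.
 */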
bool tee_pager_handle_fault(struct abort_info *ai)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	struct tee_pager_area *area;
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
	uint32_t exceptions;
	bool ret;

#ifdef TEE_PAGER_DEBUG_PRINT
	abort_print(ai);
#endif

	/*
	 * We're updating pages that can affect several active CPUs at a
	 * time below. We end up here because a thread tries to access some
	 * memory that isn't available. We have to be careful when making
	 * that memory available as other threads may succeed in accessing
	 * that address the moment after we've made it available.
	 *
	 * That means that we can't just map the memory and populate the
	 * page, instead we use the aliased mapping to populate the page
	 * and once everything is ready we map it.
	 */
	exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	cpu_spin_lock(&pager_lock);

	stat_handle_fault();

	/* Check if the access is valid */
	area = tee_pager_find_area(ai->va);
	if (!area) {
		EMSG("Invalid addr 0x%" PRIxVA, ai->va);
		ret = false;
		goto out;
	}

	if (!tee_pager_unhide_page(page_va)) {
		struct tee_pager_pmem *pmem = NULL;
		uint32_t attr;

		/*
		 * The page wasn't hidden, but some other core may have
		 * updated the table entry before we got here, or we need
		 * to make a read-only page read-write (dirty).
		 */
		if (pager_update_permissions(area, ai)) {
			/*
			 * The kind of access is OK with the mapping, we're
			 * done here because the fault has already been
			 * dealt with by another core.
			 */
			ret = true;
			goto out;
		}

		pmem = tee_pager_get_page(area->flags);
		if (!pmem) {
			abort_print(ai);
			panic();
		}

		/* Load page code & data */
		tee_pager_load_page(area, page_va, pmem->va_alias);

		/*
		 * We've updated the page using the aliased mapping and
		 * some cache maintenance is now needed if it's an
		 * executable page.
		 *
		 * Since the d-cache is a physically-indexed,
		 * physically-tagged (PIPT) cache we can clean the aliased
		 * address instead of the real virtual address.
		 *
		 * The i-cache can also be PIPT, but may be something else
		 * too; to keep it simple we invalidate the entire i-cache.
		 * As a future optimization we may invalidate only the
		 * aliased area if it is a PIPT cache, else the entire
		 * cache.
		 */
		if (area->flags & TEE_MATTR_PX) {
			/*
			 * Doing these operations to LoUIS (Level of
			 * Unification, Inner Shareable) would be enough
			 */
			cache_maintenance_l1(DCACHE_AREA_CLEAN,
				pmem->va_alias, SMALL_PAGE_SIZE);

			cache_maintenance_l1(ICACHE_INVALIDATE, NULL, 0);
		}

		pmem->area = area;
		pmem->pgidx = core_mmu_va2idx(ti, ai->va);
		attr = get_area_mattr(area) & ~(TEE_MATTR_PW | TEE_MATTR_UW);
		core_mmu_set_entry(ti, pmem->pgidx, get_pmem_pa(pmem), attr);

		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
		     core_mmu_idx2va(ti, pmem->pgidx), get_pmem_pa(pmem));
	}

	tee_pager_hide_pages();
	ret = true;
out:
	cpu_spin_unlock(&pager_lock);
	thread_unmask_exceptions(exceptions);
	return ret;
}

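/*
 * Registers @npages physical pages, starting at @vaddr, with the pager.
 * With @unmap set the pages are unmapped and only used as pageable physical
 * memory; otherwise they stay mapped and are assigned to the area covering
 * them.
 */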
void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	size_t n;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);

	/* setup memory */
	for (n = 0; n < npages; n++) {
		struct tee_pager_pmem *pmem;
		tee_vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
		unsigned pgidx = core_mmu_va2idx(ti, va);
		paddr_t pa;
		uint32_t attr;

		core_mmu_get_entry(ti, pgidx, &pa, &attr);

		/* Ignore unmapped pages/blocks */
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		pmem = malloc(sizeof(struct tee_pager_pmem));
		if (pmem == NULL) {
			EMSG("Can't allocate memory");
			panic();
		}

		pmem->va_alias = pager_add_alias_page(pa);

		if (unmap) {
			pmem->area = NULL;
			pmem->pgidx = INVALID_PGIDX;
			core_mmu_set_entry(ti, pgidx, 0, 0);
		} else {
			/*
			 * The page is still mapped, let's assign the area
			 * and update the protection bits accordingly.
			 */
			pmem->area = tee_pager_find_area(va);
			pmem->pgidx = pgidx;
			assert(pa == get_pmem_pa(pmem));
			core_mmu_set_entry(ti, pgidx, pa,
					   get_area_mattr(pmem->area));
		}

		tee_pager_npages++;
		incr_npages_all();
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	/* Invalidate secure TLB */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

void tee_pager_release_phys(void *addr, size_t size)
{
	bool unmapped = false;
	vaddr_t va = (vaddr_t)addr;
	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	cpu_spin_lock(&pager_lock);

	for (va = begin; va < end; va += SMALL_PAGE_SIZE)
		unmapped |= tee_pager_release_one_phys(va);

	/* Invalidate secure TLB */
	if (unmapped)
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	cpu_spin_unlock(&pager_lock);
	thread_set_exceptions(exceptions);
}
KEEP_PAGER(tee_pager_release_phys);

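/*
 * Allocates pageable, zero-initialized read/write memory from the tee_mm_vcore
 * virtual address space and registers it with the pager. With
 * TEE_MATTR_LOCKED in @flags the pages are pinned once faulted in and are
 * only released again with tee_pager_release_phys().
 */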
void *tee_pager_alloc(size_t size, uint32_t flags)
{
	tee_mm_entry_t *mm;
	uint32_t f = TEE_MATTR_PRW | (flags & TEE_MATTR_LOCKED);

	if (!size)
		return NULL;

	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
	if (!mm)
		return NULL;

	tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
				f, NULL, NULL);

	return (void *)tee_mm_get_smem(mm);
}