/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <kernel/abort.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/tz_proc.h>
#include <mm/core_memprot.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu_defs.h>
#include <mm/tee_pager.h>
#include <types_ext.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_defines.h>
#include <tee/tee_cryp_provider.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>
#include <keep.h>
#include "pager_private.h"

#define PAGER_AE_KEY_BITS	256

struct pager_rw_pstate {
	uint64_t iv;
	uint8_t tag[PAGER_AES_GCM_TAG_LEN];
};

struct tee_pager_area {
	union {
		const uint8_t *hashes;
		struct pager_rw_pstate *rwp;
	} u;
	uint8_t *store;
	uint32_t flags;
	vaddr_t base;
	size_t size;
	TAILQ_ENTRY(tee_pager_area) link;
};

static TAILQ_HEAD(tee_pager_area_head, tee_pager_area) tee_pager_area_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);

#define INVALID_PGIDX	UINT_MAX

/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @pgidx	An index of the entry in tee_pager_tbl_info.
 * @va_alias	Virtual address where the physical page always is aliased.
 *		Used during remapping of the page when the content needs to
 *		be updated before it's available at the new location.
 * @area	A pointer to the pager area
 */
struct tee_pager_pmem {
	unsigned pgidx;
	void *va_alias;
	struct tee_pager_area *area;
	TAILQ_ENTRY(tee_pager_pmem) link;
};

/* The list of physical pages. The first page in the list is the oldest */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);

static struct tee_pager_pmem_head tee_pager_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);

static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);

static uint8_t pager_ae_key[PAGER_AE_KEY_BITS / 8];

/* Maximum number of pages to consider for hiding at a time */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)

/* Number of registered physical pages, used when hiding pages. */
static size_t tee_pager_npages;

#ifdef CFG_WITH_STATS
static struct tee_pager_stats pager_stats;

static inline void incr_ro_hits(void)
{
	pager_stats.ro_hits++;
}

static inline void incr_rw_hits(void)
{
	pager_stats.rw_hits++;
}

static inline void incr_hidden_hits(void)
{
	pager_stats.hidden_hits++;
}

static inline void incr_zi_released(void)
{
	pager_stats.zi_released++;
}

static inline void incr_npages_all(void)
{
	pager_stats.npages_all++;
}

static inline void set_npages(void)
{
	pager_stats.npages = tee_pager_npages;
}

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	*stats = pager_stats;

	pager_stats.hidden_hits = 0;
	pager_stats.ro_hits = 0;
	pager_stats.rw_hits = 0;
	pager_stats.zi_released = 0;
}

#else /* CFG_WITH_STATS */
static inline void incr_ro_hits(void) { }
static inline void incr_rw_hits(void) { }
static inline void incr_hidden_hits(void) { }
static inline void incr_zi_released(void) { }
static inline void incr_npages_all(void) { }
static inline void set_npages(void) { }

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	memset(stats, 0, sizeof(struct tee_pager_stats));
}
#endif /* CFG_WITH_STATS */
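
/*
 * Illustrative sketch only (not part of this file): a debug consumer
 * could sample the counters as below. Since the hit counters are reset
 * on every read, repeated calls yield deltas rather than running
 * totals. The %zu specifiers assume the size_t fields declared for
 * struct tee_pager_stats in <mm/tee_pager.h>.
 *
 *	struct tee_pager_stats stats;
 *
 *	tee_pager_get_stats(&stats);
 *	DMSG("hidden %zu ro %zu rw %zu zi %zu",
 *	     stats.hidden_hits, stats.ro_hits, stats.rw_hits,
 *	     stats.zi_released);
 */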

struct core_mmu_table_info tee_pager_tbl_info;
static struct core_mmu_table_info pager_alias_tbl_info;

static unsigned pager_lock = SPINLOCK_UNLOCK;

/* Defines the range of the alias area */
static tee_mm_entry_t *pager_alias_area;
/*
 * Physical pages are added to the alias area in a stack-like fashion.
 * @pager_alias_next_free holds the address of the next free entry, if
 * it is non-zero.
 */
static uintptr_t pager_alias_next_free;

static void set_alias_area(tee_mm_entry_t *mm)
{
	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
	size_t tbl_va_size;
	unsigned idx;
	unsigned last_idx;
	vaddr_t smem = tee_mm_get_smem(mm);
	size_t nbytes = tee_mm_get_bytes(mm);

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);

	TEE_ASSERT(!pager_alias_area);
	if (!ti->num_entries && !core_mmu_find_table(smem, UINT_MAX, ti)) {
		DMSG("Can't find translation table");
		panic();
	}
	if ((1 << ti->shift) != SMALL_PAGE_SIZE) {
		DMSG("Unsupported page size in translation table %u",
		     1 << ti->shift);
		panic();
	}

	tbl_va_size = (1 << ti->shift) * ti->num_entries;
	if (!core_is_buffer_inside(smem, nbytes,
				   ti->va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
			smem, nbytes, ti->va_base, tbl_va_size);
		panic();
	}

	TEE_ASSERT(!(smem & SMALL_PAGE_MASK));
	TEE_ASSERT(!(nbytes & SMALL_PAGE_MASK));

	pager_alias_area = mm;
	pager_alias_next_free = smem;

	/* Clear all mappings in the alias area */
	idx = core_mmu_va2idx(ti, smem);
	last_idx = core_mmu_va2idx(ti, smem + nbytes);
	for (; idx < last_idx; idx++)
		core_mmu_set_entry(ti, idx, 0, 0);

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

static void generate_ae_key(void)
{
	TEE_Result res;

	res = rng_generate(pager_ae_key, sizeof(pager_ae_key));
	TEE_ASSERT(res == TEE_SUCCESS);
}

void tee_pager_init(tee_mm_entry_t *mm_alias)
{
	set_alias_area(mm_alias);
	generate_ae_key();
}
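
/*
 * Illustrative sketch only: boot code is expected to carve the alias
 * area out of the core virtual memory pool before the pager is
 * initialized. The size of the area (alias_nbytes below) is a
 * hypothetical platform-specific value, not something mandated by this
 * file.
 *
 *	tee_mm_entry_t *mm = tee_mm_alloc(&tee_mm_vcore, alias_nbytes);
 *
 *	if (!mm)
 *		panic();
 *	tee_pager_init(mm);
 */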

static void *pager_add_alias_page(paddr_t pa)
{
	unsigned idx;
	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
			TEE_MATTR_SECURE | TEE_MATTR_PRW;

	DMSG("0x%" PRIxPA, pa);

	TEE_ASSERT(pager_alias_next_free && ti->num_entries);
	idx = core_mmu_va2idx(ti, pager_alias_next_free);
	core_mmu_set_entry(ti, idx, pa, attr);
	pager_alias_next_free += SMALL_PAGE_SIZE;
	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
				      tee_mm_get_bytes(pager_alias_area)))
		pager_alias_next_free = 0;
	return (void *)core_mmu_idx2va(ti, idx);
}

static struct tee_pager_area *alloc_area(vaddr_t base, size_t size,
			uint32_t flags, const void *store, const void *hashes)
{
	struct tee_pager_area *area = calloc(1, sizeof(*area));
	tee_mm_entry_t *mm_store = NULL;

	if (!area)
		return NULL;

	if (flags & TEE_MATTR_PW) {
		if (flags & TEE_MATTR_LOCKED)
			goto out;
		mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size);
		if (!mm_store)
			goto bad;
		area->store = phys_to_virt(tee_mm_get_smem(mm_store),
					   MEM_AREA_TA_RAM);
		if (!area->store)
			goto bad;
		area->u.rwp = calloc(size / SMALL_PAGE_SIZE,
				     sizeof(struct pager_rw_pstate));
		if (!area->u.rwp)
			goto bad;
	} else {
		area->store = (void *)store;
		area->u.hashes = hashes;
	}
out:
	area->base = base;
	area->size = size;
	area->flags = flags;
	return area;
bad:
	tee_mm_free(mm_store);
	free(area->u.rwp);
	free(area);
	return NULL;
}

static void area_insert_tail(struct tee_pager_area *area)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	cpu_spin_lock(&pager_lock);

	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);

	cpu_spin_unlock(&pager_lock);
	thread_set_exceptions(exceptions);
}
KEEP_PAGER(area_insert_tail);

bool tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
			const void *store, const void *hashes)
{
	struct tee_pager_area *area;
	size_t tbl_va_size;
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;

	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
		base, base + size, flags, store, hashes);

	TEE_ASSERT(!(base & SMALL_PAGE_MASK) &&
			size && !(size & SMALL_PAGE_MASK));

	/*
	 * Writable areas allocate their own backing store, read-only
	 * areas must be supplied with both a store and hashes.
	 */
	if (flags & TEE_MATTR_PW)
		TEE_ASSERT(!store && !hashes);
	else
		TEE_ASSERT(store && hashes);

	tbl_va_size = (1 << ti->shift) * ti->num_entries;
	if (!core_is_buffer_inside(base, size, ti->va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
			base, size, ti->va_base, tbl_va_size);
		return false;
	}

	area = alloc_area(base, size, flags, store, hashes);
	if (!area)
		return false;

	area_insert_tail(area);
	return true;
}
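
/*
 * Illustrative sketch only: registering a read-only executable area
 * backed by a store of page content and per-page SHA-256 hashes. The
 * __pageable_start, pageable_size, paged_store and paged_hashes names
 * are hypothetical stand-ins for linker/boot provided values.
 *
 *	if (!tee_pager_add_core_area((vaddr_t)__pageable_start,
 *				     pageable_size,
 *				     TEE_MATTR_PR | TEE_MATTR_PX,
 *				     paged_store, paged_hashes))
 *		panic();
 */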

static struct tee_pager_area *tee_pager_find_area(vaddr_t va)
{
	struct tee_pager_area *area;

	TAILQ_FOREACH(area, &tee_pager_area_head, link) {
		if (core_is_buffer_inside(va, 1, area->base, area->size))
			return area;
	}
	return NULL;
}

static uint32_t get_area_mattr(struct tee_pager_area *area)
{
	return TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
	       TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
	       TEE_MATTR_SECURE | TEE_MATTR_PR |
	       (area->flags & TEE_MATTR_PRWX);
}

static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
{
	paddr_t pa;
	unsigned idx;

	idx = core_mmu_va2idx(&pager_alias_tbl_info, (vaddr_t)pmem->va_alias);
	core_mmu_get_entry(&pager_alias_tbl_info, idx, &pa, NULL);
	return pa;
}

static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
			void *dst)
{
	struct pager_aes_gcm_iv iv = {
		{ (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }
	};

	return pager_aes_gcm_decrypt(pager_ae_key, sizeof(pager_ae_key),
				     &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE);
}

static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
{
	struct pager_aes_gcm_iv iv;

	assert((rwp->iv + 1) > rwp->iv);
	rwp->iv++;
	/*
	 * IV is constructed as recommended in section "8.2.1 Deterministic
	 * Construction" of "Recommendation for Block Cipher Modes of
	 * Operation: Galois/Counter Mode (GCM) and GMAC",
	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
	 */
	iv.iv[0] = (vaddr_t)rwp;
	iv.iv[1] = rwp->iv >> 32;
	iv.iv[2] = rwp->iv;

	if (!pager_aes_gcm_encrypt(pager_ae_key, sizeof(pager_ae_key),
				   &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE))
		panic();
}
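
/*
 * For reference, the three-word IV built above decomposes as (with a
 * 32-bit vaddr_t):
 *
 *	iv.iv[0] = (vaddr_t)rwp    fixed field, unique per page state
 *	iv.iv[1] = rwp->iv >> 32   invocation field, high word
 *	iv.iv[2] = rwp->iv         invocation field, low word
 *
 * The fixed field differs between pages and the invocation counter is
 * incremented before every encryption (with the assert above guarding
 * against wrap-around), so a (key, IV) pair is never reused.
 */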

static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
			void *va_alias)
{
	size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
	const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;

	if (!(area->flags & TEE_MATTR_PW)) {
		const void *hash = area->u.hashes + idx * TEE_SHA256_HASH_SIZE;

		memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
		incr_ro_hits();

		if (hash_sha256_check(hash, va_alias, SMALL_PAGE_SIZE) !=
				TEE_SUCCESS) {
			EMSG("PH 0x%" PRIxVA " failed", page_va);
			panic();
		}
	} else if (area->flags & TEE_MATTR_LOCKED) {
		FMSG("Zero init %p %#" PRIxVA, va_alias, page_va);
		memset(va_alias, 0, SMALL_PAGE_SIZE);
	} else {
		FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64,
			va_alias, page_va, area->u.rwp[idx].iv);
		if (!area->u.rwp[idx].iv)
			memset(va_alias, 0, SMALL_PAGE_SIZE);
		else if (!decrypt_page(&area->u.rwp[idx], stored_page,
				       va_alias)) {
			EMSG("PH 0x%" PRIxVA " failed", page_va);
			panic();
		}
		incr_rw_hits();
	}
}

static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;

	assert(!(pmem->area->flags & TEE_MATTR_LOCKED));

	if (attr & dirty_bits) {
		size_t idx = pmem->pgidx - core_mmu_va2idx(ti,
							   pmem->area->base);
		void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE;

		assert(pmem->area->flags & TEE_MATTR_PW);
		encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias,
			     stored_page);
		FMSG("Saved %#" PRIxVA " iv %#" PRIx64,
			core_mmu_idx2va(ti, pmem->pgidx),
			pmem->area->u.rwp[idx].iv);
	}
}

static bool tee_pager_unhide_page(vaddr_t page_va)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	struct tee_pager_pmem *pmem;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		if (pmem->pgidx == INVALID_PGIDX)
			continue;

		core_mmu_get_entry(ti, pmem->pgidx, &pa, &attr);

		if (!(attr &
		     (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
			continue;

		if (core_mmu_va2idx(ti, page_va) == pmem->pgidx) {
			uint32_t a = get_area_mattr(pmem->area);

			/* page is hidden, show and move to back */
			assert(pa == get_pmem_pa(pmem));
			/*
			 * If it's not a dirty block, then it should be
			 * read only.
			 */
			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
			else
				FMSG("Unhide %#" PRIxVA, page_va);
			core_mmu_set_entry(ti, pmem->pgidx, pa, a);

			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);

			/* TODO only invalidate entry touched above */
			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

			incr_hidden_hits();
			return true;
		}
	}

	return false;
}

static void tee_pager_hide_pages(void)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	struct tee_pager_pmem *pmem;
	size_t n = 0;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;
		uint32_t a;

		if (n >= TEE_PAGER_NHIDE)
			break;
		n++;

		/*
		 * We cannot hide pages when pmem->area is not defined as
		 * unhide requires pmem->area to be defined.
		 */
		if (!pmem->area)
			continue;

		core_mmu_get_entry(ti, pmem->pgidx, &pa, &attr);
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		assert(pa == get_pmem_pa(pmem));
		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)) {
			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
			FMSG("Hide %#" PRIxVA,
			     ti->va_base + pmem->pgidx * SMALL_PAGE_SIZE);
		} else {
			a = TEE_MATTR_HIDDEN_BLOCK;
		}
		core_mmu_set_entry(ti, pmem->pgidx, pa, a);
	}

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

/*
 * Find the mapped pmem, unmap it and move it to the pageable pmem list.
 * Return false if the page was not mapped, and true if it was.
 */
static bool tee_pager_release_one_phys(vaddr_t page_va)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	struct tee_pager_pmem *pmem;
	unsigned pgidx;
	paddr_t pa;
	uint32_t attr;

	pgidx = core_mmu_va2idx(ti, page_va);
	core_mmu_get_entry(ti, pgidx, &pa, &attr);

	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);

	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
		if (pmem->pgidx != pgidx)
			continue;

		assert(pa == get_pmem_pa(pmem));
		core_mmu_set_entry(ti, pgidx, 0, 0);
		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
		pmem->area = NULL;
		pmem->pgidx = INVALID_PGIDX;
		tee_pager_npages++;
		set_npages();
		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
		incr_zi_released();
		return true;
	}

	return false;
}

/* Finds the oldest page and unmaps it from its old virtual address */
static struct tee_pager_pmem *tee_pager_get_page(uint32_t next_area_flags)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	struct tee_pager_pmem *pmem;

	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
	if (!pmem) {
		EMSG("No pmem entries");
		return NULL;
	}
	if (pmem->pgidx != INVALID_PGIDX) {
		uint32_t a;

		core_mmu_get_entry(ti, pmem->pgidx, NULL, &a);
		core_mmu_set_entry(ti, pmem->pgidx, 0, 0);
		/* TODO only invalidate entries touched above */
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
		tee_pager_save_page(pmem, a);
	}

	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
	pmem->pgidx = INVALID_PGIDX;
	pmem->area = NULL;
	if (next_area_flags & TEE_MATTR_LOCKED) {
		/* Move page to lock list */
		TEE_ASSERT(tee_pager_npages > 0);
		tee_pager_npages--;
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
	} else {
		/* move page to back */
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	return pmem;
}

static bool pager_update_permissions(struct tee_pager_area *area,
			struct abort_info *ai)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	unsigned pgidx = core_mmu_va2idx(ti, ai->va);
	uint32_t attr;
	paddr_t pa;

	core_mmu_get_entry(ti, pgidx, &pa, &attr);

	/* Not mapped */
	if (!(attr & TEE_MATTR_VALID_BLOCK))
		return false;

	/* Not readable, should not happen */
	if (!(attr & TEE_MATTR_PR)) {
		abort_print_error(ai);
		panic();
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		if (ai->abort_type == ABORT_TYPE_PREFETCH &&
		    !(attr & TEE_MATTR_PX)) {
			/* Attempting to execute from a no-exec page */
			abort_print_error(ai);
			panic();
		}
		/* Since the page is mapped now it's OK */
		return true;
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		if (!(area->flags & TEE_MATTR_PW)) {
			/* Attempting to write to an RO page */
			abort_print_error(ai);
			panic();
		}
		if (!(attr & TEE_MATTR_PW)) {
			FMSG("Dirty %p", (void *)(ai->va & ~SMALL_PAGE_MASK));
			core_mmu_set_entry(ti, pgidx, pa, attr | TEE_MATTR_PW);
			/* TODO only invalidate entry above */
			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
		}
		/* Since permissions have been updated now it's OK */
		return true;
	default:
		/* Some fault we can't deal with */
		abort_print_error(ai);
		panic();
	}
}

#ifdef CFG_TEE_CORE_DEBUG
static void stat_handle_fault(void)
{
	static size_t num_faults;
	static size_t min_npages = SIZE_MAX;
	static size_t total_min_npages = SIZE_MAX;

	num_faults++;
	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
		DMSG("nfaults %zu npages %zu (min %zu)",
		     num_faults, tee_pager_npages, min_npages);
		min_npages = tee_pager_npages; /* reset */
	}
	if (tee_pager_npages < min_npages)
		min_npages = tee_pager_npages;
	if (tee_pager_npages < total_min_npages)
		total_min_npages = tee_pager_npages;
}
#else
static void stat_handle_fault(void)
{
}
#endif

bool tee_pager_handle_fault(struct abort_info *ai)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	struct tee_pager_area *area;
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
	uint32_t exceptions;
	bool ret;

#ifdef TEE_PAGER_DEBUG_PRINT
	abort_print(ai);
#endif

	/*
	 * We're updating pages that can affect several active CPUs at a
	 * time below. We end up here because a thread tries to access some
	 * memory that isn't available. We have to be careful when making
	 * that memory available as other threads may succeed in accessing
	 * that address the moment after we've made it available.
	 *
	 * That means that we can't just map the memory and populate the
	 * page, instead we use the aliased mapping to populate the page
	 * and once everything is ready we map it.
	 */
	exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	cpu_spin_lock(&pager_lock);

	stat_handle_fault();

	/* check if the access is valid */
	area = tee_pager_find_area(ai->va);
	if (!area) {
		EMSG("Invalid addr 0x%" PRIxVA, ai->va);
		ret = false;
		goto out;
	}

	if (!tee_pager_unhide_page(page_va)) {
		struct tee_pager_pmem *pmem = NULL;
		uint32_t attr;

		/*
		 * The page wasn't hidden, but some other core may have
		 * updated the table entry before we got here or we need
		 * to make a read-only page read-write (dirty).
		 */
		if (pager_update_permissions(area, ai)) {
			/*
			 * The access is OK with the mapping, we're done
			 * here because the fault has already been dealt
			 * with, either by another core or by the
			 * permission update above.
			 */
			ret = true;
			goto out;
		}

		pmem = tee_pager_get_page(area->flags);
		if (!pmem) {
			abort_print(ai);
			panic();
		}

		/* load page code & data */
		tee_pager_load_page(area, page_va, pmem->va_alias);

		/*
		 * We've updated the page using the aliased mapping and
		 * some cache maintenance is now needed if it's an
		 * executable page.
		 *
		 * Since the d-cache is a Physically-indexed,
		 * physically-tagged (PIPT) cache we can clean the aliased
		 * address instead of the real virtual address.
		 *
		 * The i-cache can also be PIPT, but may be something else
		 * too; to keep it simple we invalidate the entire i-cache.
		 * As a future optimization we may invalidate only the
		 * aliased area if it is a PIPT cache, else the entire
		 * cache.
		 */
		if (area->flags & TEE_MATTR_PX) {
			/*
			 * Doing these operations to LoUIS (Level of
			 * unification, Inner Shareable) would be enough
			 */
			cache_maintenance_l1(DCACHE_AREA_CLEAN,
				pmem->va_alias, SMALL_PAGE_SIZE);

			cache_maintenance_l1(ICACHE_INVALIDATE, NULL, 0);
		}

		pmem->area = area;
		pmem->pgidx = core_mmu_va2idx(ti, ai->va);
		attr = get_area_mattr(area) & ~(TEE_MATTR_PW | TEE_MATTR_UW);
		core_mmu_set_entry(ti, pmem->pgidx, get_pmem_pa(pmem), attr);

		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
		     core_mmu_idx2va(ti, pmem->pgidx), get_pmem_pa(pmem));
	}

	tee_pager_hide_pages();
	ret = true;
out:
	cpu_spin_unlock(&pager_lock);
	thread_unmask_exceptions(exceptions);
	return ret;
}

void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	size_t n;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);

	/* setup memory */
	for (n = 0; n < npages; n++) {
		struct tee_pager_pmem *pmem;
		tee_vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
		unsigned pgidx = core_mmu_va2idx(ti, va);
		paddr_t pa;
		uint32_t attr;

		core_mmu_get_entry(ti, pgidx, &pa, &attr);

		/* Ignore unmapped pages/blocks */
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		pmem = malloc(sizeof(struct tee_pager_pmem));
		if (pmem == NULL) {
			EMSG("Can't allocate memory");
			panic();
		}

		pmem->va_alias = pager_add_alias_page(pa);

		if (unmap) {
			pmem->area = NULL;
			pmem->pgidx = INVALID_PGIDX;
			core_mmu_set_entry(ti, pgidx, 0, 0);
		} else {
			/*
			 * The page is still mapped, let's assign the area
			 * and update the protection bits accordingly.
			 */
			pmem->area = tee_pager_find_area(va);
			pmem->pgidx = pgidx;
			assert(pa == get_pmem_pa(pmem));
			core_mmu_set_entry(ti, pgidx, pa,
					   get_area_mattr(pmem->area));
		}

		tee_pager_npages++;
		incr_npages_all();
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	/* Invalidate secure TLB */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}
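
/*
 * Illustrative sketch only: early boot code could donate the pages of
 * an already mapped region that is no longer needed, unmapping it in
 * the process. init_start_va and init_size are hypothetical stand-ins
 * for that region's location and size.
 *
 *	tee_pager_add_pages(init_start_va, init_size / SMALL_PAGE_SIZE,
 *			    true);
 */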

void tee_pager_release_phys(void *addr, size_t size)
{
	bool unmapped = false;
	vaddr_t va = (vaddr_t)addr;
	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	cpu_spin_lock(&pager_lock);

	for (va = begin; va < end; va += SMALL_PAGE_SIZE)
		unmapped |= tee_pager_release_one_phys(va);

	/* Invalidate secure TLB */
	if (unmapped)
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	cpu_spin_unlock(&pager_lock);
	thread_set_exceptions(exceptions);
}
KEEP_PAGER(tee_pager_release_phys);

void *tee_pager_alloc(size_t size, uint32_t flags)
{
	tee_mm_entry_t *mm;
	uint32_t f = TEE_MATTR_PRW | (flags & TEE_MATTR_LOCKED);

	if (!size)
		return NULL;

	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
	if (!mm)
		return NULL;

	tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
				f, NULL, NULL);

	return (void *)tee_mm_get_smem(mm);
}
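
/*
 * Illustrative sketch only: callers get demand-paged core memory with
 * flags 0, or pinned memory with TEE_MATTR_LOCKED. A locked page is
 * zero-initialized on first access and stays resident until handed
 * back with tee_pager_release_phys().
 *
 *	void *va = tee_pager_alloc(sz, TEE_MATTR_LOCKED);
 *	...
 *	tee_pager_release_phys(va, sz);
 */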
905