/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <assert.h>
#include <kernel/abort.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/tz_proc.h>
#include <mm/core_mmu.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu_defs.h>
#include <mm/tee_pager.h>
#include <types_ext.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_defines.h>
#include <tee/tee_cryp_provider.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>

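/*
 * struct tee_pager_area - Describes a virtual memory range handled by
 * the pager.
 *
 * @hashes	per-page SHA-256 hashes for read-only areas, NULL otherwise
 * @store	backing store holding the page content for read-only areas,
 *		NULL for zero-initialized read/write areas
 * @flags	TEE_PAGER_AREA_* flags describing the access permissions
 * @mm		the virtual memory range covered by this area
 */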
struct tee_pager_area {
	const uint8_t *hashes;
	const uint8_t *store;
	uint32_t flags;
	tee_mm_entry_t *mm;
	TAILQ_ENTRY(tee_pager_area) link;
};

static TAILQ_HEAD(tee_pager_area_head, tee_pager_area) tee_pager_area_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);

/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @pgidx	index of the entry in tee_pager_tbl_info. The MMU entry at
 *		this index always holds the physical address of the page,
 *		even when the page isn't mapped.
 *
 * @va_alias	Virtual address where the physical page is always aliased.
 *		Used during remapping of the page when the content needs to
 *		be updated before it's available at the new location.
 *
 * @area	a pointer to the pager area
 */
struct tee_pager_pmem {
	unsigned pgidx;
	void *va_alias;
	struct tee_pager_area *area;
	TAILQ_ENTRY(tee_pager_pmem) link;
};

/* The list of physical pages. The first page in the list is the oldest */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);

static struct tee_pager_pmem_head tee_pager_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);

static struct tee_pager_pmem_head tee_pager_rw_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_rw_pmem_head);

/* Maximum number of pages to hide in one pass */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)

/* Number of registered physical pages, used when hiding pages. */
static size_t tee_pager_npages;

#ifdef CFG_WITH_STATS
static struct tee_pager_stats pager_stats;

static inline void incr_ro_hits(void)
{
	pager_stats.ro_hits++;
}

static inline void incr_rw_hits(void)
{
	pager_stats.rw_hits++;
}

static inline void incr_hidden_hits(void)
{
	pager_stats.hidden_hits++;
}

static inline void incr_zi_released(void)
{
	pager_stats.zi_released++;
}

static inline void incr_npages_all(void)
{
	pager_stats.npages_all++;
}

static inline void set_npages(void)
{
	pager_stats.npages = tee_pager_npages;
}

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	*stats = pager_stats;

	pager_stats.hidden_hits = 0;
	pager_stats.ro_hits = 0;
	pager_stats.rw_hits = 0;
	pager_stats.zi_released = 0;
}

#else /* CFG_WITH_STATS */
static inline void incr_ro_hits(void) { }
static inline void incr_rw_hits(void) { }
static inline void incr_hidden_hits(void) { }
static inline void incr_zi_released(void) { }
static inline void incr_npages_all(void) { }
static inline void set_npages(void) { }

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	memset(stats, 0, sizeof(struct tee_pager_stats));
}
#endif /* CFG_WITH_STATS */

struct core_mmu_table_info tee_pager_tbl_info;

static unsigned pager_lock = SPINLOCK_UNLOCK;

/* Defines the range of the alias area */
static tee_mm_entry_t *pager_alias_area;
/*
 * Physical pages are added to the alias area in a stack-like fashion.
 * @pager_alias_next_free gives the address of the next free entry, if
 * @pager_alias_next_free is != 0.
 */
static uintptr_t pager_alias_next_free;

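/*
 * Registers the alias area used for populating pages before they are
 * mapped at their final virtual address. Panics unless the area is
 * page aligned and fully covered by a small-page translation table,
 * then clears any existing mappings in the area.
 */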
void tee_pager_set_alias_area(tee_mm_entry_t *mm)
{
	struct core_mmu_table_info ti;
	size_t tbl_va_size;
	unsigned idx;
	unsigned last_idx;
	vaddr_t smem = tee_mm_get_smem(mm);
	size_t nbytes = tee_mm_get_bytes(mm);

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);

	TEE_ASSERT(!pager_alias_area);
	if (!core_mmu_find_table(smem, UINT_MAX, &ti)) {
		DMSG("Can't find translation table");
		panic();
	}
	if ((1 << ti.shift) != SMALL_PAGE_SIZE) {
		DMSG("Unsupported page size in translation table %u",
		     1 << ti.shift);
		panic();
	}

	tbl_va_size = (1 << ti.shift) * ti.num_entries;
	if (!core_is_buffer_inside(smem, nbytes,
				   ti.va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
			smem, nbytes, ti.va_base, tbl_va_size);
		panic();
	}

	TEE_ASSERT(!(smem & SMALL_PAGE_MASK));
	TEE_ASSERT(!(nbytes & SMALL_PAGE_MASK));

	pager_alias_area = mm;
	pager_alias_next_free = smem;

	/* Clear all mappings in the alias area */
	idx = core_mmu_va2idx(&ti, smem);
	last_idx = core_mmu_va2idx(&ti, smem + nbytes);
	for (; idx < last_idx; idx++)
		core_mmu_set_entry(&ti, idx, 0, 0);
}

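/*
 * Maps the physical page @pa read/write at the next free slot in the
 * alias area and returns the aliased virtual address. Asserts that a
 * free slot remains and panics if the translation table can't be found.
 */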
static void *pager_add_alias_page(paddr_t pa)
{
	unsigned idx;
	struct core_mmu_table_info ti;
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
			TEE_MATTR_SECURE | TEE_MATTR_PRW;

	DMSG("0x%" PRIxPA, pa);

	TEE_ASSERT(pager_alias_next_free);
	if (!core_mmu_find_table(pager_alias_next_free, UINT_MAX, &ti))
		panic();
	idx = core_mmu_va2idx(&ti, pager_alias_next_free);
	core_mmu_set_entry(&ti, idx, pa, attr);
	pager_alias_next_free += SMALL_PAGE_SIZE;
	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
				      tee_mm_get_bytes(pager_alias_area)))
		pager_alias_next_free = 0;
	return (void *)core_mmu_idx2va(&ti, idx);
}

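/*
 * Registers a new pager area covering @mm. Read-only areas must supply
 * a backing store and per-page SHA-256 hashes, read/write areas must
 * not. Returns false if the area doesn't fit in the pager's translation
 * table or if memory allocation fails.
 */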
bool tee_pager_add_area(tee_mm_entry_t *mm, uint32_t flags, const void *store,
		const void *hashes)
{
	struct tee_pager_area *area;
	size_t tbl_va_size;
	uint32_t exceptions;

	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
		tee_mm_get_smem(mm),
		tee_mm_get_smem(mm) + (mm->size << mm->pool->shift),
		flags, store, hashes);

	if (flags & TEE_PAGER_AREA_RO)
		TEE_ASSERT(store && hashes);
	else if (flags & TEE_PAGER_AREA_RW)
		TEE_ASSERT(!store && !hashes);
	else
		panic();

	if (!tee_pager_tbl_info.num_entries) {
		if (!core_mmu_find_table(tee_mm_get_smem(mm), UINT_MAX,
					&tee_pager_tbl_info))
			return false;
		if ((1 << tee_pager_tbl_info.shift) != SMALL_PAGE_SIZE) {
			DMSG("Unsupported page size in translation table %u",
			     1 << tee_pager_tbl_info.shift);
			return false;
		}
	}

	tbl_va_size = (1 << tee_pager_tbl_info.shift) *
			tee_pager_tbl_info.num_entries;
	if (!core_is_buffer_inside(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
				   tee_pager_tbl_info.va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
			tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
			tee_pager_tbl_info.va_base, tbl_va_size);
		return false;
	}

	area = malloc(sizeof(struct tee_pager_area));
	if (!area)
		return false;

	area->mm = mm;
	area->flags = flags;
	area->store = store;
	area->hashes = hashes;

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	cpu_spin_lock(&pager_lock);

	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);

	cpu_spin_unlock(&pager_lock);
	thread_set_exceptions(exceptions);
	return true;
}

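/* Returns the pager area covering @va, or NULL if none is registered */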
static struct tee_pager_area *tee_pager_find_area(vaddr_t va)
{
	struct tee_pager_area *area;

	TAILQ_FOREACH(area, &tee_pager_area_head, link) {
		tee_mm_entry_t *mm = area->mm;
		size_t offset = (va - mm->pool->lo) >> mm->pool->shift;

		if (offset >= mm->offset && offset < (mm->offset + mm->size))
			return area;
	}
	return NULL;
}

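/*
 * Builds the MMU attributes used when a page in @area is mapped:
 * always readable, writable unless the area is read-only and
 * executable only if the area has TEE_PAGER_AREA_X set.
 */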
static uint32_t get_area_mattr(struct tee_pager_area *area)
{
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
			TEE_MATTR_SECURE | TEE_MATTR_PR;

	if (!(area->flags & TEE_PAGER_AREA_RO))
		attr |= TEE_MATTR_PW;
	if (area->flags & TEE_PAGER_AREA_X)
		attr |= TEE_MATTR_PX;

	return attr;
}

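/*
 * Populates the aliased page with the content belonging to @page_va:
 * copied from the backing store for read-only areas, zero-filled for
 * read/write areas.
 */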
static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
			void *va_alias)
{
	size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;

	if (area->store) {
		size_t rel_pg_idx = pg_idx - area->mm->offset;
		const void *stored_page = area->store +
					  rel_pg_idx * SMALL_PAGE_SIZE;

		memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
		incr_ro_hits();
	} else {
		memset(va_alias, 0, SMALL_PAGE_SIZE);
		incr_rw_hits();
	}
}

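/*
 * Verifies a page loaded from the backing store against the expected
 * SHA-256 hash for @page_va, panicking on mismatch. Pages without a
 * backing store (read/write areas) aren't checked.
 */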
static void tee_pager_verify_page(struct tee_pager_area *area, vaddr_t page_va,
			void *va_alias)
{
	size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;

	if (area->store) {
		size_t rel_pg_idx = pg_idx - area->mm->offset;
		const void *hash = area->hashes +
				   rel_pg_idx * TEE_SHA256_HASH_SIZE;

		if (hash_sha256_check(hash, va_alias, SMALL_PAGE_SIZE) !=
				TEE_SUCCESS) {
			EMSG("PH 0x%" PRIxVA " failed", page_va);
			panic();
		}
	}
}

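/*
 * If the page at @page_va is currently hidden, restores its mapping
 * attributes, moves it to the back of the pmem list and returns true.
 * Returns false if the page wasn't hidden.
 */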
static bool tee_pager_unhide_page(vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		core_mmu_get_entry(&tee_pager_tbl_info, pmem->pgidx,
				   &pa, &attr);

		if (!(attr & TEE_MATTR_HIDDEN_BLOCK))
			continue;

		if (core_mmu_va2idx(&tee_pager_tbl_info, page_va) ==
		    pmem->pgidx) {
			/* page is hidden, show and move to back */
			core_mmu_set_entry(&tee_pager_tbl_info, pmem->pgidx, pa,
					   get_area_mattr(pmem->area));

			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);

			/* TODO only invalidate entry touched above */
			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

			incr_hidden_hits();
			return true;
		}
	}

	return false;
}

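/*
 * Marks up to TEE_PAGER_NHIDE of the oldest mapped pages as hidden so
 * that the next access to them faults, which lets the pager track which
 * pages are actively used.
 */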
static void tee_pager_hide_pages(void)
{
	struct tee_pager_pmem *pmem;
	size_t n = 0;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		if (n >= TEE_PAGER_NHIDE)
			break;
		n++;

		/*
		 * We cannot hide pages when pmem->area is not defined,
		 * as unhiding requires pmem->area to be defined.
		 */
		if (!pmem->area)
			continue;

		core_mmu_get_entry(&tee_pager_tbl_info, pmem->pgidx,
				   &pa, &attr);
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		core_mmu_set_entry(&tee_pager_tbl_info, pmem->pgidx, pa,
				   TEE_MATTR_HIDDEN_BLOCK);
	}

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

/*
 * Find a mapped pmem, hide it and move it to the pageable pmem list.
 * Return false if the page was not mapped, and true if it was.
 */
static bool tee_pager_release_one_zi(vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;
	unsigned pgidx;
	paddr_t pa;
	uint32_t attr;

	pgidx = core_mmu_va2idx(&tee_pager_tbl_info, page_va);
	core_mmu_get_entry(&tee_pager_tbl_info, pgidx, &pa, &attr);

#ifdef TEE_PAGER_DEBUG_PRINT
	DMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
#endif

	TAILQ_FOREACH(pmem, &tee_pager_rw_pmem_head, link) {
		if (pmem->pgidx != pgidx)
			continue;

		core_mmu_set_entry(&tee_pager_tbl_info, pgidx, pa,
				   TEE_MATTR_PHYS_BLOCK);
		TAILQ_REMOVE(&tee_pager_rw_pmem_head, pmem, link);
		tee_pager_npages++;
		set_npages();
		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
		incr_zi_released();

		return true;
	}

	return false;
}

/* Finds the oldest page and remaps it for the new virtual address */
static bool tee_pager_get_page(struct abort_info *ai,
			struct tee_pager_area *area,
			struct tee_pager_pmem **pmem_ret, paddr_t *pa_ret)
{
	unsigned pgidx = core_mmu_va2idx(&tee_pager_tbl_info, ai->va);
	struct tee_pager_pmem *pmem;
	paddr_t pa;
	uint32_t attr;

	core_mmu_get_entry(&tee_pager_tbl_info, pgidx, &pa, &attr);

	assert(!(attr & (TEE_MATTR_VALID_BLOCK | TEE_MATTR_HIDDEN_BLOCK)));

	if (attr & TEE_MATTR_PHYS_BLOCK) {
		/*
		 * There's a pmem entry using this MMU entry, let's use
		 * that entry in the new mapping.
		 */
		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
			if (pmem->pgidx == pgidx)
				break;
		}
		if (!pmem) {
			DMSG("Couldn't find pmem for pgidx %u", pgidx);
			return false;
		}
	} else {
		pmem = TAILQ_FIRST(&tee_pager_pmem_head);
		if (!pmem) {
			DMSG("No pmem entries");
			return false;
		}
		core_mmu_get_entry(&tee_pager_tbl_info, pmem->pgidx,
				   &pa, &attr);
		core_mmu_set_entry(&tee_pager_tbl_info, pmem->pgidx, 0, 0);
	}

	pmem->pgidx = pgidx;
	pmem->area = area;
	core_mmu_set_entry(&tee_pager_tbl_info, pgidx, pa,
			   TEE_MATTR_PHYS_BLOCK);

	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
	if (area->store) {
		/* move page to back */
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	} else {
		/* Move page to rw list */
		TEE_ASSERT(tee_pager_npages > 0);
		tee_pager_npages--;
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_rw_pmem_head, pmem, link);
	}

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	*pmem_ret = pmem;
	*pa_ret = pa;
	return true;
}

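/*
 * Re-checks the faulting access against the current mapping. Returns
 * true if the page is now mapped with compatible permissions (another
 * core may already have resolved the fault), false if the page isn't
 * mapped, and panics on real permission violations.
 */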
static bool pager_check_access(struct abort_info *ai)
{
	unsigned pgidx = core_mmu_va2idx(&tee_pager_tbl_info, ai->va);
	uint32_t attr;

	core_mmu_get_entry(&tee_pager_tbl_info, pgidx, NULL, &attr);

	/* Not mapped */
	if (!(attr & TEE_MATTR_VALID_BLOCK))
		return false;

	/* Not readable, should not happen */
	if (!(attr & TEE_MATTR_PR)) {
		abort_print_error(ai);
		panic();
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		if (ai->abort_type == ABORT_TYPE_PREFETCH &&
		    !(attr & TEE_MATTR_PX)) {
			/* Attempting to execute from a non-executable page */
			abort_print_error(ai);
			panic();
		}
		/* Since the page is mapped now it's OK */
		return true;
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		if (!(attr & TEE_MATTR_PW)) {
			/* Attempting to write to an RO page */
			abort_print_error(ai);
			panic();
		}
		return true;
	default:
		/* Some fault we can't deal with */
		abort_print_error(ai);
		panic();
	}
}

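/*
 * Handles an abort in a paged region: unhides the page if it was merely
 * hidden, otherwise claims a physical page, populates and verifies it
 * through the alias mapping and finally maps it at the faulting address.
 * Panics if the address isn't covered by any pager area.
 */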
void tee_pager_handle_fault(struct abort_info *ai)
{
	struct tee_pager_area *area;
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
	uint32_t exceptions;

#ifdef TEE_PAGER_DEBUG_PRINT
	abort_print(ai);
#endif

	/*
	 * The updates below can affect several active CPUs at the same
	 * time. We end up here because a thread tried to access some
	 * memory that isn't available. We have to be careful when making
	 * that memory available as other threads may succeed in accessing
	 * that address the moment after we've made it available.
	 *
	 * That means that we can't just map the memory and populate the
	 * page; instead we use the aliased mapping to populate the page
	 * and once everything is ready we map it.
	 */
	exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	cpu_spin_lock(&pager_lock);

	/* check if the access is valid */
	area = tee_pager_find_area(ai->va);
	if (!area) {
		abort_print_error(ai);
		DMSG("Invalid addr 0x%" PRIxVA, ai->va);
		panic();
	}

	if (!tee_pager_unhide_page(page_va)) {
		struct tee_pager_pmem *pmem = NULL;
		paddr_t pa = 0;

		/*
		 * The page wasn't hidden, but some other core may have
		 * updated the table entry before we got here.
		 */
		if (pager_check_access(ai)) {
			/*
			 * This kind of access is OK with the mapping, so
			 * we're done here because the fault has already
			 * been dealt with by another core.
			 */
			goto out;
		}

		if (!tee_pager_get_page(ai, area, &pmem, &pa)) {
			abort_print(ai);
			panic();
		}

		/* load page code & data */
		tee_pager_load_page(area, page_va, pmem->va_alias);
		tee_pager_verify_page(area, page_va, pmem->va_alias);

		/*
		 * We've updated the page using the aliased mapping and
		 * some cache maintenance is now needed if it's an
		 * executable page.
		 *
		 * Since the d-cache is a physically-indexed,
		 * physically-tagged (PIPT) cache we can clean the aliased
		 * address instead of the real virtual address.
		 *
		 * The i-cache can also be PIPT, but may be something else
		 * too; to keep it simple we invalidate the entire i-cache.
		 * As a future optimization we could invalidate only the
		 * aliased area if it's a PIPT cache, otherwise the entire
		 * cache.
		 */
		if (area->flags & TEE_PAGER_AREA_X) {
			/*
			 * Doing these operations to LoUIS (Level of
			 * unification, Inner Shareable) would be enough
			 */
			cache_maintenance_l1(DCACHE_AREA_CLEAN,
				pmem->va_alias, SMALL_PAGE_SIZE);

			cache_maintenance_l1(ICACHE_INVALIDATE, NULL, 0);
		}

		core_mmu_set_entry(&tee_pager_tbl_info, pmem->pgidx, pa,
				   get_area_mattr(area));

#ifdef TEE_PAGER_DEBUG_PRINT
		DMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
		     core_mmu_idx2va(&tee_pager_tbl_info, pmem->pgidx), pa);
#endif
	}

	tee_pager_hide_pages();
out:
	cpu_spin_unlock(&pager_lock);
	thread_unmask_exceptions(exceptions);
}

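/*
 * Registers the physical pages currently backing @npages pages starting
 * at @vaddr as pageable memory. Each page gets an alias mapping; if
 * @unmap is true the page is made inaccessible (while keeping its
 * physical address in the descriptor), otherwise it stays mapped with
 * the attributes of the area covering it.
 */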
void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
{
	size_t n;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);

	/* setup memory */
	for (n = 0; n < npages; n++) {
		struct tee_pager_pmem *pmem;
		tee_vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
		unsigned pgidx = core_mmu_va2idx(&tee_pager_tbl_info, va);
		paddr_t pa;
		uint32_t attr;

		core_mmu_get_entry(&tee_pager_tbl_info, pgidx, &pa, &attr);

		/* Ignore unmapped pages/blocks */
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		pmem = malloc(sizeof(struct tee_pager_pmem));
		if (pmem == NULL) {
			DMSG("Can't allocate memory");
			panic();
		}

		pmem->pgidx = pgidx;
		pmem->va_alias = pager_add_alias_page(pa);

		if (unmap) {
			/*
			 * Note that we're making the page inaccessible
			 * with the TEE_MATTR_PHYS_BLOCK attribute to
			 * indicate that the descriptor still holds a valid
			 * physical address of a page.
			 */
			pmem->area = NULL;
			core_mmu_set_entry(&tee_pager_tbl_info, pgidx, pa,
					   TEE_MATTR_PHYS_BLOCK);
		} else {
			/*
			 * The page is still mapped, let's assign the area
			 * and update the protection bits accordingly.
			 */
			pmem->area = tee_pager_find_area(va);
			core_mmu_set_entry(&tee_pager_tbl_info, pgidx, pa,
					   get_area_mattr(pmem->area));
		}

		tee_pager_npages++;
		incr_npages_all();
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	/* Invalidate secure TLB */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

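/*
 * Releases the zero-initialized (read/write) pages in the given range
 * back to the pool of pageable pages. @vaddr and @size must be page
 * aligned.
 */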
void tee_pager_release_zi(vaddr_t vaddr, size_t size)
{
	bool unmapped = false;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	cpu_spin_lock(&pager_lock);

	if ((vaddr & SMALL_PAGE_MASK) || (size & SMALL_PAGE_MASK))
		panic();

	for (; size; vaddr += SMALL_PAGE_SIZE, size -= SMALL_PAGE_SIZE)
		unmapped |= tee_pager_release_one_zi(vaddr);

	/* Invalidate secure TLB */
	if (unmapped)
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	cpu_spin_unlock(&pager_lock);
	thread_set_exceptions(exceptions);
}

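/*
 * Allocates a page-aligned, zero-initialized virtual range of at least
 * @size bytes from tee_mm_vcore and registers it as a read/write pager
 * area. Returns NULL if @size is 0 or the allocation fails.
 */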
void *tee_pager_request_zi(size_t size)
{
	tee_mm_entry_t *mm;

	if (!size)
		return NULL;

	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
	if (!mm)
		return NULL;

	tee_pager_add_area(mm, TEE_PAGER_AREA_RW, NULL, NULL);

	return (void *)tee_mm_get_smem(mm);
}
758