/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <assert.h>
#include <kernel/abort.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/tz_proc.h>
#include <mm/core_mmu.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu_defs.h>
#include <mm/tee_pager.h>
#include <types_ext.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_defines.h>
#include <tee/tee_cryp_provider.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>

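/*
 * struct tee_pager_area - Represents a pageable memory area.
 *
 * @hashes	SHA-256 hashes of the pages in the backing store, only
 *		used for read-only areas.
 * @store	backing store holding the page content, only used for
 *		read-only areas. Read/write areas are zero-initialized
 *		on first access instead.
 * @flags	TEE_PAGER_AREA_* flags describing the area.
 * @mm		entry in the virtual memory pool covered by this area.
 */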
struct tee_pager_area {
	const uint8_t *hashes;
	const uint8_t *store;
	uint32_t flags;
	tee_mm_entry_t *mm;
	TAILQ_ENTRY(tee_pager_area) link;
};

static TAILQ_HEAD(tee_pager_area_head, tee_pager_area) tee_pager_area_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);

#define INVALID_PGIDX	UINT_MAX

/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @pgidx	the index of the entry in tee_pager_tbl_info.
 * @va_alias	Virtual address where the physical page always is aliased.
 *		Used during remapping of the page when the content needs to
 *		be updated before it's available at the new location.
 * @area	a pointer to the pager area
 */
struct tee_pager_pmem {
	unsigned pgidx;
	void *va_alias;
	struct tee_pager_area *area;
	TAILQ_ENTRY(tee_pager_pmem) link;
};

/* The list of physical pages. The first page in the list is the oldest */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);

static struct tee_pager_pmem_head tee_pager_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);

static struct tee_pager_pmem_head tee_pager_rw_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_rw_pmem_head);

/* Number of pages to hide at a time */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)

/* Number of registered physical pages, used when hiding pages. */
static size_t tee_pager_npages;

#ifdef CFG_WITH_STATS
static struct tee_pager_stats pager_stats;

static inline void incr_ro_hits(void)
{
	pager_stats.ro_hits++;
}

static inline void incr_rw_hits(void)
{
	pager_stats.rw_hits++;
}

static inline void incr_hidden_hits(void)
{
	pager_stats.hidden_hits++;
}

static inline void incr_zi_released(void)
{
	pager_stats.zi_released++;
}

static inline void incr_npages_all(void)
{
	pager_stats.npages_all++;
}

static inline void set_npages(void)
{
	pager_stats.npages = tee_pager_npages;
}

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	*stats = pager_stats;

	pager_stats.hidden_hits = 0;
	pager_stats.ro_hits = 0;
	pager_stats.rw_hits = 0;
	pager_stats.zi_released = 0;
}

#else /* CFG_WITH_STATS */
static inline void incr_ro_hits(void) { }
static inline void incr_rw_hits(void) { }
static inline void incr_hidden_hits(void) { }
static inline void incr_zi_released(void) { }
static inline void incr_npages_all(void) { }
static inline void set_npages(void) { }

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	memset(stats, 0, sizeof(struct tee_pager_stats));
}
#endif /* CFG_WITH_STATS */

struct core_mmu_table_info tee_pager_tbl_info;
static struct core_mmu_table_info pager_alias_tbl_info;

static unsigned pager_lock = SPINLOCK_UNLOCK;

/* Defines the range of the alias area */
static tee_mm_entry_t *pager_alias_area;
/*
 * Physical pages are added to the alias area in a stack-like fashion.
 * @pager_alias_next_free holds the address of the next free entry, or 0
 * when the alias area is exhausted.
 */
static uintptr_t pager_alias_next_free;

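/*
 * Assigns the virtual range used to alias physical pages. The range
 * must be page aligned and fall inside a translation table with small
 * page granularity; any existing mappings in the range are cleared.
 */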
void tee_pager_set_alias_area(tee_mm_entry_t *mm)
{
	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
	size_t tbl_va_size;
	unsigned idx;
	unsigned last_idx;
	vaddr_t smem = tee_mm_get_smem(mm);
	size_t nbytes = tee_mm_get_bytes(mm);

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);

	TEE_ASSERT(!pager_alias_area);
	if (!ti->num_entries && !core_mmu_find_table(smem, UINT_MAX, ti)) {
		DMSG("Can't find translation table");
		panic();
	}
	if ((1 << ti->shift) != SMALL_PAGE_SIZE) {
		DMSG("Unsupported page size in translation table %u",
		     1 << ti->shift);
		panic();
	}

	tbl_va_size = (1 << ti->shift) * ti->num_entries;
	if (!core_is_buffer_inside(smem, nbytes,
				   ti->va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
			smem, nbytes, ti->va_base, tbl_va_size);
		panic();
	}

	TEE_ASSERT(!(smem & SMALL_PAGE_MASK));
	TEE_ASSERT(!(nbytes & SMALL_PAGE_MASK));

	pager_alias_area = mm;
	pager_alias_next_free = smem;

	/* Clear all mappings in the alias area */
	idx = core_mmu_va2idx(ti, smem);
	last_idx = core_mmu_va2idx(ti, smem + nbytes);
	for (; idx < last_idx; idx++)
		core_mmu_set_entry(ti, idx, 0, 0);

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

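/*
 * Maps the physical page @pa at the next free entry of the alias area
 * and returns the virtual address of the alias. Entries are handed out
 * in a stack-like fashion; @pager_alias_next_free becomes 0 once the
 * alias area is exhausted.
 */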
static void *pager_add_alias_page(paddr_t pa)
{
	unsigned idx;
	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
			TEE_MATTR_SECURE | TEE_MATTR_PRW;

	DMSG("0x%" PRIxPA, pa);

	TEE_ASSERT(pager_alias_next_free && ti->num_entries);
	idx = core_mmu_va2idx(ti, pager_alias_next_free);
	core_mmu_set_entry(ti, idx, pa, attr);
	pager_alias_next_free += SMALL_PAGE_SIZE;
	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
				      tee_mm_get_bytes(pager_alias_area)))
		pager_alias_next_free = 0;
	return (void *)core_mmu_idx2va(ti, idx);
}

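/*
 * Registers a pageable area. Read-only areas must supply a backing
 * store and page hashes while read/write areas must not. Returns false
 * if the area doesn't fit inside the translation table used by the
 * pager.
 */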
bool tee_pager_add_area(tee_mm_entry_t *mm, uint32_t flags, const void *store,
		const void *hashes)
{
	struct tee_pager_area *area;
	size_t tbl_va_size;
	uint32_t exceptions;

	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
		tee_mm_get_smem(mm),
		tee_mm_get_smem(mm) + (mm->size << mm->pool->shift),
		flags, store, hashes);

	if (flags & TEE_PAGER_AREA_RO)
		TEE_ASSERT(store && hashes);
	else if (flags & TEE_PAGER_AREA_RW)
		TEE_ASSERT(!store && !hashes);
	else
		panic();

	if (!tee_pager_tbl_info.num_entries) {
		if (!core_mmu_find_table(tee_mm_get_smem(mm), UINT_MAX,
					&tee_pager_tbl_info))
			return false;
		if ((1 << tee_pager_tbl_info.shift) != SMALL_PAGE_SIZE) {
			DMSG("Unsupported page size in translation table %u",
			     1 << tee_pager_tbl_info.shift);
			return false;
		}
	}

	tbl_va_size = (1 << tee_pager_tbl_info.shift) *
			tee_pager_tbl_info.num_entries;
	if (!core_is_buffer_inside(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
				   tee_pager_tbl_info.va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
			tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
			tee_pager_tbl_info.va_base, tbl_va_size);
		return false;
	}

	area = malloc(sizeof(struct tee_pager_area));
	if (!area)
		return false;

	area->mm = mm;
	area->flags = flags;
	area->store = store;
	area->hashes = hashes;

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	cpu_spin_lock(&pager_lock);

	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);

	cpu_spin_unlock(&pager_lock);
	thread_set_exceptions(exceptions);
	return true;
}

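/* Returns the area covering @va, or NULL if @va isn't paged */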
static struct tee_pager_area *tee_pager_find_area(vaddr_t va)
{
	struct tee_pager_area *area;

	TAILQ_FOREACH(area, &tee_pager_area_head, link) {
		tee_mm_entry_t *mm = area->mm;
		size_t offset = (va - mm->pool->lo) >> mm->pool->shift;

		if (offset >= mm->offset && offset < (mm->offset + mm->size))
			return area;
	}
	return NULL;
}

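/* Returns the mapping attributes matching the flags of @area */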
static uint32_t get_area_mattr(struct tee_pager_area *area)
{
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
			TEE_MATTR_SECURE | TEE_MATTR_PR;

	if (!(area->flags & TEE_PAGER_AREA_RO))
		attr |= TEE_MATTR_PW;
	if (area->flags & TEE_PAGER_AREA_X)
		attr |= TEE_MATTR_PX;

	return attr;
}

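/* Returns the physical address backing @pmem via its alias mapping */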
static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
{
	paddr_t pa;
	unsigned idx;

	idx = core_mmu_va2idx(&pager_alias_tbl_info, (vaddr_t)pmem->va_alias);
	core_mmu_get_entry(&pager_alias_tbl_info, idx, &pa, NULL);
	return pa;
}

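/*
 * Populates the page at @page_va through its alias @va_alias, either by
 * copying the content from the backing store (read-only pages) or by
 * zero-filling it (read/write pages).
 */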
static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
			void *va_alias)
{
	size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;

	if (area->store) {
		size_t rel_pg_idx = pg_idx - area->mm->offset;
		const void *stored_page = area->store +
					  rel_pg_idx * SMALL_PAGE_SIZE;

		memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
		incr_ro_hits();
	} else {
		memset(va_alias, 0, SMALL_PAGE_SIZE);
		incr_rw_hits();
	}
}

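/*
 * Verifies the SHA-256 hash of a page loaded from the backing store and
 * panics on a mismatch. Read/write pages have no hash and are skipped.
 */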
static void tee_pager_verify_page(struct tee_pager_area *area, vaddr_t page_va,
			void *va_alias)
{
	size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;

	if (area->store) {
		size_t rel_pg_idx = pg_idx - area->mm->offset;
		const void *hash = area->hashes +
				   rel_pg_idx * TEE_SHA256_HASH_SIZE;

		if (hash_sha256_check(hash, va_alias, SMALL_PAGE_SIZE) !=
				TEE_SUCCESS) {
			EMSG("PH 0x%" PRIxVA " failed", page_va);
			panic();
		}
	}
}

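/*
 * If the page at @page_va is hidden, restores its real mapping
 * attributes and moves it to the back of the pmem list. Returns true if
 * the page was hidden.
 */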
static bool tee_pager_unhide_page(vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		if (pmem->pgidx == INVALID_PGIDX)
			continue;

		core_mmu_get_entry(&tee_pager_tbl_info, pmem->pgidx,
				   &pa, &attr);

		if (!(attr & TEE_MATTR_HIDDEN_BLOCK))
			continue;

		if (core_mmu_va2idx(&tee_pager_tbl_info, page_va) ==
		    pmem->pgidx) {
			/* page is hidden, show and move to back */
			assert(pa == get_pmem_pa(pmem));
			core_mmu_set_entry(&tee_pager_tbl_info, pmem->pgidx, pa,
					   get_area_mattr(pmem->area));

			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);

			/* TODO only invalidate entry touched above */
			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

			incr_hidden_hits();
			return true;
		}
	}

	return false;
}

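/*
 * Marks up to TEE_PAGER_NHIDE of the oldest mapped pages as hidden. The
 * next access to a hidden page faults, and unhiding moves the page to
 * the back of the pmem list, which roughly keeps the list ordered by
 * last access.
 */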
static void tee_pager_hide_pages(void)
{
	struct tee_pager_pmem *pmem;
	size_t n = 0;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		if (n >= TEE_PAGER_NHIDE)
			break;
		n++;

		/*
		 * We cannot hide pages when pmem->area is not defined, as
		 * unhiding the page later requires pmem->area to be set.
		 */
		if (!pmem->area)
			continue;

		core_mmu_get_entry(&tee_pager_tbl_info, pmem->pgidx,
				   &pa, &attr);
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		assert(pa == get_pmem_pa(pmem));
		core_mmu_set_entry(&tee_pager_tbl_info, pmem->pgidx, pa,
				   TEE_MATTR_HIDDEN_BLOCK);
	}

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

435 
436 /*
437  * Find mapped pmem, hide and move to pageble pmem.
438  * Return false if page was not mapped, and true if page was mapped.
439  */
440 static bool tee_pager_release_one_zi(vaddr_t page_va)
441 {
442 	struct tee_pager_pmem *pmem;
443 	unsigned pgidx;
444 	paddr_t pa;
445 	uint32_t attr;
446 
447 	pgidx = core_mmu_va2idx(&tee_pager_tbl_info, page_va);
448 	core_mmu_get_entry(&tee_pager_tbl_info, pgidx, &pa, &attr);
449 
450 #ifdef TEE_PAGER_DEBUG_PRINT
451 	DMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
452 #endif
453 
454 	TAILQ_FOREACH(pmem, &tee_pager_rw_pmem_head, link) {
455 		if (pmem->pgidx != pgidx)
456 			continue;
457 
458 		assert(pa == get_pmem_pa(pmem));
459 		core_mmu_set_entry(&tee_pager_tbl_info, pgidx, 0, 0);
460 		TAILQ_REMOVE(&tee_pager_rw_pmem_head, pmem, link);
461 		pmem->area = NULL;
462 		pmem->pgidx = INVALID_PGIDX;
463 		tee_pager_npages++;
464 		set_npages();
465 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
466 		incr_zi_released();
467 
468 
469 		return true;
470 	}
471 
472 	return false;
473 }
474 
475 /* Finds the oldest page and remaps it for the new virtual address */
476 static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
477 {
478 	struct tee_pager_pmem *pmem;
479 
480 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
481 	if (!pmem) {
482 		DMSG("No pmem entries");
483 		return NULL;
484 	}
485 	if (pmem->pgidx != INVALID_PGIDX) {
486 		core_mmu_set_entry(&tee_pager_tbl_info, pmem->pgidx, 0, 0);
487 		/* TODO only invalidate entries touched above */
488 		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
489 	}
490 
491 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
492 	pmem->pgidx = INVALID_PGIDX;
493 	pmem->area = NULL;
494 	if (area->store) {
495 		/* move page to back */
496 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
497 	} else {
498 		/* Move page to rw list */
499 		TEE_ASSERT(tee_pager_npages > 0);
500 		tee_pager_npages--;
501 		set_npages();
502 		TAILQ_INSERT_TAIL(&tee_pager_rw_pmem_head, pmem, link);
503 	}
504 
505 	return pmem;
506 }
507 
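/*
 * Returns true if the faulting access is already permitted by the
 * current mapping, for instance because another core handled the fault
 * first. Panics on permission violations that paging can't fix.
 */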
static bool pager_check_access(struct abort_info *ai)
{
	unsigned pgidx = core_mmu_va2idx(&tee_pager_tbl_info, ai->va);
	uint32_t attr;

	core_mmu_get_entry(&tee_pager_tbl_info, pgidx, NULL, &attr);

	/* Not mapped */
	if (!(attr & TEE_MATTR_VALID_BLOCK))
		return false;

	/* Not readable, should not happen */
	if (!(attr & TEE_MATTR_PR)) {
		abort_print_error(ai);
		panic();
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		if (ai->abort_type == ABORT_TYPE_PREFETCH &&
		    !(attr & TEE_MATTR_PX)) {
			/* Attempting to execute from an NOX page */
			abort_print_error(ai);
			panic();
		}
		/* Since the page is mapped now it's OK */
		return true;
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		if (!(attr & TEE_MATTR_PW)) {
			/* Attempting to write to an RO page */
			abort_print_error(ai);
			panic();
		}
		return true;
	default:
		/* Some fault we can't deal with */
		abort_print_error(ai);
		panic();
	}
}

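/*
 * Main pager entry point, called from the abort handler. Makes the
 * faulting page available by unhiding it, or by evicting the oldest
 * page and populating it via the alias mapping before it's mapped at
 * the faulting address.
 */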
void tee_pager_handle_fault(struct abort_info *ai)
{
	struct tee_pager_area *area;
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
	uint32_t exceptions;

#ifdef TEE_PAGER_DEBUG_PRINT
	abort_print(ai);
#endif

	/*
	 * We're updating pages that can affect several active CPUs at a
	 * time below. We end up here because a thread tries to access some
	 * memory that isn't available. We have to be careful when making
	 * that memory available as other threads may succeed in accessing
	 * that address the moment after we've made it available.
	 *
	 * That means that we can't just map the memory and populate the
	 * page, instead we use the aliased mapping to populate the page
	 * and once everything is ready we map it.
	 */
	exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	cpu_spin_lock(&pager_lock);

	/* check if the access is valid */
	area = tee_pager_find_area(ai->va);
	if (!area) {
		abort_print_error(ai);
		DMSG("Invalid addr 0x%" PRIxVA, ai->va);
		panic();
	}

	if (!tee_pager_unhide_page(page_va)) {
		struct tee_pager_pmem *pmem = NULL;

		/*
		 * The page wasn't hidden, but some other core may have
		 * updated the table entry before we got here.
		 */
		if (pager_check_access(ai)) {
			/*
			 * The kind of access is OK with the mapping, so
			 * we're done here because the fault has already
			 * been dealt with by another core.
			 */
			goto out;
		}

		pmem = tee_pager_get_page(area);
		if (!pmem) {
			abort_print(ai);
			panic();
		}

		/* load page code & data */
		tee_pager_load_page(area, page_va, pmem->va_alias);
		tee_pager_verify_page(area, page_va, pmem->va_alias);

		/*
		 * We've updated the page using the aliased mapping and
		 * some cache maintenance is now needed if it's an
		 * executable page.
		 *
		 * Since the d-cache is a physically-indexed,
		 * physically-tagged (PIPT) cache we can clean the aliased
		 * address instead of the real virtual address.
		 *
		 * The i-cache can also be PIPT, but may be something else
		 * too; to keep it simple we invalidate the entire i-cache.
		 * As a future optimization we may invalidate only the
		 * aliased area if it is a PIPT cache, else the entire
		 * cache.
		 */
		if (area->flags & TEE_PAGER_AREA_X) {
			/*
			 * Doing these operations to LoUIS (Level of
			 * Unification, Inner Shareable) would be enough
			 */
			cache_maintenance_l1(DCACHE_AREA_CLEAN,
				pmem->va_alias, SMALL_PAGE_SIZE);

			cache_maintenance_l1(ICACHE_INVALIDATE, NULL, 0);
		}

		pmem->area = area;
		pmem->pgidx = core_mmu_va2idx(&tee_pager_tbl_info, ai->va);
		core_mmu_set_entry(&tee_pager_tbl_info, pmem->pgidx,
				   get_pmem_pa(pmem), get_area_mattr(area));

#ifdef TEE_PAGER_DEBUG_PRINT
		DMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
		     core_mmu_idx2va(&tee_pager_tbl_info, pmem->pgidx),
				     get_pmem_pa(pmem));
#endif
	}

	tee_pager_hide_pages();
out:
	cpu_spin_unlock(&pager_lock);
	thread_unmask_exceptions(exceptions);
}

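/*
 * Hands over @npages physical pages starting at @vaddr to the pager.
 * With @unmap set the pages are unmapped and only serve as backing for
 * other virtual addresses; otherwise they stay mapped and are assigned
 * to the area covering them.
 */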
void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
{
	size_t n;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);

	/* setup memory */
	for (n = 0; n < npages; n++) {
		struct tee_pager_pmem *pmem;
		tee_vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
		unsigned pgidx = core_mmu_va2idx(&tee_pager_tbl_info, va);
		paddr_t pa;
		uint32_t attr;

		core_mmu_get_entry(&tee_pager_tbl_info, pgidx, &pa, &attr);

		/* Ignore unmapped pages/blocks */
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		pmem = malloc(sizeof(struct tee_pager_pmem));
		if (pmem == NULL) {
			DMSG("Can't allocate memory");
			panic();
		}

		pmem->va_alias = pager_add_alias_page(pa);

		if (unmap) {
			pmem->area = NULL;
			pmem->pgidx = INVALID_PGIDX;
			core_mmu_set_entry(&tee_pager_tbl_info, pgidx, 0, 0);
		} else {
			/*
			 * The page is still mapped, let's assign the area
			 * and update the protection bits accordingly.
			 */
			pmem->area = tee_pager_find_area(va);
			pmem->pgidx = pgidx;
			assert(pa == get_pmem_pa(pmem));
			core_mmu_set_entry(&tee_pager_tbl_info, pgidx, pa,
					   get_area_mattr(pmem->area));
		}

		tee_pager_npages++;
		incr_npages_all();
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	/* Invalidate secure TLB */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

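/*
 * Releases the zero-initialized (read/write) pages in the given
 * page-aligned range back to the pool of pageable pages.
 */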
void tee_pager_release_zi(vaddr_t vaddr, size_t size)
{
	bool unmapped = false;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	cpu_spin_lock(&pager_lock);

	if ((vaddr & SMALL_PAGE_MASK) || (size & SMALL_PAGE_MASK))
		panic();

	for (; size; vaddr += SMALL_PAGE_SIZE, size -= SMALL_PAGE_SIZE)
		unmapped |= tee_pager_release_one_zi(vaddr);

	/* Invalidate secure TLB */
	if (unmapped)
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	cpu_spin_unlock(&pager_lock);
	thread_set_exceptions(exceptions);
}

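/*
 * Allocates a zero-initialized pageable read/write area of at least
 * @size bytes from the virtual core pool and returns its virtual
 * address, or NULL on failure.
 */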
void *tee_pager_request_zi(size_t size)
{
	tee_mm_entry_t *mm;

	if (!size)
		return NULL;

	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
	if (!mm)
		return NULL;

	tee_pager_add_area(mm, TEE_PAGER_AREA_RW, NULL, NULL);

	return (void *)tee_mm_get_smem(mm);
}