xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision f17691b3f6b27866f66636a53685bd3a6f7daa8a)
1 /*
2  * Copyright (c) 2014, STMicroelectronics International N.V.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <sys/queue.h>
29 #include <kernel/abort.h>
30 #include <kernel/panic.h>
31 #include <kernel/tee_misc.h>
32 #include <kernel/tee_ta_manager.h>
33 #include <kernel/thread.h>
34 #include <kernel/tz_proc.h>
35 #include <mm/core_mmu.h>
36 #include <mm/tee_mm.h>
37 #include <mm/tee_mmu_defs.h>
38 #include <mm/tee_pager.h>
39 #include <types_ext.h>
40 #include <stdlib.h>
41 #include <tee_api_defines.h>
42 #include <tee/tee_cryp_provider.h>
43 #include <trace.h>
44 #include <utee_defines.h>
45 #include <util.h>
46 
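/*
 * struct tee_pager_area - Describes a virtual memory area handled by the pager
 * @hashes	SHA-256 hash per page, used to verify read-only pages
 * @store	backing store for read-only pages, NULL for read/write areas
 * @flags	TEE_PAGER_AREA_* flags giving the permissions of the area
 * @mm		entry in the virtual memory pool covered by this area
 */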
47 struct tee_pager_area {
48 	const uint8_t *hashes;
49 	const uint8_t *store;
50 	uint32_t flags;
51 	tee_mm_entry_t *mm;
52 	TAILQ_ENTRY(tee_pager_area) link;
53 };
54 
55 static TAILQ_HEAD(tee_pager_area_head, tee_pager_area) tee_pager_area_head =
56 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
57 
58 /*
59  * struct tee_pager_pmem - Represents a physical page used for paging.
60  *
61  * @pgidx	index of the entry in tbl_info. The physical address is
62  *		kept in that MMU entry, so even when the page isn't mapped
63  *		there's always an entry holding the physical address.
64  *
65  * @va_alias	Virtual address where the physical page is always aliased.
66  *		Used during remapping of the page when the content needs to
67  *		be updated before it's available at the new location.
68  *
69  * @area	a pointer to the pager area
70  */
71 struct tee_pager_pmem {
72 	unsigned pgidx;
73 	void *va_alias;
74 	struct tee_pager_area *area;
75 	TAILQ_ENTRY(tee_pager_pmem) link;
76 };
77 
78 /* The list of physical pages. The first page in the list is the oldest */
79 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
80 
81 static struct tee_pager_pmem_head tee_pager_pmem_head =
82 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
83 
84 static struct tee_pager_pmem_head tee_pager_rw_pmem_head =
85 	TAILQ_HEAD_INITIALIZER(tee_pager_rw_pmem_head);
86 
87 /* Number of pages to hide at a time when the pager hides pages */
88 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
89 
90 /* Number of registered physical pages, used when hiding pages. */
91 static size_t tee_pager_npages;
92 
93 #ifdef CFG_WITH_STATS
94 static struct tee_pager_stats pager_stats;
95 
96 static inline void incr_ro_hits(void)
97 {
98 	pager_stats.ro_hits++;
99 }
100 
101 static inline void incr_rw_hits(void)
102 {
103 	pager_stats.rw_hits++;
104 }
105 
106 static inline void incr_hidden_hits(void)
107 {
108 	pager_stats.hidden_hits++;
109 }
110 
111 static inline void incr_zi_released(void)
112 {
113 	pager_stats.zi_released++;
114 }
115 
116 static inline void incr_npages_all(void)
117 {
118 	pager_stats.npages_all++;
119 }
120 
121 static inline void set_npages(void)
122 {
123 	pager_stats.npages = tee_pager_npages;
124 }
125 
126 void tee_pager_get_stats(struct tee_pager_stats *stats)
127 {
128 	*stats = pager_stats;
129 
130 	pager_stats.hidden_hits = 0;
131 	pager_stats.ro_hits = 0;
132 	pager_stats.rw_hits = 0;
133 	pager_stats.zi_released = 0;
134 }
135 
136 #else /* CFG_WITH_STATS */
137 static inline void incr_ro_hits(void) { }
138 static inline void incr_rw_hits(void) { }
139 static inline void incr_hidden_hits(void) { }
140 static inline void incr_zi_released(void) { }
141 static inline void incr_npages_all(void) { }
142 static inline void set_npages(void) { }
143 
144 void tee_pager_get_stats(struct tee_pager_stats *stats)
145 {
146 	memset(stats, 0, sizeof(struct tee_pager_stats));
147 }
148 #endif /* CFG_WITH_STATS */
149 
150 /*
151  * Reference to translation table used to map the virtual memory range
152  * covered by the pager.
153  */
154 static struct core_mmu_table_info tbl_info;
155 
156 static unsigned pager_lock = SPINLOCK_UNLOCK;
157 
158 /* Defines the range of the alias area */
159 static tee_mm_entry_t *pager_alias_area;
160 /*
161  * Physical pages are added in a stack-like fashion to the alias area.
162  * @pager_alias_next_free gives the address of the next free entry,
163  * or 0 when the alias area is exhausted.
164  */
165 static uintptr_t pager_alias_next_free;
166 
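/*
 * tee_pager_set_alias_area() - Define the alias area used by the pager
 *
 * Registers @mm as the area where physical pages are temporarily aliased
 * while their content is populated. Checks that the area is page aligned
 * and covered by a small-page translation table, then clears any existing
 * mappings in it.
 */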
167 void tee_pager_set_alias_area(tee_mm_entry_t *mm)
168 {
169 	struct core_mmu_table_info ti;
170 	size_t tbl_va_size;
171 	unsigned idx;
172 	unsigned last_idx;
173 	vaddr_t smem = tee_mm_get_smem(mm);
174 	size_t nbytes = tee_mm_get_bytes(mm);
175 
176 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
177 
178 	TEE_ASSERT(!pager_alias_area);
179 	if (!core_mmu_find_table(smem, UINT_MAX, &ti)) {
180 		DMSG("Can't find translation table");
181 		panic();
182 	}
183 	if ((1 << ti.shift) != SMALL_PAGE_SIZE) {
184 		DMSG("Unsupported page size in translation table %u",
185 		     1 << ti.shift);
186 		panic();
187 	}
188 
189 	tbl_va_size = (1 << ti.shift) * ti.num_entries;
190 	if (!core_is_buffer_inside(smem, nbytes,
191 				   ti.va_base, tbl_va_size)) {
192 		DMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
193 			smem, nbytes, ti.va_base, tbl_va_size);
194 		panic();
195 	}
196 
197 	TEE_ASSERT(!(smem & SMALL_PAGE_MASK));
198 	TEE_ASSERT(!(nbytes & SMALL_PAGE_MASK));
199 
200 	pager_alias_area = mm;
201 	pager_alias_next_free = smem;
202 
203 	/* Clear all mappings in the alias area */
204 	idx = core_mmu_va2idx(&ti, smem);
205 	last_idx = core_mmu_va2idx(&ti, smem + nbytes);
206 	for (; idx < last_idx; idx++)
207 		core_mmu_set_entry(&ti, idx, 0, 0);
208 }
209 
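/*
 * pager_add_alias_page() - Map a physical page in the alias area
 *
 * Maps @pa read/write at the next free slot of the alias area and returns
 * the aliased virtual address. When the alias area is exhausted
 * @pager_alias_next_free is set to 0.
 */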
210 static void *pager_add_alias_page(paddr_t pa)
211 {
212 	unsigned idx;
213 	struct core_mmu_table_info ti;
214 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
215 			TEE_MATTR_CACHE_DEFAULT | TEE_MATTR_SECURE |
216 			TEE_MATTR_PRW;
217 
218 	DMSG("0x%" PRIxPA, pa);
219 
220 	TEE_ASSERT(pager_alias_next_free);
221 	if (!core_mmu_find_table(pager_alias_next_free, UINT_MAX, &ti))
222 		panic();
223 	idx = core_mmu_va2idx(&ti, pager_alias_next_free);
224 	core_mmu_set_entry(&ti, idx, pa, attr);
225 	pager_alias_next_free += SMALL_PAGE_SIZE;
226 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
227 				      tee_mm_get_bytes(pager_alias_area)))
228 		pager_alias_next_free = 0;
229 	return (void *)core_mmu_idx2va(&ti, idx);
230 }
231 
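/*
 * tee_pager_add_area() - Register a pageable area
 *
 * Read-only areas (TEE_PAGER_AREA_RO) must supply a backing @store and per
 * page SHA-256 @hashes, read/write areas (TEE_PAGER_AREA_RW) are zero
 * initialized on demand and must supply neither. Returns false if the area
 * doesn't fit in the pager translation table or if allocation fails.
 */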
232 bool tee_pager_add_area(tee_mm_entry_t *mm, uint32_t flags, const void *store,
233 		const void *hashes)
234 {
235 	struct tee_pager_area *area;
236 	size_t tbl_va_size;
237 
238 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
239 		tee_mm_get_smem(mm),
240 		tee_mm_get_smem(mm) + (mm->size << mm->pool->shift),
241 		flags, store, hashes);
242 
243 	if (flags & TEE_PAGER_AREA_RO)
244 		TEE_ASSERT(store && hashes);
245 	else if (flags & TEE_PAGER_AREA_RW)
246 		TEE_ASSERT(!store && !hashes);
247 	else
248 		panic();
249 
250 	if (!tbl_info.num_entries) {
251 		if (!core_mmu_find_table(tee_mm_get_smem(mm), UINT_MAX,
252 					&tbl_info))
253 			return false;
254 		if ((1 << tbl_info.shift) != SMALL_PAGE_SIZE) {
255 			DMSG("Unsupported page size in translation table %u",
256 			     1 << tbl_info.shift);
257 			return false;
258 		}
259 	}
260 
261 	tbl_va_size = (1 << tbl_info.shift) * tbl_info.num_entries;
262 	if (!core_is_buffer_inside(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
263 				   tbl_info.va_base, tbl_va_size)) {
264 		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
265 			tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
266 			tbl_info.va_base, tbl_va_size);
267 		return false;
268 	}
269 
270 	area = malloc(sizeof(struct tee_pager_area));
271 	if (!area)
272 		return false;
273 
274 	area->mm = mm;
275 	area->flags = flags;
276 	area->store = store;
277 	area->hashes = hashes;
278 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
279 	return true;
280 }
281 
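/* Returns the registered area covering @va, or NULL if there is none */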
282 static struct tee_pager_area *tee_pager_find_area(vaddr_t va)
283 {
284 	struct tee_pager_area *area;
285 
286 	TAILQ_FOREACH(area, &tee_pager_area_head, link) {
287 		tee_mm_entry_t *mm = area->mm;
288 		size_t offset = (va - mm->pool->lo) >> mm->pool->shift;
289 
290 		if (offset >= mm->offset && offset < (mm->offset + mm->size))
291 			return area;
292 	}
293 	return NULL;
294 }
295 
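/* Translates the flags of an area into the mapping attributes of its pages */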
296 static uint32_t get_area_mattr(struct tee_pager_area *area)
297 {
298 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
299 			TEE_MATTR_CACHE_DEFAULT | TEE_MATTR_SECURE |
300 			TEE_MATTR_PR;
301 
302 	if (!(area->flags & TEE_PAGER_AREA_RO))
303 		attr |= TEE_MATTR_PW;
304 	if (area->flags & TEE_PAGER_AREA_X)
305 		attr |= TEE_MATTR_PX;
306 
307 	return attr;
308 }
309 
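/*
 * tee_pager_load_page() - Populate a page via its aliased mapping
 *
 * Pages of a read-only area are copied from the backing store, pages of a
 * read/write area are zero initialized.
 */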
310 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
311 			void *va_alias)
312 {
313 	size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;
314 
315 	if (area->store) {
316 		size_t rel_pg_idx = pg_idx - area->mm->offset;
317 		const void *stored_page = area->store +
318 					  rel_pg_idx * SMALL_PAGE_SIZE;
319 
320 		memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
321 		incr_ro_hits();
322 	} else {
323 		memset(va_alias, 0, SMALL_PAGE_SIZE);
324 		incr_rw_hits();
325 	}
326 }
327 
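/*
 * tee_pager_verify_page() - Verify a page loaded from the backing store
 *
 * Checks the SHA-256 hash of the page against the stored hash and panics
 * on mismatch. Pages without a backing store are not verified.
 */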
328 static void tee_pager_verify_page(struct tee_pager_area *area, vaddr_t page_va,
329 			void *va_alias)
330 {
331 	size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;
332 
333 	if (area->store) {
334 		size_t rel_pg_idx = pg_idx - area->mm->offset;
335 		const void *hash = area->hashes +
336 				   rel_pg_idx * TEE_SHA256_HASH_SIZE;
337 
338 		if (hash_sha256_check(hash, va_alias, SMALL_PAGE_SIZE) !=
339 				TEE_SUCCESS) {
340 			EMSG("PH 0x%" PRIxVA " failed", page_va);
341 			panic();
342 		}
343 	}
344 }
345 
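/*
 * tee_pager_unhide_page() - Restore access to a hidden page
 *
 * If the page at @page_va is present but hidden, its mapping attributes
 * are restored and it is moved to the end of the pageable list. Returns
 * true if such a page was found, false otherwise.
 */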
346 static bool tee_pager_unhide_page(vaddr_t page_va)
347 {
348 	struct tee_pager_pmem *pmem;
349 
350 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
351 		paddr_t pa;
352 		uint32_t attr;
353 
354 		core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);
355 
356 		if (!(attr & TEE_MATTR_HIDDEN_BLOCK))
357 			continue;
358 
359 		if (core_mmu_va2idx(&tbl_info, page_va) == pmem->pgidx) {
360 			/* Page is hidden, unhide it and move it to the back */
361 			core_mmu_set_entry(&tbl_info, pmem->pgidx, pa,
362 					   get_area_mattr(pmem->area));
363 
364 			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
365 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
366 
367 			/* TODO only invalidate entry touched above */
368 			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
369 
370 			incr_hidden_hits();
371 			return true;
372 		}
373 	}
374 
375 	return false;
376 }
377 
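/*
 * tee_pager_hide_pages() - Temporarily hide the oldest mapped pages
 *
 * Hides up to TEE_PAGER_NHIDE pages from the head of the pageable list so
 * that the next access to them faults and moves them to the back of the
 * list, approximating least recently used eviction.
 */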
378 static void tee_pager_hide_pages(void)
379 {
380 	struct tee_pager_pmem *pmem;
381 	size_t n = 0;
382 
383 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
384 		paddr_t pa;
385 		uint32_t attr;
386 
387 		if (n >= TEE_PAGER_NHIDE)
388 			break;
389 		n++;
390 
391 		/*
392 		 * We cannot hide pages when pmem->area is not defined, as
393 		 * unhiding requires pmem->area to be defined.
394 		 */
395 		if (!pmem->area)
396 			continue;
397 
398 		core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);
399 		if (!(attr & TEE_MATTR_VALID_BLOCK))
400 			continue;
401 
402 		core_mmu_set_entry(&tbl_info, pmem->pgidx, pa,
403 				   TEE_MATTR_HIDDEN_BLOCK);
404 
405 	}
406 
407 	/* TODO only invalidate entries touched above */
408 	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
409 }
410 
411 /*
412  * Find a mapped RW pmem, unmap it and move it back to the pageable list.
413  * Return false if the page was not mapped, and true if it was.
414  */
415 static bool tee_pager_release_one_zi(vaddr_t page_va)
416 {
417 	struct tee_pager_pmem *pmem;
418 	unsigned pgidx;
419 	paddr_t pa;
420 	uint32_t attr;
421 
422 	pgidx = core_mmu_va2idx(&tbl_info, page_va);
423 	core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);
424 
425 #ifdef TEE_PAGER_DEBUG_PRINT
426 	DMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
427 #endif
428 
429 	TAILQ_FOREACH(pmem, &tee_pager_rw_pmem_head, link) {
430 		if (pmem->pgidx != pgidx)
431 			continue;
432 
433 		core_mmu_set_entry(&tbl_info, pgidx, pa, TEE_MATTR_PHYS_BLOCK);
434 		TAILQ_REMOVE(&tee_pager_rw_pmem_head, pmem, link);
435 		tee_pager_npages++;
436 		set_npages();
437 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
438 		incr_zi_released();
439 
440 
441 		return true;
442 	}
443 
444 	return false;
445 }
446 
447 /* Find a physical page for the faulting address, evicting the oldest page if needed */
448 static bool tee_pager_get_page(struct abort_info *ai,
449 			struct tee_pager_area *area,
450 			struct tee_pager_pmem **pmem_ret, paddr_t *pa_ret)
451 {
452 	unsigned pgidx = core_mmu_va2idx(&tbl_info, ai->va);
453 	struct tee_pager_pmem *pmem;
454 	paddr_t pa;
455 	uint32_t attr;
456 
457 	core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);
458 
459 	assert(!(attr & (TEE_MATTR_VALID_BLOCK | TEE_MATTR_HIDDEN_BLOCK)));
460 
461 	if (attr & TEE_MATTR_PHYS_BLOCK) {
462 		/*
463 		 * There's a pmem entry using this mmu entry, let's use
464 		 * that entry in the new mapping.
465 		 */
466 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
467 			if (pmem->pgidx == pgidx)
468 				break;
469 		}
470 		if (!pmem) {
471 			DMSG("Couldn't find pmem for pgidx %u", pgidx);
472 			return false;
473 		}
474 	} else {
475 		pmem = TAILQ_FIRST(&tee_pager_pmem_head);
476 		if (!pmem) {
477 			DMSG("No pmem entries");
478 			return false;
479 		}
480 		core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);
481 		core_mmu_set_entry(&tbl_info, pmem->pgidx, 0, 0);
482 	}
483 
484 	pmem->pgidx = pgidx;
485 	pmem->area = area;
486 	core_mmu_set_entry(&tbl_info, pgidx, pa, TEE_MATTR_PHYS_BLOCK);
487 
488 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
489 	if (area->store) {
490 		/* move page to back */
491 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
492 	} else {
493 		/* Move page to rw list */
494 		TEE_ASSERT(tee_pager_npages > 0);
495 		tee_pager_npages--;
496 		set_npages();
497 		TAILQ_INSERT_TAIL(&tee_pager_rw_pmem_head, pmem, link);
498 	}
499 
500 	/* TODO only invalidate entries touched above */
501 	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
502 
503 	*pmem_ret = pmem;
504 	*pa_ret = pa;
505 	return true;
506 }
507 
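/*
 * pager_check_access() - Check if a fault has already been resolved
 *
 * Re-examines the current mapping of the faulting address. Returns true
 * if the access is now permitted, typically because another core handled
 * the fault first, false if the page isn't mapped, and panics on genuine
 * permission violations.
 */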
508 static bool pager_check_access(struct abort_info *ai)
509 {
510 	unsigned pgidx = core_mmu_va2idx(&tbl_info, ai->va);
511 	uint32_t attr;
512 
513 	core_mmu_get_entry(&tbl_info, pgidx, NULL, &attr);
514 
515 	/* Not mapped */
516 	if (!(attr & TEE_MATTR_VALID_BLOCK))
517 		return false;
518 
519 	/* Not readable, should not happen */
520 	if (!(attr & TEE_MATTR_PR)) {
521 		abort_print_error(ai);
522 		panic();
523 	}
524 
525 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
526 	case CORE_MMU_FAULT_TRANSLATION:
527 	case CORE_MMU_FAULT_READ_PERMISSION:
528 		if (ai->abort_type == ABORT_TYPE_PREFETCH &&
529 		    !(attr & TEE_MATTR_PX)) {
530 			/* Attempting to execute from a non-executable (NOX) page */
531 			abort_print_error(ai);
532 			panic();
533 		}
534 		/* Since the page is mapped now it's OK */
535 		return true;
536 	case CORE_MMU_FAULT_WRITE_PERMISSION:
537 		if (!(attr & TEE_MATTR_PW)) {
538 			/* Attempting to write to an RO page */
539 			abort_print_error(ai);
540 			panic();
541 		}
542 		return true;
543 	default:
544 		/* Some fault we can't deal with */
545 		abort_print_error(ai);
546 		panic();
547 	}
548 
549 }
550 
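/*
 * tee_pager_handle_fault() - Handle an abort in the pageable region by
 * unhiding or paging in the faulting page. Panics if the faulting address
 * isn't covered by a registered area.
 */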
551 void tee_pager_handle_fault(struct abort_info *ai)
552 {
553 	struct tee_pager_area *area;
554 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
555 	uint32_t exceptions;
556 
557 #ifdef TEE_PAGER_DEBUG_PRINT
558 	abort_print(ai);
559 #endif
560 
561 	/* check if the access is valid */
562 	area = tee_pager_find_area(ai->va);
563 	if (!area) {
564 		abort_print(ai);
565 		DMSG("Invalid addr 0x%" PRIxVA, ai->va);
566 		panic();
567 	}
568 
569 	/*
570 	 * We're updating pages that can affect several active CPUs at a
571 	 * time below. We end up here because a thread tries to access some
572 	 * memory that isn't available. We have to be careful when making
573 	 * that memory available as other threads may succeed in accessing
574 	 * that address the moment after we've made it available.
575 	 *
576 	 * That means that we can't just map the memory and populate the
577 	 * page, instead we use the aliased mapping to populate the page
578 	 * and once everything is ready we map it.
579 	 */
580 	exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
581 	cpu_spin_lock(&pager_lock);
582 
583 	if (!tee_pager_unhide_page(page_va)) {
584 		struct tee_pager_pmem *pmem = NULL;
585 		paddr_t pa = 0;
586 
587 		/*
588 		 * The page wasn't hidden, but some other core may have
589 		 * updated the table entry before we got here.
590 		 */
591 		if (pager_check_access(ai)) {
592 			/*
593 			 * Kind of access is OK with the mapping, we're
594 			 * done here because the fault has already been
595 			 * dealt with by another core.
596 			 */
597 			goto out;
598 		}
599 
600 		if (!tee_pager_get_page(ai, area, &pmem, &pa)) {
601 			abort_print(ai);
602 			panic();
603 		}
604 
605 		/* load page code & data */
606 		tee_pager_load_page(area, page_va, pmem->va_alias);
607 		tee_pager_verify_page(area, page_va, pmem->va_alias);
608 
609 		/*
610 		 * We've updated the page using the aliased mapping and
611 		 * some cache maintenance is now needed if it's an
612 		 * executable page.
613 		 *
614 		 * Since the d-cache is a Physically-indexed,
615 		 * physically-tagged (PIPT) cache we can clean the aliased
616 		 * address instead of the real virtual address.
617 		 *
618 		 * The i-cache can also be PIPT, but may be something else
619 		 * too; to keep it simple we invalidate the entire i-cache.
620 		 * As a future optimization we may invalidate only the
621 		 * aliased area if it is a PIPT cache, else the entire cache.
622 		 */
623 		if (area->flags & TEE_PAGER_AREA_X) {
624 			/*
625 			 * Doing these operations to LoUIS (Level of
626 			 * unification, Inner Shareable) would be enough
627 			 */
628 			cache_maintenance_l1(DCACHE_AREA_CLEAN,
629 				pmem->va_alias, SMALL_PAGE_SIZE);
630 
631 			cache_maintenance_l1(ICACHE_INVALIDATE, NULL, 0);
632 		}
633 
634 		core_mmu_set_entry(&tbl_info, pmem->pgidx, pa,
635 				   get_area_mattr(area));
636 
637 #ifdef TEE_PAGER_DEBUG_PRINT
638 		DMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
639 		     core_mmu_idx2va(&tbl_info, pmem->pgidx), pa);
640 #endif
641 
642 	}
643 
644 	tee_pager_hide_pages();
645 out:
646 	cpu_spin_unlock(&pager_lock);
647 	thread_unmask_exceptions(exceptions);
648 }
649 
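/*
 * tee_pager_add_pages() - Add physical pages for the pager to use
 *
 * Registers the pages currently mapped at @vaddr as pager resources and
 * sets up an aliased mapping for each of them. With @unmap set the pages
 * are unmapped, keeping only the physical address in the descriptor, and
 * become immediately available for paging; otherwise they stay mapped
 * with the attributes of the area covering them.
 */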
650 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
651 {
652 	size_t n;
653 
654 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
655 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
656 
657 	/* setup memory */
658 	for (n = 0; n < npages; n++) {
659 		struct tee_pager_pmem *pmem;
660 		tee_vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
661 		unsigned pgidx = core_mmu_va2idx(&tbl_info, va);
662 		paddr_t pa;
663 		uint32_t attr;
664 
665 		core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);
666 
667 		/* Ignore unmapped pages/blocks */
668 		if (!(attr & TEE_MATTR_VALID_BLOCK))
669 			continue;
670 
671 		pmem = malloc(sizeof(struct tee_pager_pmem));
672 		if (pmem == NULL) {
673 			DMSG("Can't allocate memory");
674 			panic();
675 		}
676 
677 		pmem->pgidx = pgidx;
678 		pmem->va_alias = pager_add_alias_page(pa);
679 
680 		if (unmap) {
681 			/*
682 			 * Note that we're making the page inaccessible
683 			 * with the TEE_MATTR_PHYS_BLOCK attribute to
684 			 * indicate that the descriptor still holds a valid
685 			 * physical address of a page.
686 			 */
687 			pmem->area = NULL;
688 			core_mmu_set_entry(&tbl_info, pgidx, pa,
689 					   TEE_MATTR_PHYS_BLOCK);
690 		} else {
691 			/*
692 			 * The page is still mapped, let's assign the area
693 			 * and update the protection bits accordingly.
694 			 */
695 			pmem->area = tee_pager_find_area(va);
696 			core_mmu_set_entry(&tbl_info, pgidx, pa,
697 					   get_area_mattr(pmem->area));
698 		}
699 
700 		tee_pager_npages++;
701 		incr_npages_all();
702 		set_npages();
703 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
704 	}
705 
706 	/* Invalidate secure TLB */
707 	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
708 }
709 
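/*
 * tee_pager_release_zi() - Release zero initialized (read/write) pages
 *
 * Unmaps any paged-in pages in the given range and returns them to the
 * pageable list. @vaddr and @size must be page aligned.
 */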
710 void tee_pager_release_zi(vaddr_t vaddr, size_t size)
711 {
712 	bool unmapped = false;
713 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
714 
715 	if ((vaddr & SMALL_PAGE_MASK) || (size & SMALL_PAGE_MASK))
716 		panic();
717 
718 	for (; size; vaddr += SMALL_PAGE_SIZE, size -= SMALL_PAGE_SIZE)
719 		unmapped |= tee_pager_release_one_zi(vaddr);
720 
721 	/* Invalidate secure TLB */
722 	if (unmapped)
723 		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
724 
725 	thread_set_exceptions(exceptions);
726 }
727 
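/*
 * tee_pager_request_zi() - Request zero initialized pageable memory
 *
 * Allocates a page aligned range of at least @size bytes from tee_mm_vcore
 * and registers it as a read/write pager area. Returns the virtual start
 * address of the range, or NULL if @size is zero or allocation fails.
 */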
728 void *tee_pager_request_zi(size_t size)
729 {
730 	tee_mm_entry_t *mm;
731 
732 	if (!size)
733 		return NULL;
734 
735 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
736 	if (!mm)
737 		return NULL;
738 
739 	tee_pager_add_area(mm, TEE_PAGER_AREA_RW, NULL, NULL);
740 
741 	return (void *)tee_mm_get_smem(mm);
742 }
743