/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdlib.h>
#include <inttypes.h>
#include <string.h>	/* memcpy()/memset() */
#include <assert.h>	/* assert() */
#include <kernel/tee_common_unpg.h>
#include <kernel/tee_common.h>
#include <kernel/thread_defs.h>
#include <kernel/abort.h>
#include <kernel/panic.h>
#include <mm/tee_mmu_defs.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_kta_trace.h>
#include <kernel/misc.h>
#include <kernel/tee_misc.h>
#include <kernel/tz_proc.h>
#include <mm/tee_pager.h>
#include <mm/tee_mm.h>
#include <mm/core_mmu.h>
#include <tee/arch_svc.h>
#include <arm.h>
#include <tee/tee_cryp_provider.h>
#include <tee_api_defines.h>
#include <utee_defines.h>
#include <trace.h>
#include <util.h>

#ifdef CFG_WITH_PAGER
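/*
 * struct tee_pager_area - Virtual memory range handled by the pager
 * @hashes	SHA-256 hash of each page in @store, used to verify a
 *		read-only page every time it's paged in (NULL for RW areas)
 * @store	backing store holding the read-only content of the area
 *		(NULL for RW areas, which are zero-filled on demand)
 * @flags	TEE_PAGER_AREA_* flags giving the access rights of the area
 * @mm		virtual memory range covered by the area
 */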
struct tee_pager_area {
	const uint8_t *hashes;
	const uint8_t *store;
	uint32_t flags;
	tee_mm_entry_t *mm;
	TAILQ_ENTRY(tee_pager_area) link;
};

static TAILQ_HEAD(tee_pager_area_head, tee_pager_area) tee_pager_area_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);

/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @pgidx	index of the entry in tbl_info referring to this page. The
 *		physical address is kept in that MMU entry, so even when
 *		the page isn't mapped there's always an MMU entry holding
 *		the physical address.
 *
 * @va_alias	Virtual address where the physical page is always aliased.
 *		Used while remapping the page when the content needs to be
 *		updated before it's made available at the new location.
 *
 * @area	a pointer to the pager area
 */
struct tee_pager_pmem {
	unsigned pgidx;
	void *va_alias;
	struct tee_pager_area *area;
	TAILQ_ENTRY(tee_pager_pmem) link;
};

/* The list of physical pages. The first page in the list is the oldest */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);

static struct tee_pager_pmem_head tee_pager_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);

static struct tee_pager_pmem_head tee_pager_rw_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_rw_pmem_head);

/* Number of pages to hide in one call to tee_pager_hide_pages() */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)

/* Number of registered physical pages, used when hiding pages. */
static size_t tee_pager_npages;

#ifdef CFG_WITH_STATS
static struct tee_pager_stats pager_stats;

static inline void incr_ro_hits(void)
{
	pager_stats.ro_hits++;
}

static inline void incr_rw_hits(void)
{
	pager_stats.rw_hits++;
}

static inline void incr_hidden_hits(void)
{
	pager_stats.hidden_hits++;
}

static inline void incr_zi_released(void)
{
	pager_stats.zi_released++;
}

static inline void incr_npages_all(void)
{
	pager_stats.npages_all++;
}

static inline void set_npages(void)
{
	pager_stats.npages = tee_pager_npages;
}

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	*stats = pager_stats;

	pager_stats.hidden_hits = 0;
	pager_stats.ro_hits = 0;
	pager_stats.rw_hits = 0;
	pager_stats.zi_released = 0;
}

#else /* CFG_WITH_STATS */
static inline void incr_ro_hits(void) { }
static inline void incr_rw_hits(void) { }
static inline void incr_hidden_hits(void) { }
static inline void incr_zi_released(void) { }
static inline void incr_npages_all(void) { }
static inline void set_npages(void) { }

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	memset(stats, 0, sizeof(struct tee_pager_stats));
}
#endif /* CFG_WITH_STATS */

/*
 * Reference to the translation table used to map the virtual memory range
 * covered by the pager.
 */
static struct core_mmu_table_info tbl_info;

static unsigned pager_lock = SPINLOCK_UNLOCK;

/* Defines the range of the alias area */
static tee_mm_entry_t *pager_alias_area;
/*
 * Physical pages are added to the alias area in a stack-like fashion.
 * @pager_alias_next_free holds the address of the next free entry, or 0
 * when the alias area has been exhausted.
 */
static uintptr_t pager_alias_next_free;

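/*
 * Registers the virtual range used for aliasing physical pages while
 * their content is populated. The range must be page aligned and covered
 * by a translation table with a small page granule; any existing mappings
 * in the range are cleared.
 */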
void tee_pager_set_alias_area(tee_mm_entry_t *mm)
{
	struct core_mmu_table_info ti;
	size_t tbl_va_size;
	unsigned idx;
	unsigned last_idx;
	vaddr_t smem = tee_mm_get_smem(mm);
	size_t nbytes = tee_mm_get_bytes(mm);

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);

	TEE_ASSERT(!pager_alias_area);
	if (!core_mmu_find_table(smem, UINT_MAX, &ti)) {
		DMSG("Can't find translation table");
		panic();
	}
	if ((1 << ti.shift) != SMALL_PAGE_SIZE) {
		DMSG("Unsupported page size in translation table %u",
		     1 << ti.shift);
		panic();
	}

	tbl_va_size = (1 << ti.shift) * ti.num_entries;
	if (!core_is_buffer_inside(smem, nbytes,
				   ti.va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
			smem, nbytes, ti.va_base, tbl_va_size);
		panic();
	}

	TEE_ASSERT(!(smem & SMALL_PAGE_MASK));
	TEE_ASSERT(!(nbytes & SMALL_PAGE_MASK));

	pager_alias_area = mm;
	pager_alias_next_free = smem;

	/* Clear all mappings in the alias area */
	idx = core_mmu_va2idx(&ti, smem);
	last_idx = core_mmu_va2idx(&ti, smem + nbytes);
	for (; idx < last_idx; idx++)
		core_mmu_set_entry(&ti, idx, 0, 0);
}

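/*
 * Maps the physical page @pa read/write at the next free slot of the
 * alias area and returns the corresponding virtual address. Slots are
 * handed out in a stack-like fashion; once the area is exhausted
 * @pager_alias_next_free is set to 0.
 */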
static void *pager_add_alias_page(paddr_t pa)
{
	unsigned idx;
	struct core_mmu_table_info ti;
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
			TEE_MATTR_CACHE_DEFAULT | TEE_MATTR_SECURE |
			TEE_MATTR_PRW;

	DMSG("0x%" PRIxPA, pa);

	TEE_ASSERT(pager_alias_next_free);
	if (!core_mmu_find_table(pager_alias_next_free, UINT_MAX, &ti))
		panic();
	idx = core_mmu_va2idx(&ti, pager_alias_next_free);
	core_mmu_set_entry(&ti, idx, pa, attr);
	pager_alias_next_free += SMALL_PAGE_SIZE;
	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
				      tee_mm_get_bytes(pager_alias_area)))
		pager_alias_next_free = 0;
	return (void *)core_mmu_idx2va(&ti, idx);
}

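/*
 * Registers a pager area covering the virtual memory range of @mm.
 * Read-only areas must supply a backing store and its page hashes, while
 * read/write areas must not. Returns false if the range isn't covered by
 * the pager translation table or if allocation fails.
 */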
bool tee_pager_add_area(tee_mm_entry_t *mm, uint32_t flags, const void *store,
		const void *hashes)
{
	struct tee_pager_area *area;
	size_t tbl_va_size;

	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
		tee_mm_get_smem(mm),
		tee_mm_get_smem(mm) + (mm->size << mm->pool->shift),
		flags, store, hashes);

	if (flags & TEE_PAGER_AREA_RO)
		TEE_ASSERT(store && hashes);
	else if (flags & TEE_PAGER_AREA_RW)
		TEE_ASSERT(!store && !hashes);
	else
		panic();

	if (!tbl_info.num_entries) {
		if (!core_mmu_find_table(tee_mm_get_smem(mm), UINT_MAX,
					&tbl_info))
			return false;
		if ((1 << tbl_info.shift) != SMALL_PAGE_SIZE) {
			DMSG("Unsupported page size in translation table %u",
			     1 << tbl_info.shift);
			return false;
		}
	}

	tbl_va_size = (1 << tbl_info.shift) * tbl_info.num_entries;
	if (!core_is_buffer_inside(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
				   tbl_info.va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
			tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
			tbl_info.va_base, tbl_va_size);
		return false;
	}

	area = malloc(sizeof(struct tee_pager_area));
	if (!area)
		return false;

	area->mm = mm;
	area->flags = flags;
	area->store = store;
	area->hashes = hashes;
	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
	return true;
}

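/* Returns the pager area covering @va, or NULL if @va isn't paged. */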
static struct tee_pager_area *tee_pager_find_area(vaddr_t va)
{
	struct tee_pager_area *area;

	TAILQ_FOREACH(area, &tee_pager_area_head, link) {
		tee_mm_entry_t *mm = area->mm;
		size_t offset = (va - mm->pool->lo) >> mm->pool->shift;

		if (offset >= mm->offset && offset < (mm->offset + mm->size))
			return area;
	}
	return NULL;
}

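/*
 * Translates the TEE_PAGER_AREA_* flags of @area into mapping attributes:
 * always privileged read, privileged write unless the area is read-only
 * and privileged execute only if the area is executable.
 */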
static uint32_t get_area_mattr(struct tee_pager_area *area)
{
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
			TEE_MATTR_CACHE_DEFAULT | TEE_MATTR_SECURE |
			TEE_MATTR_PR;

	if (!(area->flags & TEE_PAGER_AREA_RO))
		attr |= TEE_MATTR_PW;
	if (area->flags & TEE_PAGER_AREA_X)
		attr |= TEE_MATTR_PX;

	return attr;
}

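/*
 * Fills the aliased physical page with the content belonging to @page_va:
 * copied from the backing store for read-only areas, zero-filled for
 * read/write areas.
 */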
static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
			void *va_alias)
{
	size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;

	if (area->store) {
		size_t rel_pg_idx = pg_idx - area->mm->offset;
		const void *stored_page = area->store +
					  rel_pg_idx * SMALL_PAGE_SIZE;

		memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
		incr_ro_hits();
	} else {
		memset(va_alias, 0, SMALL_PAGE_SIZE);
		incr_rw_hits();
	}
}

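/*
 * Verifies a page loaded from a backing store against its SHA-256 hash
 * and panics on mismatch. Pages without a backing store aren't verified.
 */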
static void tee_pager_verify_page(struct tee_pager_area *area, vaddr_t page_va,
			void *va_alias)
{
	size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;

	if (area->store) {
		size_t rel_pg_idx = pg_idx - area->mm->offset;
		const void *hash = area->hashes +
				   rel_pg_idx * TEE_SHA256_HASH_SIZE;

		if (hash_sha256_check(hash, va_alias, SMALL_PAGE_SIZE) !=
				TEE_SUCCESS) {
			EMSG("PH 0x%" PRIxVA " failed", page_va);
			panic();
		}
	}
}

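/*
 * If @page_va refers to a hidden page, restores its mapping with the
 * attributes of its area and moves the page to the back of the list.
 * Returns true if a hidden page was made accessible again.
 */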
static bool tee_pager_unhide_page(vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);

		if (!(attr & TEE_MATTR_HIDDEN_BLOCK))
			continue;

		if (core_mmu_va2idx(&tbl_info, page_va) == pmem->pgidx) {
			/* page is hidden, show and move to back */
			core_mmu_set_entry(&tbl_info, pmem->pgidx, pa,
					   get_area_mattr(pmem->area));

			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);

			/* TODO only invalidate entry touched above */
			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

			incr_hidden_hits();
			return true;
		}
	}

	return false;
}

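/*
 * Hides up to TEE_PAGER_NHIDE of the oldest mapped pages by making them
 * inaccessible; the next access to such a page faults and
 * tee_pager_unhide_page() restores it, moving it to the back of the list.
 */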
static void tee_pager_hide_pages(void)
{
	struct tee_pager_pmem *pmem;
	size_t n = 0;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		if (n >= TEE_PAGER_NHIDE)
			break;
		n++;

		/*
		 * We can't hide pages when pmem->area isn't defined since
		 * unhiding requires pmem->area to be set.
		 */
		if (!pmem->area)
			continue;

		core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		core_mmu_set_entry(&tbl_info, pmem->pgidx, pa,
				   TEE_MATTR_HIDDEN_BLOCK);
	}

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

/*
 * Finds the mapped pmem backing @page_va, unmaps it and moves it back to
 * the pool of pageable pmem.
 * Returns true if the page was mapped, false otherwise.
 */
static bool tee_pager_release_one_zi(vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;
	unsigned pgidx;
	paddr_t pa;
	uint32_t attr;

	pgidx = core_mmu_va2idx(&tbl_info, page_va);
	core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);

#ifdef TEE_PAGER_DEBUG_PRINT
	DMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
#endif

	TAILQ_FOREACH(pmem, &tee_pager_rw_pmem_head, link) {
		if (pmem->pgidx != pgidx)
			continue;

		core_mmu_set_entry(&tbl_info, pgidx, pa, TEE_MATTR_PHYS_BLOCK);
		TAILQ_REMOVE(&tee_pager_rw_pmem_head, pmem, link);
		tee_pager_npages++;
		set_npages();
		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
		incr_zi_released();

		return true;
	}

	return false;
}

/* Finds the oldest page and remaps it for the new virtual address */
static bool tee_pager_get_page(struct tee_pager_abort_info *ai,
			struct tee_pager_area *area,
			struct tee_pager_pmem **pmem_ret, paddr_t *pa_ret)
{
	unsigned pgidx = core_mmu_va2idx(&tbl_info, ai->va);
	struct tee_pager_pmem *pmem;
	paddr_t pa;
	uint32_t attr;

	core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);

	assert(!(attr & (TEE_MATTR_VALID_BLOCK | TEE_MATTR_HIDDEN_BLOCK)));

	if (attr & TEE_MATTR_PHYS_BLOCK) {
		/*
		 * There's a pmem entry using this MMU entry, let's use
		 * that entry in the new mapping.
		 */
		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
			if (pmem->pgidx == pgidx)
				break;
		}
		if (!pmem) {
			DMSG("Couldn't find pmem for pgidx %u", pgidx);
			return false;
		}
	} else {
		pmem = TAILQ_FIRST(&tee_pager_pmem_head);
		if (!pmem) {
			DMSG("No pmem entries");
			return false;
		}
		core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);
		core_mmu_set_entry(&tbl_info, pmem->pgidx, 0, 0);
	}

	pmem->pgidx = pgidx;
	pmem->area = area;
	core_mmu_set_entry(&tbl_info, pgidx, pa, TEE_MATTR_PHYS_BLOCK);

	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
	if (area->store) {
		/* move page to back */
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	} else {
		/* Move page to rw list */
		TEE_ASSERT(tee_pager_npages > 0);
		tee_pager_npages--;
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_rw_pmem_head, pmem, link);
	}

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	*pmem_ret = pmem;
	*pa_ret = pa;
	return true;
}

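/*
 * Returns true if the faulting access is already allowed by the current
 * mapping, which happens when another core has handled the fault before
 * this one got the lock. Panics on permission violations that paging
 * can't resolve (execute from non-executable or write to read-only).
 */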
static bool pager_check_access(struct tee_pager_abort_info *ai)
{
	unsigned pgidx = core_mmu_va2idx(&tbl_info, ai->va);
	uint32_t attr;

	core_mmu_get_entry(&tbl_info, pgidx, NULL, &attr);

	/* Not mapped */
	if (!(attr & TEE_MATTR_VALID_BLOCK))
		return false;

	/* Not readable, should not happen */
	if (!(attr & TEE_MATTR_PR)) {
		tee_pager_print_error_abort(ai);
		panic();
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		if (ai->abort_type == THREAD_ABORT_PREFETCH &&
		    !(attr & TEE_MATTR_PX)) {
			/* Attempting to execute from a non-executable page */
			tee_pager_print_error_abort(ai);
			panic();
		}
		/* Since the page is mapped now it's OK */
		return true;
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		if (!(attr & TEE_MATTR_PW)) {
			/* Attempting to write to an RO page */
			tee_pager_print_error_abort(ai);
			panic();
		}
		return true;
	default:
		/* Some fault we can't deal with */
		tee_pager_print_error_abort(ai);
		panic();
	}
}

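/*
 * Handles a translation or permission abort inside a pager area. A merely
 * hidden page is made accessible again; otherwise a physical page is
 * selected, populated and verified through the alias mapping and finally
 * mapped at the faulting address. Panics if the faulting address isn't
 * covered by any area. Runs with IRQs masked while holding @pager_lock.
 */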
void tee_pager_handle_fault(struct tee_pager_abort_info *ai)
{
	struct tee_pager_area *area;
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
	uint32_t exceptions;

#ifdef TEE_PAGER_DEBUG_PRINT
	tee_pager_print_abort(ai);
#endif

	/* check if the access is valid */
	area = tee_pager_find_area(ai->va);
	if (!area) {
		tee_pager_print_abort(ai);
		DMSG("Invalid addr 0x%" PRIxVA, ai->va);
		panic();
	}

	/*
	 * We're updating pages that can affect several active CPUs at a
	 * time below. We end up here because a thread tries to access some
	 * memory that isn't available. We have to be careful when making
	 * that memory available as other threads may succeed in accessing
	 * that address the moment after we've made it available.
	 *
	 * That means that we can't just map the memory and populate the
	 * page, instead we use the aliased mapping to populate the page
	 * and once everything is ready we map it.
	 */
	exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	cpu_spin_lock(&pager_lock);

	if (!tee_pager_unhide_page(page_va)) {
		struct tee_pager_pmem *pmem = NULL;
		paddr_t pa = 0;

		/*
		 * The page wasn't hidden, but some other core may have
		 * updated the table entry before we got here.
		 */
		if (pager_check_access(ai)) {
			/*
			 * The kind of access is OK with the current
			 * mapping, we're done here because the fault has
			 * already been dealt with by another core.
			 */
			goto out;
		}

		if (!tee_pager_get_page(ai, area, &pmem, &pa)) {
			tee_pager_print_abort(ai);
			panic();
		}

		/* load page code & data */
		tee_pager_load_page(area, page_va, pmem->va_alias);
		tee_pager_verify_page(area, page_va, pmem->va_alias);

		/*
		 * We've updated the page using the aliased mapping and
		 * some cache maintenance is now needed if it's an
		 * executable page.
		 *
		 * Since the d-cache is a Physically-indexed,
		 * physically-tagged (PIPT) cache we can clean the aliased
		 * address instead of the real virtual address.
		 *
		 * The i-cache can also be PIPT, but may be something else
		 * too; to keep it simple we invalidate the entire i-cache.
		 * As a future optimization we may invalidate only the
		 * aliased area if it's a PIPT cache, else the entire cache.
		 */
		if (area->flags & TEE_PAGER_AREA_X) {
			/*
			 * Doing these operations to LoUIS (Level of
			 * Unification, Inner Shareable) would be enough
			 */
			cache_maintenance_l1(DCACHE_AREA_CLEAN,
				pmem->va_alias, SMALL_PAGE_SIZE);

			cache_maintenance_l1(ICACHE_INVALIDATE, NULL, 0);
		}

		core_mmu_set_entry(&tbl_info, pmem->pgidx, pa,
				   get_area_mattr(area));

#ifdef TEE_PAGER_DEBUG_PRINT
		DMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
		     core_mmu_idx2va(&tbl_info, pmem->pgidx), pa);
#endif
	}

	tee_pager_hide_pages();
out:
	cpu_spin_unlock(&pager_lock);
	thread_unmask_exceptions(exceptions);
}

#else /*CFG_WITH_PAGER*/

void __noreturn tee_pager_handle_fault(struct tee_pager_abort_info *ai)
{
	/*
	 * Until paging is supported, trap the CPU here.
	 */
	tee_pager_print_error_abort(ai);
	EMSG("Unexpected page fault! Trap CPU");
	panic();
}

#endif /*CFG_WITH_PAGER*/

#ifdef CFG_WITH_PAGER
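/*
 * Registers the physical pages currently mapped at @vaddr..@vaddr +
 * @npages * SMALL_PAGE_SIZE as pageable pages, assigning each an alias
 * page. Unmapped entries are skipped. With @unmap set the pages are made
 * inaccessible right away (TEE_MATTR_PHYS_BLOCK); otherwise they remain
 * mapped with the attributes of the area they belong to.
 */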
void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
{
	size_t n;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);

	/* setup memory */
	for (n = 0; n < npages; n++) {
		struct tee_pager_pmem *pmem;
		tee_vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
		unsigned pgidx = core_mmu_va2idx(&tbl_info, va);
		paddr_t pa;
		uint32_t attr;

		core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);

		/* Ignore unmapped pages/blocks */
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		pmem = malloc(sizeof(struct tee_pager_pmem));
		if (pmem == NULL) {
			DMSG("Can't allocate memory");
			panic();
		}

		pmem->pgidx = pgidx;
		pmem->va_alias = pager_add_alias_page(pa);

		if (unmap) {
			/*
			 * Note that we're making the page inaccessible
			 * with the TEE_MATTR_PHYS_BLOCK attribute to
			 * indicate that the descriptor still holds a valid
			 * physical address of a page.
			 */
			pmem->area = NULL;
			core_mmu_set_entry(&tbl_info, pgidx, pa,
					   TEE_MATTR_PHYS_BLOCK);
		} else {
			/*
			 * The page is still mapped, let's assign the area
			 * and update the protection bits accordingly.
			 */
			pmem->area = tee_pager_find_area(va);
			core_mmu_set_entry(&tbl_info, pgidx, pa,
					   get_area_mattr(pmem->area));
		}

		tee_pager_npages++;
		incr_npages_all();
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	/* Invalidate secure TLB */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

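/*
 * Releases the zero-initialized (RW) pages backing the page aligned range
 * @vaddr..@vaddr + @size so that their physical pages can be reused by
 * the pager. Panics if the range isn't page aligned. Runs with all
 * exceptions masked.
 */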
void tee_pager_release_zi(vaddr_t vaddr, size_t size)
{
	bool unmapped = false;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	if ((vaddr & SMALL_PAGE_MASK) || (size & SMALL_PAGE_MASK))
		panic();

	for (; size; vaddr += SMALL_PAGE_SIZE, size -= SMALL_PAGE_SIZE)
		unmapped |= tee_pager_release_one_zi(vaddr);

	/* Invalidate secure TLB */
	if (unmapped)
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	thread_set_exceptions(exceptions);
}

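/*
 * Allocates a pageable, zero-initialized virtual memory range of at least
 * @size bytes from tee_mm_vcore and registers it as a read/write pager
 * area. Returns NULL if @size is 0 or the allocation fails.
 */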
void *tee_pager_request_zi(size_t size)
{
	tee_mm_entry_t *mm;

	if (!size)
		return NULL;

	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
	if (!mm)
		return NULL;

	tee_pager_add_area(mm, TEE_PAGER_AREA_RW, NULL, NULL);

	return (void *)tee_mm_get_smem(mm);
}

#else /*CFG_WITH_PAGER*/

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	memset(stats, 0, sizeof(struct tee_pager_stats));
}

#endif /*CFG_WITH_PAGER*/