/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <kernel/abort.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/tz_proc.h>
#include <mm/core_mmu.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu_defs.h>
#include <mm/tee_pager.h>
#include <types_ext.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_defines.h>
#include <tee/tee_cryp_provider.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>

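/*
 * struct tee_pager_area - Represents a pageable memory area.
 *
 * @hashes	SHA-256 hashes of the pages in @store, used to verify
 *		read-only pages as they are paged in (NULL for read/write
 *		areas)
 * @store	backing store holding the read-only page content (NULL for
 *		read/write areas, which are zero-initialized on demand)
 * @flags	TEE_PAGER_AREA_* flags describing access rights
 * @mm		the virtual address range covered by this area
 */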
struct tee_pager_area {
	const uint8_t *hashes;
	const uint8_t *store;
	uint32_t flags;
	tee_mm_entry_t *mm;
	TAILQ_ENTRY(tee_pager_area) link;
};

static TAILQ_HEAD(tee_pager_area_head, tee_pager_area) tee_pager_area_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);

/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @pgidx	an index of the entry in tee_pager_tbl_info. The physical
 *		address is kept in that MMU entry even when the page isn't
 *		mapped, so there's always an MMU entry holding the physical
 *		address.
 *
 * @va_alias	Virtual address where the physical page is always aliased.
 *		Used during remapping of the page when the content needs to
 *		be updated before it's available at the new location.
 *
 * @area	a pointer to the pager area
 */
struct tee_pager_pmem {
	unsigned pgidx;
	void *va_alias;
	struct tee_pager_area *area;
	TAILQ_ENTRY(tee_pager_pmem) link;
};

/* The list of physical pages. The first page in the list is the oldest. */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);

static struct tee_pager_pmem_head tee_pager_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);

static struct tee_pager_pmem_head tee_pager_rw_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_rw_pmem_head);

/* Number of pages to hide at a time when hiding pages */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)

/* Number of registered physical pages, used when hiding pages. */
static size_t tee_pager_npages;

#ifdef CFG_WITH_STATS
static struct tee_pager_stats pager_stats;

static inline void incr_ro_hits(void)
{
	pager_stats.ro_hits++;
}

static inline void incr_rw_hits(void)
{
	pager_stats.rw_hits++;
}

static inline void incr_hidden_hits(void)
{
	pager_stats.hidden_hits++;
}

static inline void incr_zi_released(void)
{
	pager_stats.zi_released++;
}

static inline void incr_npages_all(void)
{
	pager_stats.npages_all++;
}

static inline void set_npages(void)
{
	pager_stats.npages = tee_pager_npages;
}

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	*stats = pager_stats;

	pager_stats.hidden_hits = 0;
	pager_stats.ro_hits = 0;
	pager_stats.rw_hits = 0;
	pager_stats.zi_released = 0;
}

#else /* CFG_WITH_STATS */
static inline void incr_ro_hits(void) { }
static inline void incr_rw_hits(void) { }
static inline void incr_hidden_hits(void) { }
static inline void incr_zi_released(void) { }
static inline void incr_npages_all(void) { }
static inline void set_npages(void) { }

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	memset(stats, 0, sizeof(struct tee_pager_stats));
}
#endif /* CFG_WITH_STATS */

struct core_mmu_table_info tee_pager_tbl_info;
static struct core_mmu_table_info pager_alias_tbl_info;

static unsigned pager_lock = SPINLOCK_UNLOCK;

/* Defines the range of the alias area */
static tee_mm_entry_t *pager_alias_area;
/*
 * Physical pages are added to the alias area in a stack-like fashion,
 * @pager_alias_next_free gives the address of the next free entry if
 * @pager_alias_next_free is != 0
 */
static uintptr_t pager_alias_next_free;

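/*
 * Assigns the virtual address range used to alias physical pages while
 * their content is being updated. Verifies that the range is page aligned
 * and covered by a small-page translation table, then clears all existing
 * mappings in the range. Must be set up before pages are added with
 * tee_pager_add_pages().
 */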
void tee_pager_set_alias_area(tee_mm_entry_t *mm)
{
	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
	size_t tbl_va_size;
	unsigned idx;
	unsigned last_idx;
	vaddr_t smem = tee_mm_get_smem(mm);
	size_t nbytes = tee_mm_get_bytes(mm);

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);

	TEE_ASSERT(!pager_alias_area);
	if (!ti->num_entries && !core_mmu_find_table(smem, UINT_MAX, ti)) {
		DMSG("Can't find translation table");
		panic();
	}
	if ((1 << ti->shift) != SMALL_PAGE_SIZE) {
		DMSG("Unsupported page size in translation table %u",
		     1 << ti->shift);
		panic();
	}

	tbl_va_size = (1 << ti->shift) * ti->num_entries;
	if (!core_is_buffer_inside(smem, nbytes,
				   ti->va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
			smem, nbytes, ti->va_base, tbl_va_size);
		panic();
	}

	TEE_ASSERT(!(smem & SMALL_PAGE_MASK));
	TEE_ASSERT(!(nbytes & SMALL_PAGE_MASK));

	pager_alias_area = mm;
	pager_alias_next_free = smem;

	/* Clear all mappings in the alias area */
	idx = core_mmu_va2idx(ti, smem);
	last_idx = core_mmu_va2idx(ti, smem + nbytes);
	for (; idx < last_idx; idx++)
		core_mmu_set_entry(ti, idx, 0, 0);
}

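/*
 * Maps the physical page at @pa read/write at the next free slot in the
 * alias area and returns the aliased virtual address. Slots are handed out
 * in a stack-like fashion; once the alias area is exhausted,
 * @pager_alias_next_free is set to 0.
 */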
static void *pager_add_alias_page(paddr_t pa)
{
	unsigned idx;
	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
			TEE_MATTR_SECURE | TEE_MATTR_PRW;

	DMSG("0x%" PRIxPA, pa);

	TEE_ASSERT(pager_alias_next_free && ti->num_entries);
	idx = core_mmu_va2idx(ti, pager_alias_next_free);
	core_mmu_set_entry(ti, idx, pa, attr);
	pager_alias_next_free += SMALL_PAGE_SIZE;
	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
				      tee_mm_get_bytes(pager_alias_area)))
		pager_alias_next_free = 0;
	return (void *)core_mmu_idx2va(ti, idx);
}

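/*
 * Registers a pageable area with the pager. Read-only areas must supply a
 * backing store and one hash per page, read/write areas must not; their
 * pages are zero-initialized when first paged in. Returns false if the
 * area doesn't fit in the paging translation table or if memory allocation
 * fails.
 */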
bool tee_pager_add_area(tee_mm_entry_t *mm, uint32_t flags, const void *store,
		const void *hashes)
{
	struct tee_pager_area *area;
	size_t tbl_va_size;
	uint32_t exceptions;

	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
		tee_mm_get_smem(mm),
		tee_mm_get_smem(mm) + (mm->size << mm->pool->shift),
		flags, store, hashes);

	if (flags & TEE_PAGER_AREA_RO)
		TEE_ASSERT(store && hashes);
	else if (flags & TEE_PAGER_AREA_RW)
		TEE_ASSERT(!store && !hashes);
	else
		panic();

	if (!tee_pager_tbl_info.num_entries) {
		if (!core_mmu_find_table(tee_mm_get_smem(mm), UINT_MAX,
					&tee_pager_tbl_info))
			return false;
		if ((1 << tee_pager_tbl_info.shift) != SMALL_PAGE_SIZE) {
			DMSG("Unsupported page size in translation table %u",
			     1 << tee_pager_tbl_info.shift);
			return false;
		}
	}

	tbl_va_size = (1 << tee_pager_tbl_info.shift) *
			tee_pager_tbl_info.num_entries;
	if (!core_is_buffer_inside(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
				   tee_pager_tbl_info.va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
			tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
			tee_pager_tbl_info.va_base, tbl_va_size);
		return false;
	}

	area = malloc(sizeof(struct tee_pager_area));
	if (!area)
		return false;

	area->mm = mm;
	area->flags = flags;
	area->store = store;
	area->hashes = hashes;

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	cpu_spin_lock(&pager_lock);

	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);

	cpu_spin_unlock(&pager_lock);
	thread_set_exceptions(exceptions);
	return true;
}

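/* Returns the registered area covering @va, or NULL if none does. */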
static struct tee_pager_area *tee_pager_find_area(vaddr_t va)
{
	struct tee_pager_area *area;

	TAILQ_FOREACH(area, &tee_pager_area_head, link) {
		tee_mm_entry_t *mm = area->mm;
		size_t offset = (va - mm->pool->lo) >> mm->pool->shift;

		if (offset >= mm->offset && offset < (mm->offset + mm->size))
			return area;
	}
	return NULL;
}

static uint32_t get_area_mattr(struct tee_pager_area *area)
{
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
			TEE_MATTR_SECURE | TEE_MATTR_PR;

	if (!(area->flags & TEE_PAGER_AREA_RO))
		attr |= TEE_MATTR_PW;
	if (area->flags & TEE_PAGER_AREA_X)
		attr |= TEE_MATTR_PX;

	return attr;
}

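/*
 * Fills the aliased page with content for @page_va: a copy from the
 * backing store for read-only areas, or zeroes for read/write areas.
 */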
static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
			void *va_alias)
{
	size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;

	if (area->store) {
		size_t rel_pg_idx = pg_idx - area->mm->offset;
		const void *stored_page = area->store +
					  rel_pg_idx * SMALL_PAGE_SIZE;

		memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
		incr_ro_hits();
	} else {
		memset(va_alias, 0, SMALL_PAGE_SIZE);
		incr_rw_hits();
	}
}

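/*
 * Verifies a freshly loaded read-only page against its stored SHA-256 hash
 * and panics on mismatch. Read/write areas have no hashes and are skipped.
 */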
static void tee_pager_verify_page(struct tee_pager_area *area, vaddr_t page_va,
			void *va_alias)
{
	size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;

	if (area->store) {
		size_t rel_pg_idx = pg_idx - area->mm->offset;
		const void *hash = area->hashes +
				   rel_pg_idx * TEE_SHA256_HASH_SIZE;

		if (hash_sha256_check(hash, va_alias, SMALL_PAGE_SIZE) !=
				TEE_SUCCESS) {
			EMSG("PH 0x%" PRIxVA " failed", page_va);
			panic();
		}
	}
}

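/*
 * If the page at @page_va is resident but hidden, re-enables its mapping
 * and moves it to the back of the pmem list (most recently used).
 * Returns true if such a page was found, false otherwise.
 */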
static bool tee_pager_unhide_page(vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		core_mmu_get_entry(&tee_pager_tbl_info, pmem->pgidx,
				   &pa, &attr);

		if (!(attr & TEE_MATTR_HIDDEN_BLOCK))
			continue;

		if (core_mmu_va2idx(&tee_pager_tbl_info, page_va) ==
		    pmem->pgidx) {
			/* page is hidden, show and move to back */
			core_mmu_set_entry(&tee_pager_tbl_info, pmem->pgidx, pa,
					   get_area_mattr(pmem->area));

			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);

			/* TODO only invalidate entry touched above */
			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

			incr_hidden_hits();
			return true;
		}
	}

	return false;
}

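/*
 * Hides up to TEE_PAGER_NHIDE of the oldest resident pages by marking
 * their entries TEE_MATTR_HIDDEN_BLOCK. The next access to a hidden page
 * faults and tee_pager_unhide_page() moves it to the back of the list, so
 * hiding acts as a cheap "recently used" probe that keeps the eviction
 * order roughly least-recently-used.
 */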
static void tee_pager_hide_pages(void)
{
	struct tee_pager_pmem *pmem;
	size_t n = 0;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		if (n >= TEE_PAGER_NHIDE)
			break;
		n++;

		/*
		 * We cannot hide pages when pmem->area is not defined,
		 * since unhiding requires pmem->area to be set.
		 */
		if (!pmem->area)
			continue;

		core_mmu_get_entry(&tee_pager_tbl_info, pmem->pgidx,
				   &pa, &attr);
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		core_mmu_set_entry(&tee_pager_tbl_info, pmem->pgidx, pa,
				   TEE_MATTR_HIDDEN_BLOCK);
	}

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

/*
 * Finds a mapped pmem, makes it inaccessible and moves it back to the
 * pageable pmem list.
 * Returns false if the page was not mapped, true if it was.
 */
static bool tee_pager_release_one_zi(vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;
	unsigned pgidx;
	paddr_t pa;
	uint32_t attr;

	pgidx = core_mmu_va2idx(&tee_pager_tbl_info, page_va);
	core_mmu_get_entry(&tee_pager_tbl_info, pgidx, &pa, &attr);

#ifdef TEE_PAGER_DEBUG_PRINT
	DMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
#endif

	TAILQ_FOREACH(pmem, &tee_pager_rw_pmem_head, link) {
		if (pmem->pgidx != pgidx)
			continue;

		core_mmu_set_entry(&tee_pager_tbl_info, pgidx, pa,
				   TEE_MATTR_PHYS_BLOCK);
		TAILQ_REMOVE(&tee_pager_rw_pmem_head, pmem, link);
		tee_pager_npages++;
		set_npages();
		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
		incr_zi_released();

		return true;
	}

	return false;
}

/*
 * Finds a physical page for the faulting address: reuses the pmem already
 * tied to the MMU entry if there is one, otherwise evicts the oldest page
 * and remaps it for the new virtual address.
 */
static bool tee_pager_get_page(struct abort_info *ai,
			struct tee_pager_area *area,
			struct tee_pager_pmem **pmem_ret, paddr_t *pa_ret)
{
	unsigned pgidx = core_mmu_va2idx(&tee_pager_tbl_info, ai->va);
	struct tee_pager_pmem *pmem;
	paddr_t pa;
	uint32_t attr;

	core_mmu_get_entry(&tee_pager_tbl_info, pgidx, &pa, &attr);

	assert(!(attr & (TEE_MATTR_VALID_BLOCK | TEE_MATTR_HIDDEN_BLOCK)));

	if (attr & TEE_MATTR_PHYS_BLOCK) {
		/*
		 * There's a pmem entry using this MMU entry, let's use
		 * that entry in the new mapping.
		 */
		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
			if (pmem->pgidx == pgidx)
				break;
		}
		if (!pmem) {
			DMSG("Couldn't find pmem for pgidx %u", pgidx);
			return false;
		}
	} else {
		pmem = TAILQ_FIRST(&tee_pager_pmem_head);
		if (!pmem) {
			DMSG("No pmem entries");
			return false;
		}
		core_mmu_get_entry(&tee_pager_tbl_info, pmem->pgidx,
				   &pa, &attr);
		core_mmu_set_entry(&tee_pager_tbl_info, pmem->pgidx, 0, 0);
	}

	pmem->pgidx = pgidx;
	pmem->area = area;
	core_mmu_set_entry(&tee_pager_tbl_info, pgidx, pa,
			   TEE_MATTR_PHYS_BLOCK);

	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
	if (area->store) {
		/* Move page to back */
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	} else {
		/* Move page to rw list */
		TEE_ASSERT(tee_pager_npages > 0);
		tee_pager_npages--;
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_rw_pmem_head, pmem, link);
	}

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	*pmem_ret = pmem;
	*pa_ret = pa;
	return true;
}

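/*
 * Re-checks the MMU entry for the aborted access in case another core has
 * already handled the fault. Returns true if the current mapping satisfies
 * the access, false if the page is still unmapped; panics on genuine
 * permission violations.
 */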
static bool pager_check_access(struct abort_info *ai)
{
	unsigned pgidx = core_mmu_va2idx(&tee_pager_tbl_info, ai->va);
	uint32_t attr;

	core_mmu_get_entry(&tee_pager_tbl_info, pgidx, NULL, &attr);

	/* Not mapped */
	if (!(attr & TEE_MATTR_VALID_BLOCK))
		return false;

	/* Not readable, should not happen */
	if (!(attr & TEE_MATTR_PR)) {
		abort_print_error(ai);
		panic();
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		if (ai->abort_type == ABORT_TYPE_PREFETCH &&
		    !(attr & TEE_MATTR_PX)) {
			/* Attempting to execute from a non-executable page */
			abort_print_error(ai);
			panic();
		}
		/* Since the page is mapped now it's OK */
		return true;
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		if (!(attr & TEE_MATTR_PW)) {
			/* Attempting to write to an RO page */
			abort_print_error(ai);
			panic();
		}
		return true;
	default:
		/* Some fault we can't deal with */
		abort_print_error(ai);
		panic();
	}
}

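/*
 * Handles a pager abort. In outline:
 * 1. Find the area covering the faulting address (panic if there is none).
 * 2. If the page is merely hidden, unhide it and we're done.
 * 3. Otherwise re-check the mapping in case another core already fixed it.
 * 4. If not, pick a physical page (tee_pager_get_page()), fill and verify
 *    it through the aliased mapping, do any needed cache maintenance, and
 *    finally install the real mapping.
 * 5. Hide a batch of old pages to keep usage information fresh.
 * All of this runs with IRQs masked and the pager spinlock held.
 */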
void tee_pager_handle_fault(struct abort_info *ai)
{
	struct tee_pager_area *area;
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
	uint32_t exceptions;

#ifdef TEE_PAGER_DEBUG_PRINT
	abort_print(ai);
#endif

	/*
	 * We're updating pages that can affect several active CPUs at a
	 * time below. We end up here because a thread tries to access some
	 * memory that isn't available. We have to be careful when making
	 * that memory available as other threads may succeed in accessing
	 * that address the moment after we've made it available.
	 *
	 * That means that we can't just map the memory and populate the
	 * page, instead we use the aliased mapping to populate the page
	 * and once everything is ready we map it.
	 */
	exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	cpu_spin_lock(&pager_lock);

	/* check if the access is valid */
	area = tee_pager_find_area(ai->va);
	if (!area) {
		abort_print_error(ai);
		DMSG("Invalid addr 0x%" PRIxVA, ai->va);
		panic();
	}

	if (!tee_pager_unhide_page(page_va)) {
		struct tee_pager_pmem *pmem = NULL;
		paddr_t pa = 0;

		/*
		 * The page wasn't hidden, but some other core may have
		 * updated the table entry before we got here.
		 */
		if (pager_check_access(ai)) {
			/*
			 * This kind of access is OK with the mapping,
			 * we're done here because the fault has already
			 * been dealt with by another core.
			 */
			goto out;
		}

		if (!tee_pager_get_page(ai, area, &pmem, &pa)) {
			abort_print(ai);
			panic();
		}

		/* load page code & data */
		tee_pager_load_page(area, page_va, pmem->va_alias);
		tee_pager_verify_page(area, page_va, pmem->va_alias);

		/*
		 * We've updated the page using the aliased mapping and
		 * some cache maintenance is now needed if it's an
		 * executable page.
		 *
		 * Since the d-cache is a physically-indexed,
		 * physically-tagged (PIPT) cache we can clean the aliased
		 * address instead of the real virtual address.
		 *
		 * The i-cache can also be PIPT, but may be something else
		 * too; to keep it simple we invalidate the entire i-cache.
		 * As a future optimization we may invalidate only the
		 * aliased area if it's a PIPT cache, else the entire cache.
		 */
		if (area->flags & TEE_PAGER_AREA_X) {
			/*
			 * Doing these operations to LoUIS (Level of
			 * Unification, Inner Shareable) would be enough
			 */
			cache_maintenance_l1(DCACHE_AREA_CLEAN,
				pmem->va_alias, SMALL_PAGE_SIZE);

			cache_maintenance_l1(ICACHE_INVALIDATE, NULL, 0);
		}

		core_mmu_set_entry(&tee_pager_tbl_info, pmem->pgidx, pa,
				   get_area_mattr(area));

#ifdef TEE_PAGER_DEBUG_PRINT
		DMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
		     core_mmu_idx2va(&tee_pager_tbl_info, pmem->pgidx), pa);
#endif

	}

	tee_pager_hide_pages();
out:
	cpu_spin_unlock(&pager_lock);
	thread_unmask_exceptions(exceptions);
}

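/*
 * Registers the physical pages backing @npages small pages starting at
 * @vaddr with the pager and gives each one an alias mapping. With
 * unmap == true the pages are made inaccessible right away and become
 * available for paging; with unmap == false they stay mapped and are
 * assigned to the area covering them.
 */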
void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
{
	size_t n;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);

	/* Set up memory */
	for (n = 0; n < npages; n++) {
		struct tee_pager_pmem *pmem;
		tee_vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
		unsigned pgidx = core_mmu_va2idx(&tee_pager_tbl_info, va);
		paddr_t pa;
		uint32_t attr;

		core_mmu_get_entry(&tee_pager_tbl_info, pgidx, &pa, &attr);

		/* Ignore unmapped pages/blocks */
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		pmem = malloc(sizeof(struct tee_pager_pmem));
		if (pmem == NULL) {
			DMSG("Can't allocate memory");
			panic();
		}

		pmem->pgidx = pgidx;
		pmem->va_alias = pager_add_alias_page(pa);

		if (unmap) {
			/*
			 * Note that we're making the page inaccessible
			 * with the TEE_MATTR_PHYS_BLOCK attribute to
			 * indicate that the descriptor still holds a valid
			 * physical address of a page.
			 */
			pmem->area = NULL;
			core_mmu_set_entry(&tee_pager_tbl_info, pgidx, pa,
					   TEE_MATTR_PHYS_BLOCK);
		} else {
			/*
			 * The page is still mapped, let's assign the area
			 * and update the protection bits accordingly.
			 */
			pmem->area = tee_pager_find_area(va);
			core_mmu_set_entry(&tee_pager_tbl_info, pgidx, pa,
					   get_area_mattr(pmem->area));
		}

		tee_pager_npages++;
		incr_npages_all();
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	/* Invalidate secure TLB */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

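/*
 * Releases the paged read/write (zero-initialized) pages in the given
 * page-aligned range back to the pool of pageable pages. Their content is
 * discarded; the next access faults in a zeroed page again.
 */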
void tee_pager_release_zi(vaddr_t vaddr, size_t size)
{
	bool unmapped = false;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	cpu_spin_lock(&pager_lock);

	if ((vaddr & SMALL_PAGE_MASK) || (size & SMALL_PAGE_MASK))
		panic();

	for (; size; vaddr += SMALL_PAGE_SIZE, size -= SMALL_PAGE_SIZE)
		unmapped |= tee_pager_release_one_zi(vaddr);

	/* Invalidate secure TLB */
	if (unmapped)
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	cpu_spin_unlock(&pager_lock);
	thread_set_exceptions(exceptions);
}

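/*
 * Returns a zero-initialized, demand-paged buffer of at least @size bytes
 * allocated from the vcore virtual address space, or NULL on failure.
 *
 * Illustrative usage (hypothetical caller, not part of this file):
 *
 *	void *buf = tee_pager_request_zi(2 * SMALL_PAGE_SIZE);
 *
 *	if (buf) {
 *		... use buf ...
 *		tee_pager_release_zi((vaddr_t)buf, 2 * SMALL_PAGE_SIZE);
 *	}
 */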
void *tee_pager_request_zi(size_t size)
{
	tee_mm_entry_t *mm;

	if (!size)
		return NULL;

	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
	if (!mm)
		return NULL;

	tee_pager_add_area(mm, TEE_PAGER_AREA_RW, NULL, NULL);

	return (void *)tee_mm_get_smem(mm);
}