/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <assert.h>
#include <kernel/abort.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/tz_proc.h>
#include <mm/core_mmu.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu_defs.h>
#include <mm/tee_pager.h>
#include <types_ext.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_defines.h>
#include <tee/tee_cryp_provider.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>

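/*
 * struct tee_pager_area - Represents a virtual memory range handled by the
 * pager.
 *
 * @hashes	SHA-256 hashes of the pages in the backing store (read-only
 *		areas), or NULL for read/write areas.
 * @store	backing store holding the page content (read-only areas), or
 *		NULL for read/write areas which are zero initialized on
 *		demand.
 * @flags	TEE_PAGER_AREA_* flags describing the area.
 * @base	virtual base address of the area.
 * @size	size of the area in bytes.
 */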
struct tee_pager_area {
	const uint8_t *hashes;
	const uint8_t *store;
	uint32_t flags;
	vaddr_t base;
	size_t size;
	TAILQ_ENTRY(tee_pager_area) link;
};

static TAILQ_HEAD(tee_pager_area_head, tee_pager_area) tee_pager_area_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);

#define INVALID_PGIDX	UINT_MAX

/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @pgidx	an index of the entry in tee_pager_tbl_info.
 * @va_alias	Virtual address where the physical page is always aliased.
 *		Used during remapping of the page when the content needs to
 *		be updated before it's available at the new location.
 * @area	a pointer to the pager area
 */
struct tee_pager_pmem {
	unsigned pgidx;
	void *va_alias;
	struct tee_pager_area *area;
	TAILQ_ENTRY(tee_pager_pmem) link;
};

/* The list of physical pages. The first page in the list is the oldest */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);

static struct tee_pager_pmem_head tee_pager_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);

static struct tee_pager_pmem_head tee_pager_rw_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_rw_pmem_head);

/* Maximum number of pages that are hidden at a time */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)

/* Number of registered physical pages, used when hiding pages. */
static size_t tee_pager_npages;

#ifdef CFG_WITH_STATS
static struct tee_pager_stats pager_stats;

static inline void incr_ro_hits(void)
{
	pager_stats.ro_hits++;
}

static inline void incr_rw_hits(void)
{
	pager_stats.rw_hits++;
}

static inline void incr_hidden_hits(void)
{
	pager_stats.hidden_hits++;
}

static inline void incr_zi_released(void)
{
	pager_stats.zi_released++;
}

static inline void incr_npages_all(void)
{
	pager_stats.npages_all++;
}

static inline void set_npages(void)
{
	pager_stats.npages = tee_pager_npages;
}

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	*stats = pager_stats;

	pager_stats.hidden_hits = 0;
	pager_stats.ro_hits = 0;
	pager_stats.rw_hits = 0;
	pager_stats.zi_released = 0;
}

#else /* CFG_WITH_STATS */
static inline void incr_ro_hits(void) { }
static inline void incr_rw_hits(void) { }
static inline void incr_hidden_hits(void) { }
static inline void incr_zi_released(void) { }
static inline void incr_npages_all(void) { }
static inline void set_npages(void) { }

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	memset(stats, 0, sizeof(struct tee_pager_stats));
}
#endif /* CFG_WITH_STATS */

struct core_mmu_table_info tee_pager_tbl_info;
static struct core_mmu_table_info pager_alias_tbl_info;

static unsigned pager_lock = SPINLOCK_UNLOCK;

/* Defines the range of the alias area */
static tee_mm_entry_t *pager_alias_area;
/*
 * Physical pages are added to the alias area in a stack-like fashion.
 * @pager_alias_next_free holds the virtual address of the next free entry,
 * or 0 when there are no free entries left.
 */
static uintptr_t pager_alias_next_free;

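/*
 * tee_pager_set_alias_area() - Reserves the virtual address range used to
 * alias physical pages before they are mapped at their final address. The
 * supplied range must be page aligned and covered by a single translation
 * table with a small (4 KiB) page size. Any existing mappings in the range
 * are cleared.
 */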
void tee_pager_set_alias_area(tee_mm_entry_t *mm)
{
	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
	size_t tbl_va_size;
	unsigned idx;
	unsigned last_idx;
	vaddr_t smem = tee_mm_get_smem(mm);
	size_t nbytes = tee_mm_get_bytes(mm);

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);

	TEE_ASSERT(!pager_alias_area);
	if (!ti->num_entries && !core_mmu_find_table(smem, UINT_MAX, ti)) {
		DMSG("Can't find translation table");
		panic();
	}
	if ((1 << ti->shift) != SMALL_PAGE_SIZE) {
		DMSG("Unsupported page size in translation table %u",
		     1 << ti->shift);
		panic();
	}

	tbl_va_size = (1 << ti->shift) * ti->num_entries;
	if (!core_is_buffer_inside(smem, nbytes,
				   ti->va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
			smem, nbytes, ti->va_base, tbl_va_size);
		panic();
	}

	TEE_ASSERT(!(smem & SMALL_PAGE_MASK));
	TEE_ASSERT(!(nbytes & SMALL_PAGE_MASK));

	pager_alias_area = mm;
	pager_alias_next_free = smem;

	/* Clear all mappings in the alias area */
	idx = core_mmu_va2idx(ti, smem);
	last_idx = core_mmu_va2idx(ti, smem + nbytes);
	for (; idx < last_idx; idx++)
		core_mmu_set_entry(ti, idx, 0, 0);

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

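/*
 * pager_add_alias_page() - Maps the physical page @pa at the next free
 * entry in the alias area and returns the aliased virtual address. The
 * alias area must have been set with tee_pager_set_alias_area() first.
 */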
static void *pager_add_alias_page(paddr_t pa)
{
	unsigned idx;
	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
			TEE_MATTR_SECURE | TEE_MATTR_PRW;

	DMSG("0x%" PRIxPA, pa);

	TEE_ASSERT(pager_alias_next_free && ti->num_entries);
	idx = core_mmu_va2idx(ti, pager_alias_next_free);
	core_mmu_set_entry(ti, idx, pa, attr);
	pager_alias_next_free += SMALL_PAGE_SIZE;
	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
				      tee_mm_get_bytes(pager_alias_area)))
		pager_alias_next_free = 0;
	return (void *)core_mmu_idx2va(ti, idx);
}

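/*
 * tee_pager_add_area() - Registers a virtual memory range to be handled by
 * the pager. Read-only areas must supply a backing store and SHA-256 hashes
 * of the pages, read/write areas must not (their pages are zero initialized
 * on demand). Returns true on success and false if the area doesn't fit in
 * the pager translation table or if memory allocation fails.
 */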
bool tee_pager_add_area(tee_mm_entry_t *mm, uint32_t flags, const void *store,
		const void *hashes)
{
	struct tee_pager_area *area;
	size_t tbl_va_size;
	uint32_t exceptions;

	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
		tee_mm_get_smem(mm),
		tee_mm_get_smem(mm) + (mm->size << mm->pool->shift),
		flags, store, hashes);

	if (flags & TEE_PAGER_AREA_RO)
		TEE_ASSERT(store && hashes);
	else if (flags & TEE_PAGER_AREA_RW)
		TEE_ASSERT(!store && !hashes);
	else
		panic();

	if (!tee_pager_tbl_info.num_entries) {
		if (!core_mmu_find_table(tee_mm_get_smem(mm), UINT_MAX,
					&tee_pager_tbl_info))
			return false;
		if ((1 << tee_pager_tbl_info.shift) != SMALL_PAGE_SIZE) {
			DMSG("Unsupported page size in translation table %u",
			     1 << tee_pager_tbl_info.shift);
			return false;
		}
	}

	tbl_va_size = (1 << tee_pager_tbl_info.shift) *
			tee_pager_tbl_info.num_entries;
	if (!core_is_buffer_inside(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
				   tee_pager_tbl_info.va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
			tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
			tee_pager_tbl_info.va_base, tbl_va_size);
		return false;
	}

	area = malloc(sizeof(struct tee_pager_area));
	if (!area)
		return false;

	area->base = tee_mm_get_smem(mm);
	area->size = tee_mm_get_bytes(mm);
	area->flags = flags;
	area->store = store;
	area->hashes = hashes;

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	cpu_spin_lock(&pager_lock);

	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);

	cpu_spin_unlock(&pager_lock);
	thread_set_exceptions(exceptions);
	return true;
}

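/*
 * tee_pager_find_area() - Returns the registered pager area containing the
 * virtual address @va, or NULL if the address isn't handled by the pager.
 */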
static struct tee_pager_area *tee_pager_find_area(vaddr_t va)
{
	struct tee_pager_area *area;

	TAILQ_FOREACH(area, &tee_pager_area_head, link) {
		if (core_is_buffer_inside(va, 1, area->base, area->size))
			return area;
	}
	return NULL;
}

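/*
 * get_area_mattr() - Translates the TEE_PAGER_AREA_* flags of @area into
 * the TEE_MATTR_* attributes used when mapping one of its pages: always
 * cached, secure and privileged readable, with write and execute
 * permission added according to the area flags.
 */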
static uint32_t get_area_mattr(struct tee_pager_area *area)
{
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
			TEE_MATTR_SECURE | TEE_MATTR_PR;

	if (!(area->flags & TEE_PAGER_AREA_RO))
		attr |= TEE_MATTR_PW;
	if (area->flags & TEE_PAGER_AREA_X)
		attr |= TEE_MATTR_PX;

	return attr;
}

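/*
 * get_pmem_pa() - Returns the physical address of the page represented by
 * @pmem, looked up through its alias mapping.
 */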
static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
{
	paddr_t pa;
	unsigned idx;

	idx = core_mmu_va2idx(&pager_alias_tbl_info, (vaddr_t)pmem->va_alias);
	core_mmu_get_entry(&pager_alias_tbl_info, idx, &pa, NULL);
	return pa;
}

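/*
 * tee_pager_load_page() - Populates the physical page aliased at @va_alias
 * with the content for the page at @page_va: copied from the area's backing
 * store for read-only areas, or zero filled for read/write areas.
 */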
static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
			void *va_alias)
{
	if (area->store) {
		size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
		const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;

		memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
		incr_ro_hits();
	} else {
		memset(va_alias, 0, SMALL_PAGE_SIZE);
		incr_rw_hits();
	}
}

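/*
 * tee_pager_verify_page() - Checks the loaded page at @va_alias against the
 * stored SHA-256 hash for @page_va and panics on mismatch. Pages of areas
 * without a backing store (read/write areas) are not verified.
 */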
static void tee_pager_verify_page(struct tee_pager_area *area, vaddr_t page_va,
			void *va_alias)
{
	if (area->store) {
		size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
		const void *hash = area->hashes + idx * TEE_SHA256_HASH_SIZE;

		if (hash_sha256_check(hash, va_alias, SMALL_PAGE_SIZE) !=
				TEE_SUCCESS) {
			EMSG("PH 0x%" PRIxVA " failed", page_va);
			panic();
		}
	}
}

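/*
 * tee_pager_unhide_page() - If the page at @page_va is mapped but marked as
 * hidden, restore its real attributes, move it to the end of the pmem list
 * (most recently used) and return true. Returns false if the page wasn't
 * hidden.
 */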
static bool tee_pager_unhide_page(vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		if (pmem->pgidx == INVALID_PGIDX)
			continue;

		core_mmu_get_entry(&tee_pager_tbl_info, pmem->pgidx,
				   &pa, &attr);

		if (!(attr & TEE_MATTR_HIDDEN_BLOCK))
			continue;

		if (core_mmu_va2idx(&tee_pager_tbl_info, page_va) ==
		    pmem->pgidx) {
			/* Page is hidden, unhide it and move it to the back */
			assert(pa == get_pmem_pa(pmem));
			core_mmu_set_entry(&tee_pager_tbl_info, pmem->pgidx, pa,
					   get_area_mattr(pmem->area));

			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);

			/* TODO only invalidate entry touched above */
			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

			incr_hidden_hits();
			return true;
		}
	}

	return false;
}

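/*
 * tee_pager_hide_pages() - Marks up to TEE_PAGER_NHIDE of the oldest mapped
 * pages as hidden. The next access to a hidden page faults, which lets the
 * pager move recently used pages to the back of the list (an approximation
 * of least recently used ordering).
 */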
static void tee_pager_hide_pages(void)
{
	struct tee_pager_pmem *pmem;
	size_t n = 0;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		if (n >= TEE_PAGER_NHIDE)
			break;
		n++;

		/*
		 * We cannot hide pages when pmem->area is not defined, as
		 * unhiding requires pmem->area to be defined.
		 */
		if (!pmem->area)
			continue;

		core_mmu_get_entry(&tee_pager_tbl_info, pmem->pgidx,
				   &pa, &attr);
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		assert(pa == get_pmem_pa(pmem));
		core_mmu_set_entry(&tee_pager_tbl_info, pmem->pgidx, pa,
				   TEE_MATTR_HIDDEN_BLOCK);
	}

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

/*
 * Find the mapped pmem for @page_va, unmap it and move it back to the pool
 * of pageable pmem.
 * Return false if the page was not mapped, and true if it was.
 */
static bool tee_pager_release_one_zi(vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;
	unsigned pgidx;
	paddr_t pa;
	uint32_t attr;

	pgidx = core_mmu_va2idx(&tee_pager_tbl_info, page_va);
	core_mmu_get_entry(&tee_pager_tbl_info, pgidx, &pa, &attr);

#ifdef TEE_PAGER_DEBUG_PRINT
	DMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
#endif

	TAILQ_FOREACH(pmem, &tee_pager_rw_pmem_head, link) {
		if (pmem->pgidx != pgidx)
			continue;

		assert(pa == get_pmem_pa(pmem));
		core_mmu_set_entry(&tee_pager_tbl_info, pgidx, 0, 0);
		TAILQ_REMOVE(&tee_pager_rw_pmem_head, pmem, link);
		pmem->area = NULL;
		pmem->pgidx = INVALID_PGIDX;
		tee_pager_npages++;
		set_npages();
		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
		incr_zi_released();

		return true;
	}

	return false;
}

/*
 * Finds the oldest page, removes its current mapping and returns it so that
 * it can be remapped at a new virtual address by the caller.
 */
static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
{
	struct tee_pager_pmem *pmem;

	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
	if (!pmem) {
		DMSG("No pmem entries");
		return NULL;
	}
	if (pmem->pgidx != INVALID_PGIDX) {
		core_mmu_set_entry(&tee_pager_tbl_info, pmem->pgidx, 0, 0);
		/* TODO only invalidate entries touched above */
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
	}

	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
	pmem->pgidx = INVALID_PGIDX;
	pmem->area = NULL;
	if (area->store) {
		/* Move the page to the back of the pageable list */
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	} else {
		/* Move the page to the rw list */
		TEE_ASSERT(tee_pager_npages > 0);
		tee_pager_npages--;
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_rw_pmem_head, pmem, link);
	}

	return pmem;
}

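/*
 * pager_check_access() - Returns true if the fault described by @ai is
 * already resolved, i.e. the page is mapped and the access is permitted by
 * the current attributes (typically because another core handled the fault
 * first). Panics on accesses that can never be satisfied, such as writing
 * to a read-only page or executing from a non-executable page.
 */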
static bool pager_check_access(struct abort_info *ai)
{
	unsigned pgidx = core_mmu_va2idx(&tee_pager_tbl_info, ai->va);
	uint32_t attr;

	core_mmu_get_entry(&tee_pager_tbl_info, pgidx, NULL, &attr);

	/* Not mapped */
	if (!(attr & TEE_MATTR_VALID_BLOCK))
		return false;

	/* Not readable, should not happen */
	if (!(attr & TEE_MATTR_PR)) {
		abort_print_error(ai);
		panic();
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_READ_PERMISSION:
		if (ai->abort_type == ABORT_TYPE_PREFETCH &&
		    !(attr & TEE_MATTR_PX)) {
			/* Attempting to execute from a non-executable page */
			abort_print_error(ai);
			panic();
		}
		/* Since the page is mapped now it's OK */
		return true;
	case CORE_MMU_FAULT_WRITE_PERMISSION:
		if (!(attr & TEE_MATTR_PW)) {
			/* Attempting to write to an RO page */
			abort_print_error(ai);
			panic();
		}
		return true;
	default:
		/* Some fault we can't deal with */
		abort_print_error(ai);
		panic();
	}
}

#ifdef CFG_TEE_CORE_DEBUG
static void stat_handle_fault(void)
{
	static size_t num_faults;
	static size_t min_npages = SIZE_MAX;
	static size_t total_min_npages = SIZE_MAX;

	num_faults++;
	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
		DMSG("nfaults %zu npages %zu (min %zu)",
		     num_faults, tee_pager_npages, min_npages);
		min_npages = tee_pager_npages; /* reset */
	}
	if (tee_pager_npages < min_npages)
		min_npages = tee_pager_npages;
	if (tee_pager_npages < total_min_npages)
		total_min_npages = tee_pager_npages;
}
#else
static void stat_handle_fault(void)
{
}
#endif

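/*
 * tee_pager_handle_fault() - Handles an abort on a pager-managed address,
 * described by @ai: the faulting page is either unhidden, or the oldest
 * physical page is reclaimed, populated and verified through the alias
 * mapping and then mapped at the faulting address. Panics if the address
 * isn't covered by a registered pager area.
 */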
void tee_pager_handle_fault(struct abort_info *ai)
{
	struct tee_pager_area *area;
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
	uint32_t exceptions;

#ifdef TEE_PAGER_DEBUG_PRINT
	abort_print(ai);
#endif

	/*
	 * We're updating pages that can affect several active CPUs at a
	 * time below. We end up here because a thread tries to access some
	 * memory that isn't available. We have to be careful when making
	 * that memory available as other threads may succeed in accessing
	 * that address the moment after we've made it available.
	 *
	 * That means that we can't just map the memory and populate the
	 * page, instead we use the aliased mapping to populate the page
	 * and once everything is ready we map it.
	 */
	exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	cpu_spin_lock(&pager_lock);

	stat_handle_fault();

	/* Check if the access is valid */
	area = tee_pager_find_area(ai->va);
	if (!area) {
		abort_print_error(ai);
		DMSG("Invalid addr 0x%" PRIxVA, ai->va);
		panic();
	}

	if (!tee_pager_unhide_page(page_va)) {
		struct tee_pager_pmem *pmem = NULL;

		/*
		 * The page wasn't hidden, but some other core may have
		 * updated the table entry before we got here.
		 */
		if (pager_check_access(ai)) {
			/*
			 * This kind of access is OK with the mapping, we're
			 * done here because the fault has already been
			 * dealt with by another core.
			 */
			goto out;
		}

		pmem = tee_pager_get_page(area);
		if (!pmem) {
			abort_print(ai);
			panic();
		}

		/* Load page code & data */
		tee_pager_load_page(area, page_va, pmem->va_alias);
		tee_pager_verify_page(area, page_va, pmem->va_alias);

		/*
		 * We've updated the page using the aliased mapping and
		 * some cache maintenance is now needed if it's an
		 * executable page.
		 *
		 * Since the d-cache is a physically-indexed,
		 * physically-tagged (PIPT) cache we can clean the aliased
		 * address instead of the real virtual address.
		 *
		 * The i-cache can also be PIPT, but it may be something
		 * else too, so to keep it simple we invalidate the entire
		 * i-cache. As a future optimization we may invalidate only
		 * the aliased area if it is a PIPT cache, else the entire
		 * cache.
		 */
		if (area->flags & TEE_PAGER_AREA_X) {
			/*
			 * Doing these operations to LoUIS (Level of
			 * Unification, Inner Shareable) would be enough
			 */
			cache_maintenance_l1(DCACHE_AREA_CLEAN,
				pmem->va_alias, SMALL_PAGE_SIZE);

			cache_maintenance_l1(ICACHE_INVALIDATE, NULL, 0);
		}

		pmem->area = area;
		pmem->pgidx = core_mmu_va2idx(&tee_pager_tbl_info, ai->va);
		core_mmu_set_entry(&tee_pager_tbl_info, pmem->pgidx,
				   get_pmem_pa(pmem), get_area_mattr(area));

#ifdef TEE_PAGER_DEBUG_PRINT
		DMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
		     core_mmu_idx2va(&tee_pager_tbl_info, pmem->pgidx),
				     get_pmem_pa(pmem));
#endif

	}

	tee_pager_hide_pages();
out:
	cpu_spin_unlock(&pager_lock);
	thread_unmask_exceptions(exceptions);
}

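/*
 * tee_pager_add_pages() - Hands @npages physical pages, currently mapped at
 * @vaddr and onwards, over to the pager. Each page gets an alias mapping.
 * If @unmap is true the pages are unmapped from their original location and
 * only used as spare pages, otherwise they stay mapped with the attributes
 * of the pager area covering them.
 */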
void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
{
	size_t n;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);

	/* setup memory */
	for (n = 0; n < npages; n++) {
		struct tee_pager_pmem *pmem;
		tee_vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
		unsigned pgidx = core_mmu_va2idx(&tee_pager_tbl_info, va);
		paddr_t pa;
		uint32_t attr;

		core_mmu_get_entry(&tee_pager_tbl_info, pgidx, &pa, &attr);

		/* Ignore unmapped pages/blocks */
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		pmem = malloc(sizeof(struct tee_pager_pmem));
		if (pmem == NULL) {
			DMSG("Can't allocate memory");
			panic();
		}

		pmem->va_alias = pager_add_alias_page(pa);

		if (unmap) {
			pmem->area = NULL;
			pmem->pgidx = INVALID_PGIDX;
			core_mmu_set_entry(&tee_pager_tbl_info, pgidx, 0, 0);
		} else {
			/*
			 * The page is still mapped, let's assign the area
			 * and update the protection bits accordingly.
			 */
			pmem->area = tee_pager_find_area(va);
			pmem->pgidx = pgidx;
			assert(pa == get_pmem_pa(pmem));
			core_mmu_set_entry(&tee_pager_tbl_info, pgidx, pa,
					   get_area_mattr(pmem->area));
		}

		tee_pager_npages++;
		incr_npages_all();
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	/* Invalidate secure TLB */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

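/*
 * tee_pager_release_zi() - Releases the zero-initialized (read/write) pages
 * mapped in the range @vaddr..@vaddr + @size: each mapped page is unmapped
 * and returned to the pool of pageable pages. The range must be page
 * aligned.
 */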
void tee_pager_release_zi(vaddr_t vaddr, size_t size)
{
	bool unmapped = false;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	cpu_spin_lock(&pager_lock);

	if ((vaddr & SMALL_PAGE_MASK) || (size & SMALL_PAGE_MASK))
		panic();

	for (; size; vaddr += SMALL_PAGE_SIZE, size -= SMALL_PAGE_SIZE)
		unmapped |= tee_pager_release_one_zi(vaddr);

	/* Invalidate secure TLB */
	if (unmapped)
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	cpu_spin_unlock(&pager_lock);
	thread_set_exceptions(exceptions);
}

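/*
 * tee_pager_request_zi() - Allocates a virtual memory range of at least
 * @size bytes from the core virtual memory pool and registers it as a
 * read/write (zero initialized on demand) pager area. Returns the start of
 * the range, or NULL on failure or if @size is 0.
 */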
void *tee_pager_request_zi(size_t size)
{
	tee_mm_entry_t *mm;

	if (!size)
		return NULL;

	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
	if (!mm)
		return NULL;

	tee_pager_add_area(mm, TEE_PAGER_AREA_RW, NULL, NULL);

	return (void *)tee_mm_get_smem(mm);
}