xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision abe38974ad2d4cbb72940f322210364fb3a9a490)
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <limits.h>
#include <inttypes.h>
#include <kernel/tee_common_unpg.h>
#include <kernel/tee_common.h>
#include <kernel/thread_defs.h>
#include <kernel/panic.h>
#include <mm/tee_mmu_defs.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_kta_trace.h>
#include <kernel/misc.h>
#include <kernel/tee_misc.h>
#include <mm/tee_pager.h>
#include <mm/tee_mm.h>
#include <mm/core_mmu.h>
#include <tee/arch_svc.h>
#include <arm.h>
#include <tee/tee_cryp_provider.h>
#include <tee_api_defines.h>
#include <utee_defines.h>
#include <trace.h>

struct tee_pager_abort_info {
	uint32_t abort_type;
	uint32_t fault_descr;
	vaddr_t va;
	uint32_t pc;
	struct thread_abort_regs *regs;
};

enum tee_pager_fault_type {
	TEE_PAGER_FAULT_TYPE_USER_TA_PANIC,
	TEE_PAGER_FAULT_TYPE_PAGEABLE,
	TEE_PAGER_FAULT_TYPE_IGNORE,
};

#ifdef CFG_WITH_PAGER
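/*
 * struct tee_pager_area - Represents a pageable memory area.
 *
 * @hashes	SHA-256 hash per page, read-only areas only
 * @store	backing store holding the page contents, read-only areas
 *		only; read/write areas are zero-initialized on first use
 * @flags	TEE_PAGER_AREA_* flags describing the area
 * @mm		tee_mm entry giving the virtual address range of the area
 */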
struct tee_pager_area {
	const uint8_t *hashes;
	const uint8_t *store;
	uint32_t flags;
	tee_mm_entry_t *mm;
	TAILQ_ENTRY(tee_pager_area) link;
};

static TAILQ_HEAD(tee_pager_area_head, tee_pager_area) tee_pager_area_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);

/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @pgidx	index of the entry in tbl_info. The physical address is
 *		kept in the MMU descriptor at this index, so even when the
 *		page isn't mapped there's always a descriptor holding the
 *		physical address.
 *
 * @area	a pointer to the pager area
 */
struct tee_pager_pmem {
	unsigned pgidx;
	struct tee_pager_area *area;
	TAILQ_ENTRY(tee_pager_pmem) link;
};

/* The list of physical pages. The first page in the list is the oldest */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);

static struct tee_pager_pmem_head tee_pager_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);

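/*
 * The list of physical pages permanently assigned to read/write areas.
 * Once a page has been moved here it is never recycled for another
 * virtual address.
 */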
static struct tee_pager_pmem_head tee_pager_rw_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_rw_pmem_head);

/* Number of pages to hide at a time */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)

/* Number of registered physical pages, used when hiding pages */
static size_t tee_pager_npages;

/*
 * Reference to translation table used to map the virtual memory range
 * covered by the pager.
 */
static struct core_mmu_table_info tbl_info;

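/*
 * tee_pager_add_area() - register a pageable area with the pager
 *
 * Read-only areas must supply a backing store and one SHA-256 hash per
 * page; read/write areas must supply neither. The area has to fit
 * inside the small-page translation table used by the pager.
 *
 * Illustrative call sequence only; the real callers, and the pool used
 * here (tee_mm_vcore), may differ:
 *
 *	tee_mm_entry_t *mm = tee_mm_alloc(&tee_mm_vcore, size);
 *
 *	if (!mm || !tee_pager_add_area(mm, TEE_PAGER_AREA_RO,
 *				       store, hashes))
 *		panic();
 */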
bool tee_pager_add_area(tee_mm_entry_t *mm, uint32_t flags, const void *store,
		const void *hashes)
{
	struct tee_pager_area *area;
	size_t tbl_va_size;

	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
		tee_mm_get_smem(mm),
		tee_mm_get_smem(mm) + (mm->size << mm->pool->shift),
		flags, store, hashes);

	if (flags & TEE_PAGER_AREA_RO)
		TEE_ASSERT(store && hashes);
	else if (flags & TEE_PAGER_AREA_RW)
		TEE_ASSERT(!store && !hashes);
	else
		panic();

	if (!tbl_info.num_entries) {
		if (!core_mmu_find_table(tee_mm_get_smem(mm), UINT_MAX,
					&tbl_info))
			return false;
		if ((1 << tbl_info.shift) != SMALL_PAGE_SIZE) {
			DMSG("Unsupported page size in translation table %u",
			     1 << tbl_info.shift);
			return false;
		}
	}

	tbl_va_size = (1 << tbl_info.shift) * tbl_info.num_entries;
	if (!core_is_buffer_inside(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
				   tbl_info.va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
			tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
			tbl_info.va_base, tbl_va_size);
		return false;
	}

	area = malloc(sizeof(struct tee_pager_area));
	if (!area)
		return false;

	area->mm = mm;
	area->flags = flags;
	area->store = store;
	area->hashes = hashes;
	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
	return true;
}

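/*
 * Returns the registered area covering the given virtual address, or
 * NULL if the address isn't pageable.
 */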
static struct tee_pager_area *tee_pager_find_area(vaddr_t va)
{
	struct tee_pager_area *area;

	TAILQ_FOREACH(area, &tee_pager_area_head, link) {
		tee_mm_entry_t *mm = area->mm;
		size_t offset = (va - mm->pool->lo) >> mm->pool->shift;

		if (offset >= mm->offset && offset < (mm->offset + mm->size))
			return area;
	}
	return NULL;
}

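/*
 * Note that all pager pages are currently mapped privileged
 * read/write/execute regardless of the area flags; the TODOs in
 * tee_pager_handle_fault() cover tightening this per area.
 */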
static uint32_t get_area_mattr(struct tee_pager_area *area __unused)
{
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
			TEE_MATTR_CACHE_DEFAULT | TEE_MATTR_SECURE;

	attr |= TEE_MATTR_PRWX;

	return attr;
}

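/*
 * Fills in the contents of a freshly mapped page: read-only areas are
 * copied from the backing store, read/write areas are zero-filled.
 */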
static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va)
{
	size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;

	if (area->store) {
		size_t rel_pg_idx = pg_idx - area->mm->offset;
		const void *stored_page = area->store +
					  rel_pg_idx * SMALL_PAGE_SIZE;

		memcpy((void *)page_va, stored_page, SMALL_PAGE_SIZE);
	} else {
		memset((void *)page_va, 0, SMALL_PAGE_SIZE);
	}
}

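/*
 * Checks a loaded page against its SHA-256 hash and panics on mismatch.
 * Read/write areas have no backing store and nothing to verify.
 */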
static void tee_pager_verify_page(struct tee_pager_area *area, vaddr_t page_va)
{
	size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;

	if (area->store) {
		size_t rel_pg_idx = pg_idx - area->mm->offset;
		const void *hash = area->hashes +
				   rel_pg_idx * TEE_SHA256_HASH_SIZE;

		if (hash_sha256_check(hash, (void *)page_va, SMALL_PAGE_SIZE) !=
				TEE_SUCCESS) {
			EMSG("PH 0x%" PRIxVA " failed", page_va);
			panic();
		}
	}
}
static bool tee_pager_unhide_page(vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);

		if (!(attr & TEE_MATTR_HIDDEN_BLOCK))
			continue;

		if (core_mmu_va2idx(&tbl_info, page_va) == pmem->pgidx) {
			/* page is hidden, show and move to back */
			core_mmu_set_entry(&tbl_info, pmem->pgidx, pa,
					   get_area_mattr(pmem->area));

			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);

			/* TODO only invalidate entry touched above */
			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
			return true;
		}
	}

	return false;
}

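/*
 * Hides up to TEE_PAGER_NHIDE of the oldest mapped pages. A later
 * access to a hidden page faults and unhides it again, which keeps the
 * page list roughly ordered by recency of use (an approximate LRU).
 */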
static void tee_pager_hide_pages(void)
{
	struct tee_pager_pmem *pmem;
	size_t n = 0;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		if (n >= TEE_PAGER_NHIDE)
			break;
		n++;
		core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		core_mmu_set_entry(&tbl_info, pmem->pgidx, pa,
				   TEE_MATTR_HIDDEN_BLOCK);
	}

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}
#endif /*CFG_WITH_PAGER*/

#ifdef ARM32
/* Returns true if the exception originated from user mode */
static bool tee_pager_is_user_exception(struct tee_pager_abort_info *ai)
{
	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif /*ARM32*/

#ifdef ARM32
/* Returns true if the exception originated from abort mode */
static bool tee_pager_is_abort_in_abort_handler(struct tee_pager_abort_info *ai)
{
	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_ABT;
}
#endif /*ARM32*/

static __unused const char *abort_type_to_str(uint32_t abort_type)
{
	if (abort_type == THREAD_ABORT_DATA)
		return "data";
	if (abort_type == THREAD_ABORT_PREFETCH)
		return "prefetch";
	return "undef";
}

static void tee_pager_print_user_abort(struct tee_pager_abort_info *ai __unused)
{
#ifdef CFG_TEE_CORE_TA_TRACE
	EMSG_RAW("\nUser TA %s-abort at address 0x%" PRIxVA,
		abort_type_to_str(ai->abort_type), ai->va);
#ifdef ARM32
	EMSG_RAW(" fsr 0x%08x  ttbr0 0x%08x   ttbr1 0x%08x   cidr 0x%X\n",
		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
		 read_contextidr());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x\n",
		 get_core_pos(), ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x     r4 0x%08x     r8 0x%08x    r12 0x%08x\n",
		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
	EMSG_RAW(" r1 0x%08x     r5 0x%08x     r9 0x%08x     sp 0x%08x\n",
		 ai->regs->r1, ai->regs->r5, ai->regs->r9, read_usr_sp());
	EMSG_RAW(" r2 0x%08x     r6 0x%08x    r10 0x%08x     lr 0x%08x\n",
		 ai->regs->r2, ai->regs->r6, ai->regs->r10, read_usr_lr());
	EMSG_RAW(" r3 0x%08x     r7 0x%08x    r11 0x%08x     pc 0x%08x\n",
		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif
	tee_ta_dump_current();
#endif
}

static void tee_pager_print_abort(struct tee_pager_abort_info *ai __unused)
{
#ifdef ARM32
	DMSG("%s-abort at 0x%" PRIxVA ": FSR 0x%x PC 0x%x TTBR0 0x%X CONTEXTIDR 0x%X",
	     abort_type_to_str(ai->abort_type),
	     ai->va, ai->fault_descr, ai->pc, read_ttbr0(), read_contextidr());
	DMSG("CPUID 0x%x SPSR_abt 0x%x",
	     read_mpidr(), read_spsr());
#endif /*ARM32*/
}

static void tee_pager_print_error_abort(
		struct tee_pager_abort_info *ai __unused)
{
#ifdef ARM32
	EMSG("%s-abort at 0x%" PRIxVA "\n"
	     "FSR 0x%x PC 0x%x TTBR0 0x%X CONTEXTIDR 0x%X\n"
	     "CPUID 0x%x CPSR 0x%x (read from SPSR)",
	     abort_type_to_str(ai->abort_type),
	     ai->va, ai->fault_descr, ai->pc, read_ttbr0(), read_contextidr(),
	     read_mpidr(), read_spsr());
#endif /*ARM32*/
}

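/*
 * Classifies an abort: aborts from user mode panic the TA, translation
 * and permission faults are pageable, and the remaining types either
 * trap the CPU or are ignored.
 */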
static enum tee_pager_fault_type tee_pager_get_fault_type(
		struct tee_pager_abort_info *ai)
{
	/* In a multithreaded configuration this section must be protected */
	if (tee_pager_is_user_exception(ai)) {
		tee_pager_print_user_abort(ai);
		DMSG("[TEE_PAGER] abort in User mode (TA will panic)");
		return TEE_PAGER_FAULT_TYPE_USER_TA_PANIC;
	}

	if (tee_pager_is_abort_in_abort_handler(ai)) {
		tee_pager_print_error_abort(ai);
		EMSG("[PAGER] abort in abort handler (trap CPU)");
		panic();
	}

	if (ai->abort_type == THREAD_ABORT_UNDEF) {
		tee_pager_print_error_abort(ai);
		EMSG("[TEE_PAGER] undefined abort (trap CPU)");
		panic();
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		tee_pager_print_error_abort(ai);
		EMSG("[TEE_PAGER] alignment fault! (trap CPU)");
		panic();
		break;

	case CORE_MMU_FAULT_DEBUG_EVENT:
		tee_pager_print_abort(ai);
		DMSG("[TEE_PAGER] Ignoring debug event!");
		return TEE_PAGER_FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_PERMISSION:
		return TEE_PAGER_FAULT_TYPE_PAGEABLE;

	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
		tee_pager_print_abort(ai);
		DMSG("[TEE_PAGER] Ignoring async external abort!");
		return TEE_PAGER_FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_OTHER:
	default:
		tee_pager_print_abort(ai);
		DMSG("[TEE_PAGER] Unhandled fault!");
		return TEE_PAGER_FAULT_TYPE_IGNORE;
	}
}

#ifdef CFG_WITH_PAGER

/*
 * Finds a physical page for the faulting virtual address: either the
 * pmem whose MMU entry already holds a physical address for this page
 * table index (a TEE_MATTR_PHYS_BLOCK entry), or the oldest page in the
 * list, which is unmapped from its previous virtual address and remapped
 * here.
 */
static struct tee_pager_pmem *tee_pager_get_page(
		struct tee_pager_abort_info *ai,
		struct tee_pager_area *area)
{
	unsigned pgidx = core_mmu_va2idx(&tbl_info, ai->va);
	struct tee_pager_pmem *pmem;
	paddr_t pa;
	uint32_t attr;

	core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);

	assert(!(attr & (TEE_MATTR_VALID_BLOCK | TEE_MATTR_HIDDEN_BLOCK)));

	if (attr & TEE_MATTR_PHYS_BLOCK) {
		/*
		 * There's a pmem entry using this MMU entry, let's use
		 * that entry in the new mapping.
		 */
		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
			if (pmem->pgidx == pgidx)
				break;
		}
		if (!pmem) {
			tee_pager_print_abort(ai);
			DMSG("Couldn't find pmem for pgidx %u", pgidx);
			panic();
		}
	} else {
		pmem = TAILQ_FIRST(&tee_pager_pmem_head);
		if (!pmem) {
			tee_pager_print_abort(ai);
			DMSG("No pmem entries");
			panic();
		}
		core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);
		core_mmu_set_entry(&tbl_info, pmem->pgidx, 0, 0);
	}

	pmem->pgidx = pgidx;
	pmem->area = area;
	core_mmu_set_entry(&tbl_info, pgidx, pa, get_area_mattr(area));

	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
	if (area->store) {
		/* move page to back */
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	} else {
		/* Move page to rw list */
		TEE_ASSERT(tee_pager_npages > 0);
		tee_pager_npages--;
		TAILQ_INSERT_TAIL(&tee_pager_rw_pmem_head, pmem, link);
	}

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

#ifdef TEE_PAGER_DEBUG_PRINT
	DMSG("Mapped 0x%x -> 0x%x", core_mmu_idx2va(&tbl_info, pgidx), pa);
#endif

	return pmem;
}

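/*
 * Handles a fault on a pageable address: if the page is merely hidden
 * it is revealed, otherwise a physical page is claimed, loaded,
 * verified and, for executable areas, the caches are synchronized.
 * Finally a batch of old pages is hidden to keep usage information
 * fresh.
 */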
static void tee_pager_handle_fault(struct tee_pager_abort_info *ai)
{
	struct tee_pager_area *area;
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;

#ifdef TEE_PAGER_DEBUG_PRINT
	tee_pager_print_abort(ai);
#endif

	/* check if the access is valid */
	area = tee_pager_find_area(ai->va);
	if (!area) {
		tee_pager_print_abort(ai);
		DMSG("Invalid addr 0x%" PRIxVA, ai->va);
		panic();
	}

	if (!tee_pager_unhide_page(page_va)) {
		/* the page wasn't hidden */
		tee_pager_get_page(ai, area);

		/* load page code & data */
		tee_pager_load_page(area, page_va);
		/* TODO remap readonly if TEE_PAGER_AREA_RO */
		tee_pager_verify_page(area, page_va);
		/* TODO remap executable if TEE_PAGER_AREA_X */

		if (area->flags & TEE_PAGER_AREA_X) {
			cache_maintenance_l1(DCACHE_AREA_CLEAN,
				(void *)page_va, SMALL_PAGE_SIZE);

			cache_maintenance_l1(ICACHE_AREA_INVALIDATE,
				(void *)page_va, SMALL_PAGE_SIZE);
		}
	}

	tee_pager_hide_pages();
	/* end protect (multithreaded version) */
}

#else /*CFG_WITH_PAGER*/

static void tee_pager_handle_fault(struct tee_pager_abort_info *ai)
{
	/*
	 * Without pager support any page fault is unexpected, trap the
	 * CPU here.
	 */
	tee_pager_print_error_abort(ai);
	EMSG("Unexpected page fault! Trap CPU");
	panic();
}

#endif /*CFG_WITH_PAGER*/

#ifdef ARM32
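/*
 * Reads the fault status and faulting address from the CP15 fault
 * registers: DFSR/DFAR for data aborts, IFSR/IFAR for prefetch aborts.
 * An undefined instruction abort carries no fault status and the
 * faulting address is the PC itself.
 */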
static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
		struct tee_pager_abort_info *ai)
{
	switch (abort_type) {
	case THREAD_ABORT_DATA:
		ai->fault_descr = read_dfsr();
		ai->va = read_dfar();
		break;
	case THREAD_ABORT_PREFETCH:
		ai->fault_descr = read_ifsr();
		ai->va = read_ifar();
		break;
	default:
		ai->fault_descr = 0;
		ai->va = regs->elr;
		break;
	}
	ai->abort_type = abort_type;
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM32*/

#ifdef ARM32
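/*
 * Rewrites the saved abort registers so that the abort handler returns
 * into thread_unwind_user_mode() in SVC mode with the result
 * TEE_ERROR_TARGET_DEAD, terminating the user TA.
 */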
static void handle_user_ta_panic(struct tee_pager_abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->r1 = true;
	ai->regs->r2 = 0xdeadbeef;
	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
	ai->regs->spsr = read_cpsr();
	ai->regs->spsr &= ~CPSR_MODE_MASK;
	ai->regs->spsr |= CPSR_MODE_SVC;
	ai->regs->spsr &= ~CPSR_FIA;
	ai->regs->spsr |= read_spsr() & CPSR_FIA;
	/* Select Thumb or ARM mode */
	if (ai->regs->elr & 1)
		ai->regs->spsr |= CPSR_T;
	else
		ai->regs->spsr &= ~CPSR_T;
}
#endif /*ARM32*/

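/*
 * Entry point from the exception vectors: collects the abort
 * information, classifies the fault and dispatches to the matching
 * handler.
 */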
void tee_pager_abort_handler(uint32_t abort_type,
		struct thread_abort_regs *regs)
{
	struct tee_pager_abort_info ai;

	set_abort_info(abort_type, regs, &ai);

	switch (tee_pager_get_fault_type(&ai)) {
	case TEE_PAGER_FAULT_TYPE_IGNORE:
		break;
	case TEE_PAGER_FAULT_TYPE_USER_TA_PANIC:
		handle_user_ta_panic(&ai);
		break;
	case TEE_PAGER_FAULT_TYPE_PAGEABLE:
	default:
		tee_pager_handle_fault(&ai);
		break;
	}
}

#ifdef CFG_WITH_PAGER
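/*
 * tee_pager_add_pages() - supply physical pages to the pager
 *
 * Registers the pages currently mapping [vaddr, vaddr + npages *
 * SMALL_PAGE_SIZE) as pager pages. With @unmap set each page is made
 * inaccessible right away; the TEE_MATTR_PHYS_BLOCK attribute keeps the
 * physical address in the descriptor so the page can be remapped later.
 */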
void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
{
	size_t n;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);

	/* setup memory */
	for (n = 0; n < npages; n++) {
		struct tee_pager_pmem *pmem;
		tee_vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
		unsigned pgidx = core_mmu_va2idx(&tbl_info, va);
		paddr_t pa;
		uint32_t attr;

		core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);

		/* Ignore unmapped pages/blocks */
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		pmem = malloc(sizeof(struct tee_pager_pmem));
		if (!pmem) {
			DMSG("Can't allocate memory");
			panic();
		}

		pmem->pgidx = pgidx;
		pmem->area = NULL;

		if (unmap) {
			/*
			 * Note that we're making the page inaccessible
			 * with the TEE_MATTR_PHYS_BLOCK attribute to
			 * indicate that the descriptor still holds a valid
			 * physical address of a page.
			 */
			core_mmu_set_entry(&tbl_info, pgidx, pa,
					   TEE_MATTR_PHYS_BLOCK);
		}
		tee_pager_npages++;
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	if (unmap) {
		/* Invalidate secure TLB */
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
	}
}
#endif /*CFG_WITH_PAGER*/
645