xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision 57903c16c7dafb08abc57680de73ef4a7c43cb4a)
1 /*
2  * Copyright (c) 2014, STMicroelectronics International N.V.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <sys/queue.h>
29 #include <stdlib.h>
30 #include <inttypes.h>
31 #include <kernel/tee_common_unpg.h>
32 #include <kernel/tee_common.h>
33 #include <kernel/thread_defs.h>
34 #include <kernel/panic.h>
35 #include <mm/tee_mmu_defs.h>
36 #include <kernel/tee_ta_manager.h>
37 #include <kernel/tee_kta_trace.h>
38 #include <kernel/misc.h>
39 #include <kernel/tee_misc.h>
40 #include <mm/tee_pager.h>
41 #include <mm/tee_mm.h>
42 #include <mm/core_mmu.h>
43 #include <tee/arch_svc.h>
44 #include <arm.h>
45 #include <tee/tee_cryp_provider.h>
46 #include <tee_api_defines.h>
47 #include <utee_defines.h>
48 #include <trace.h>
49 
/*
 * Snapshot of a CPU abort, filled in by set_abort_info() and consumed
 * by the classification and fault-handling code below.
 */
struct tee_pager_abort_info {
	uint32_t abort_type;	/* THREAD_ABORT_{DATA,PREFETCH,UNDEF} */
	uint32_t fault_descr;	/* Fault status: DFSR/IFSR (ARM32), ESR_EL1 (ARM64) */
	vaddr_t va;		/* Faulting virtual address */
	uint32_t pc;		/* PC at the time of the abort */
	struct thread_abort_regs *regs;	/* Saved register context */
};
57 
/* Verdict produced by tee_pager_get_fault_type() for an abort */
enum tee_pager_fault_type {
	TEE_PAGER_FAULT_TYPE_USER_TA_PANIC,	/* Abort in user mode: TA is killed */
	TEE_PAGER_FAULT_TYPE_PAGEABLE,		/* Translation/permission fault: page in */
	TEE_PAGER_FAULT_TYPE_IGNORE,		/* Debug/async/unknown: resume execution */
};
63 
64 #ifdef CFG_WITH_PAGER
/*
 * struct tee_pager_area - a virtual memory range served by the pager.
 *
 * @hashes	per-page SHA-256 hashes (RO areas only, NULL for RW)
 * @store	backing store holding the page contents (RO areas only)
 * @flags	TEE_PAGER_AREA_* flags describing the area
 * @mm		memory map entry giving the covered virtual range
 */
struct tee_pager_area {
	const uint8_t *hashes;
	const uint8_t *store;
	uint32_t flags;
	tee_mm_entry_t *mm;
	TAILQ_ENTRY(tee_pager_area) link;
};
72 
/* All registered pageable areas, in registration order */
static TAILQ_HEAD(tee_pager_area_head, tee_pager_area) tee_pager_area_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
75 
/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @pgidx	index of the MMU entry in tbl_info currently holding this
 *		page's physical address. Even when the page is unmapped
 *		(TEE_MATTR_PHYS_BLOCK) the entry keeps the physical address.
 *
 * @area	the pager area this page is currently mapped for, or NULL
 *		when the page has not served any area yet
 */
struct tee_pager_pmem {
	unsigned pgidx;
	struct tee_pager_area *area;
	 TAILQ_ENTRY(tee_pager_pmem) link;
};
90 
/* The list of physical pages. The first page in the list is the oldest */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);

/* Pages available for paging; head is the next eviction candidate */
static struct tee_pager_pmem_head tee_pager_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);

/*
 * Pages handed out for zero-initialized RW data by tee_pager_get_page();
 * they leave the paging pool and are not evicted again.
 */
static struct tee_pager_pmem_head tee_pager_rw_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_rw_pmem_head);
99 
/* Number of pages hidden per batch: a third of the paging pool */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)

/* Number of registered physical pages, used when hiding pages. */
static size_t tee_pager_npages;

/*
 * Reference to translation table used to map the virtual memory range
 * covered by the pager. Lazily initialized by tee_pager_add_area().
 */
static struct core_mmu_table_info tbl_info;
111 
112 bool tee_pager_add_area(tee_mm_entry_t *mm, uint32_t flags, const void *store,
113 		const void *hashes)
114 {
115 	struct tee_pager_area *area;
116 	size_t tbl_va_size;
117 
118 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
119 		tee_mm_get_smem(mm),
120 		tee_mm_get_smem(mm) + (mm->size << mm->pool->shift),
121 		flags, store, hashes);
122 
123 	if (flags & TEE_PAGER_AREA_RO)
124 		TEE_ASSERT(store && hashes);
125 	else if (flags & TEE_PAGER_AREA_RW)
126 		TEE_ASSERT(!store && !hashes);
127 	else
128 		panic();
129 
130 	if (!tbl_info.num_entries) {
131 		if (!core_mmu_find_table(tee_mm_get_smem(mm), UINT_MAX,
132 					&tbl_info))
133 			return false;
134 		if ((1 << tbl_info.shift) != SMALL_PAGE_SIZE) {
135 			DMSG("Unsupported page size in translation table %u",
136 			     1 << tbl_info.shift);
137 			return false;
138 		}
139 	}
140 
141 	tbl_va_size = (1 << tbl_info.shift) * tbl_info.num_entries;
142 	if (!core_is_buffer_inside(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
143 				   tbl_info.va_base, tbl_va_size)) {
144 		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit it translation table 0x%" PRIxVA " len 0x%zx",
145 			tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
146 			tbl_info.va_base, tbl_va_size);
147 		return false;
148 	}
149 
150 
151 
152 	area = malloc(sizeof(struct tee_pager_area));
153 	if (!area)
154 		return false;
155 
156 
157 	area->mm = mm;
158 	area->flags = flags;
159 	area->store = store;
160 	area->hashes = hashes;
161 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
162 	return true;
163 }
164 
165 static struct tee_pager_area *tee_pager_find_area(vaddr_t va)
166 {
167 	struct tee_pager_area *area;
168 
169 	TAILQ_FOREACH(area, &tee_pager_area_head, link) {
170 		tee_mm_entry_t *mm = area->mm;
171 		size_t offset = (va - mm->pool->lo) >> mm->pool->shift;
172 
173 		if (offset >= mm->offset && offset < (mm->offset + mm->size))
174 			return area;
175 	}
176 	return NULL;
177 }
178 
179 static uint32_t get_area_mattr(struct tee_pager_area *area __unused)
180 {
181 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
182 			TEE_MATTR_CACHE_DEFAULT | TEE_MATTR_SECURE;
183 
184 	attr |= TEE_MATTR_PRWX;
185 
186 	return attr;
187 }
188 
189 
190 
191 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va)
192 {
193 	size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;
194 
195 	if (area->store) {
196 		size_t rel_pg_idx = pg_idx - area->mm->offset;
197 		const void *stored_page = area->store +
198 					  rel_pg_idx * SMALL_PAGE_SIZE;
199 
200 		memcpy((void *)page_va, stored_page, SMALL_PAGE_SIZE);
201 	} else {
202 		memset((void *)page_va, 0, SMALL_PAGE_SIZE);
203 	}
204 }
205 
206 static void tee_pager_verify_page(struct tee_pager_area *area, vaddr_t page_va)
207 {
208 	size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;
209 
210 	if (area->store) {
211 		size_t rel_pg_idx = pg_idx - area->mm->offset;
212 		const void *hash = area->hashes +
213 				   rel_pg_idx * TEE_SHA256_HASH_SIZE;
214 
215 		if (hash_sha256_check(hash, (void *)page_va, SMALL_PAGE_SIZE) !=
216 				TEE_SUCCESS) {
217 			EMSG("PH 0x%" PRIxVA " failed", page_va);
218 			panic();
219 		}
220 	}
221 }
222 
/*
 * If the page at @page_va is resident but hidden, restores its mapping
 * attributes and moves it to the back of the list (most recently used).
 * Returns true if the fault was resolved this way, false if the page
 * must be paged in instead.
 */
static bool tee_pager_unhide_page(vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);

		/* Only hidden pages are candidates */
		if (!(attr & TEE_MATTR_HIDDEN_BLOCK))
			continue;

		if (core_mmu_va2idx(&tbl_info, page_va) == pmem->pgidx) {
			/* page is hidden, show and move to back */
			core_mmu_set_entry(&tbl_info, pmem->pgidx, pa,
					   get_area_mattr(pmem->area));

			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);

			/* TODO only invalidate entry touched above */
			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
			return true;
		}
	}

	return false;
}
252 
/*
 * Makes the oldest TEE_PAGER_NHIDE mapped pages inaccessible ("hidden").
 * A later access to one of them faults and tee_pager_unhide_page() then
 * moves it to the back of the list, so pages still in use migrate away
 * from the eviction end.
 */
static void tee_pager_hide_pages(void)
{
	struct tee_pager_pmem *pmem;
	size_t n = 0;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		if (n >= TEE_PAGER_NHIDE)
			break;
		n++;
		core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);
		/* Skip entries that aren't currently mapped */
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		/* Keep the physical address but strip access rights */
		core_mmu_set_entry(&tbl_info, pmem->pgidx, pa,
				   TEE_MATTR_HIDDEN_BLOCK);

	}

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}
277 #endif /*CFG_WITH_PAGER*/
278 
279 #ifdef ARM32
280 /* Returns true if the exception originated from user mode */
281 static bool tee_pager_is_user_exception(struct tee_pager_abort_info *ai)
282 {
283 	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
284 }
285 #endif /*ARM32*/
286 
287 #ifdef ARM64
288 /* Returns true if the exception originated from user mode */
289 static bool tee_pager_is_user_exception(struct tee_pager_abort_info *ai)
290 {
291 	uint32_t spsr = ai->regs->spsr;
292 
293 	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
294 		return true;
295 	if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
296 	    SPSR_64_MODE_EL0)
297 		return true;
298 	return false;
299 }
300 #endif /*ARM64*/
301 
302 #ifdef ARM32
303 /* Returns true if the exception originated from abort mode */
304 static bool tee_pager_is_abort_in_abort_handler(struct tee_pager_abort_info *ai)
305 {
306 	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_ABT;
307 }
308 #endif /*ARM32*/
309 
310 #ifdef ARM64
/*
 * Returns true if the exception originated from abort mode.
 * NOTE(review): always false on ARM64 — there is no dedicated abort
 * mode equivalent to ARM32's ABT to recurse into.
 */
static bool tee_pager_is_abort_in_abort_handler(
		struct tee_pager_abort_info *ai __unused)
{
	return false;
}
317 #endif /*ARM64*/
318 
319 static __unused const char *abort_type_to_str(uint32_t abort_type)
320 {
321 	if (abort_type == THREAD_ABORT_DATA)
322 		return "data";
323 	if (abort_type == THREAD_ABORT_PREFETCH)
324 		return "prefetch";
325 	return "undef";
326 }
327 
/*
 * Dumps the user TA register context at the abort, AArch32-style
 * (r0..r12, sp, lr, pc). On ARM64 the low xN registers are printed
 * under their AArch32 names — presumably because user TAs run in
 * AArch32 state; confirm against the TA loader. Compiled out unless
 * CFG_TEE_CORE_TA_TRACE is set.
 */
static void tee_pager_print_user_abort(struct tee_pager_abort_info *ai __unused)
{
#ifdef CFG_TEE_CORE_TA_TRACE
	EMSG_RAW("\nUser TA %s-abort at address 0x%" PRIxVA,
		abort_type_to_str(ai->abort_type), ai->va);
#ifdef ARM32
	EMSG_RAW(" fsr 0x%08x  ttbr0 0x%08x   ttbr1 0x%08x   cidr 0x%X\n",
		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
		 read_contextidr());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x\n",
		 get_core_pos(), ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x     r4 0x%08x     r8 0x%08x    r12 0x%08x\n",
		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
	EMSG_RAW(" r1 0x%08x     r5 0x%08x     r9 0x%08x     sp 0x%08x\n",
		 ai->regs->r1, ai->regs->r5, ai->regs->r9, read_usr_sp());
	EMSG_RAW(" r2 0x%08x     r6 0x%08x    r10 0x%08x     lr 0x%08x\n",
		 ai->regs->r2, ai->regs->r6, ai->regs->r10, read_usr_lr());
	EMSG_RAW(" r3 0x%08x     r7 0x%08x    r11 0x%08x     pc 0x%08x\n",
		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif
#ifdef ARM64
	EMSG_RAW(" esr 0x%08x  ttbr0 0x%08" PRIx64 "   ttbr1 0x%08" PRIx64 "   cidr 0x%X\n",
		 ai->fault_descr, read_ttbr0_el1(), read_ttbr1_el1(),
		 read_contextidr_el1());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x\n",
		 get_core_pos(), (uint32_t)ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x     r4 0x%08x     r8 0x%08x    r12 0x%08x\n",
		 (uint32_t)ai->regs->x0, (uint32_t)ai->regs->x4,
		 (uint32_t)ai->regs->x8, (uint32_t)ai->regs->x12);
	EMSG_RAW(" r1 0x%08x     r5 0x%08x     r9 0x%08x     sp 0x%08x\n",
		 (uint32_t)ai->regs->x1, (uint32_t)ai->regs->x5,
		 (uint32_t)ai->regs->x9, (uint32_t)ai->regs->x13);
	EMSG_RAW(" r2 0x%08x     r6 0x%08x    r10 0x%08x     lr 0x%08x\n",
		 (uint32_t)ai->regs->x2, (uint32_t)ai->regs->x6,
		 (uint32_t)ai->regs->x10, (uint32_t)ai->regs->x14);
	EMSG_RAW(" r3 0x%08x     r7 0x%08x    r11 0x%08x     pc 0x%08x\n",
		 (uint32_t)ai->regs->x3, (uint32_t)ai->regs->x7,
		 (uint32_t)ai->regs->x11, (uint32_t)ai->pc);
#endif

	tee_ta_dump_current();
#endif
}
371 
/*
 * Debug-level dump of a (potentially recoverable) abort: fault status,
 * faulting address, PC and current MMU/CPU identification state.
 */
static void tee_pager_print_abort(struct tee_pager_abort_info *ai __unused)
{
#ifdef ARM32
	DMSG("%s-abort at 0x%" PRIxVA ": FSR 0x%x PC 0x%x TTBR0 0x%X CONTEXIDR 0x%X",
	     abort_type_to_str(ai->abort_type),
	     ai->va, ai->fault_descr, ai->pc, read_ttbr0(), read_contextidr());
	DMSG("CPUID 0x%x SPSR_abt 0x%x",
	     read_mpidr(), read_spsr());
#endif /*ARM32*/
#ifdef ARM64
	DMSG("%s-abort at 0x%" PRIxVA ": ESR 0x%x PC 0x%x TTBR0 0x%" PRIx64 " CONTEXIDR 0x%X",
	     abort_type_to_str(ai->abort_type),
	     ai->va, ai->fault_descr, ai->pc, read_ttbr0_el1(),
	     read_contextidr_el1());
	DMSG("CPUID 0x%" PRIx64 " SPSR 0x%x",
	     read_mpidr_el1(), (uint32_t)ai->regs->spsr);
#endif /*ARM64*/
}
390 
/*
 * Error-level dump used on fatal aborts (before panic): fault status,
 * PC, MMU state and — on ARM64 — the full x0..x30/elr/sp_el0 register
 * file from the saved context.
 */
static void tee_pager_print_error_abort(
		struct tee_pager_abort_info *ai __unused)
{
#ifdef ARM32
	EMSG("%s-abort at 0x%" PRIxVA "\n"
	     "FSR 0x%x PC 0x%x TTBR0 0x%X CONTEXIDR 0x%X\n"
	     "CPUID 0x%x CPSR 0x%x (read from SPSR)",
	     abort_type_to_str(ai->abort_type),
	     ai->va, ai->fault_descr, ai->pc, read_ttbr0(), read_contextidr(),
	     read_mpidr(), read_spsr());
#endif /*ARM32*/
#ifdef ARM64
	EMSG("%s-abort at 0x%" PRIxVA "\n"
	     "ESR 0x%x PC 0x%x TTBR0 0x%" PRIx64 " CONTEXIDR 0x%X\n"
	     "CPUID 0x%" PRIx64 " CPSR 0x%x (read from SPSR)",
	     abort_type_to_str(ai->abort_type),
	     ai->va, ai->fault_descr, ai->pc, read_ttbr0_el1(),
	     read_contextidr_el1(),
	     read_mpidr_el1(), (uint32_t)ai->regs->spsr);
	DMSG_RAW("x0  %016" PRIx64 " x1  %016" PRIx64,
		 ai->regs->x0, ai->regs->x1);
	DMSG_RAW("x2  %016" PRIx64 " x3  %016" PRIx64,
		 ai->regs->x2, ai->regs->x3);
	DMSG_RAW("x4  %016" PRIx64 " x5  %016" PRIx64,
		 ai->regs->x4, ai->regs->x5);
	DMSG_RAW("x6  %016" PRIx64 " x7  %016" PRIx64,
		 ai->regs->x6, ai->regs->x7);
	DMSG_RAW("x8  %016" PRIx64 " x9  %016" PRIx64,
		 ai->regs->x8, ai->regs->x9);
	DMSG_RAW("x10 %016" PRIx64 " x11 %016" PRIx64,
		 ai->regs->x10, ai->regs->x11);
	DMSG_RAW("x12 %016" PRIx64 " x13 %016" PRIx64,
		 ai->regs->x12, ai->regs->x13);
	DMSG_RAW("x14 %016" PRIx64 " x15 %016" PRIx64,
		 ai->regs->x14, ai->regs->x15);
	DMSG_RAW("x16 %016" PRIx64 " x17 %016" PRIx64,
		 ai->regs->x16, ai->regs->x17);
	DMSG_RAW("x18 %016" PRIx64 " x19 %016" PRIx64,
		 ai->regs->x18, ai->regs->x19);
	DMSG_RAW("x20 %016" PRIx64 " x21 %016" PRIx64,
		 ai->regs->x20, ai->regs->x21);
	DMSG_RAW("x22 %016" PRIx64 " x23 %016" PRIx64,
		 ai->regs->x22, ai->regs->x23);
	DMSG_RAW("x24 %016" PRIx64 " x25 %016" PRIx64,
		 ai->regs->x24, ai->regs->x25);
	DMSG_RAW("x26 %016" PRIx64 " x27 %016" PRIx64,
		 ai->regs->x26, ai->regs->x27);
	DMSG_RAW("x28 %016" PRIx64 " x29 %016" PRIx64,
		 ai->regs->x28, ai->regs->x29);
	DMSG_RAW("x30 %016" PRIx64 " elr %016" PRIx64,
		 ai->regs->x30, ai->regs->elr);
	DMSG_RAW("sp_el0 %016" PRIx64, ai->regs->sp_el0);
#endif /*ARM64*/
}
445 
446 
447 static enum tee_pager_fault_type tee_pager_get_fault_type(
448 		struct tee_pager_abort_info *ai)
449 {
450 
451 	/* In case of multithreaded version, this section must be protected */
452 	if (tee_pager_is_user_exception(ai)) {
453 		tee_pager_print_user_abort(ai);
454 		DMSG("[TEE_PAGER] abort in User mode (TA will panic)");
455 		return TEE_PAGER_FAULT_TYPE_USER_TA_PANIC;
456 	}
457 
458 	if (tee_pager_is_abort_in_abort_handler(ai)) {
459 		tee_pager_print_error_abort(ai);
460 		EMSG("[PAGER] abort in abort handler (trap CPU)");
461 		panic();
462 	}
463 
464 	if (ai->abort_type == THREAD_ABORT_UNDEF) {
465 		tee_pager_print_error_abort(ai);
466 		EMSG("[TEE_PAGER] undefined abort (trap CPU)");
467 		panic();
468 	}
469 
470 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
471 	case CORE_MMU_FAULT_ALIGNMENT:
472 		tee_pager_print_error_abort(ai);
473 		EMSG("[TEE_PAGER] alignement fault!  (trap CPU)");
474 		panic();
475 		break;
476 
477 	case CORE_MMU_FAULT_DEBUG_EVENT:
478 		tee_pager_print_abort(ai);
479 		DMSG("[TEE_PAGER] Ignoring debug event!");
480 		return TEE_PAGER_FAULT_TYPE_IGNORE;
481 
482 	case CORE_MMU_FAULT_TRANSLATION:
483 	case CORE_MMU_FAULT_PERMISSION:
484 		return TEE_PAGER_FAULT_TYPE_PAGEABLE;
485 
486 	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
487 		tee_pager_print_abort(ai);
488 		DMSG("[TEE_PAGER] Ignoring async external abort!");
489 		return TEE_PAGER_FAULT_TYPE_IGNORE;
490 
491 	case CORE_MMU_FAULT_OTHER:
492 	default:
493 		tee_pager_print_abort(ai);
494 		DMSG("[TEE_PAGER] Unhandled fault!");
495 		return TEE_PAGER_FAULT_TYPE_IGNORE;
496 	}
497 }
498 
499 
500 #ifdef CFG_WITH_PAGER
501 
/*
 * Finds the oldest page and remaps it for the new virtual address.
 * Either reuses the physical page already parked (unmapped) at the
 * faulting MMU entry, or evicts the oldest page from the head of the
 * list. RO pages go to the back of the paging list; RW pages move to
 * the dedicated rw list and permanently leave the paging pool.
 */
static struct tee_pager_pmem *tee_pager_get_page(
		struct tee_pager_abort_info *ai,
		struct tee_pager_area *area)
{
	unsigned pgidx = core_mmu_va2idx(&tbl_info, ai->va);
	struct tee_pager_pmem *pmem;
	paddr_t pa;
	uint32_t attr;

	core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);

	/* A valid or hidden entry should never fault its way here */
	assert(!(attr & (TEE_MATTR_VALID_BLOCK | TEE_MATTR_HIDDEN_BLOCK)));

	if (attr & TEE_MATTR_PHYS_BLOCK) {
		/*
		 * There's an pmem entry using this mmu entry, let's use
		 * that entry in the new mapping.
		 */
		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
			if (pmem->pgidx == pgidx)
				break;
		}
		/* TAILQ_FOREACH leaves pmem NULL when nothing matched */
		if (!pmem) {
			tee_pager_print_abort(ai);
			DMSG("Couldn't find pmem for pgidx %u", pgidx);
			panic();
		}
	} else {
		/* Evict the oldest page: the head of the list */
		pmem = TAILQ_FIRST(&tee_pager_pmem_head);
		if (!pmem) {
			tee_pager_print_abort(ai);
			DMSG("No pmem entries");
			panic();
		}
		/* Reuse its physical address and unmap its old entry */
		core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);
		core_mmu_set_entry(&tbl_info, pmem->pgidx, 0, 0);
	}

	/* Map the physical page at the faulting index */
	pmem->pgidx = pgidx;
	pmem->area = area;
	core_mmu_set_entry(&tbl_info, pgidx, pa, get_area_mattr(area));

	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
	if (area->store) {
		/* move page to back */
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	} else {
		/* Move page to rw list */
		TEE_ASSERT(tee_pager_npages > 0);
		tee_pager_npages--;
		TAILQ_INSERT_TAIL(&tee_pager_rw_pmem_head, pmem, link);
	}

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

#ifdef TEE_PAGER_DEBUG_PRINT
	DMSG("Mapped 0x%x -> 0x%x", core_mmu_idx2va(&tbl_info, pgidx), pa);
#endif

	return pmem;
}
565 
/*
 * Resolves a pageable fault at ai->va: either unhides an already
 * resident page, or grabs a physical page, loads and verifies the
 * contents, then hides a batch of old pages to refresh usage tracking.
 * Panics if the address is outside every registered area.
 */
static void tee_pager_handle_fault(struct tee_pager_abort_info *ai)
{
	struct tee_pager_area *area;
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;

#ifdef TEE_PAGER_DEBUG_PRINT
	tee_pager_print_abort(ai);
#endif

	/* check if the access is valid */
	area = tee_pager_find_area(ai->va);
	if (!area) {
		tee_pager_print_abort(ai);
		DMSG("Invalid addr 0x%" PRIxVA, ai->va);
		panic();
	}

	if (!tee_pager_unhide_page(page_va)) {
		/* the page wasn't hidden */
		tee_pager_get_page(ai, area);

		/* load page code & data */
		tee_pager_load_page(area, page_va);
		/* TODO remap readonly if TEE_PAGER_AREA_RO */
		tee_pager_verify_page(area, page_va);
		/* TODO remap executable if TEE_PAGER_AREA_X */

		if (area->flags & TEE_PAGER_AREA_X) {
			/* Keep the I-cache coherent with the loaded code */
			cache_maintenance_l1(DCACHE_AREA_CLEAN,
				(void *)page_va, SMALL_PAGE_SIZE);

			cache_maintenance_l1(ICACHE_AREA_INVALIDATE,
				(void *)page_va, SMALL_PAGE_SIZE);
		}
	}

	tee_pager_hide_pages();
	/* end protect (multithreaded version) */
}
605 
606 #else /*CFG_WITH_PAGER*/
607 
/*
 * Without CFG_WITH_PAGER all valid memory is expected to be mapped, so
 * any page fault reaching this point is fatal.
 */
static void tee_pager_handle_fault(struct tee_pager_abort_info *ai)
{
	/*
	 * Until PAGER is supported, trap CPU here.
	 */
	tee_pager_print_error_abort(ai);
	EMSG("Unexpected page fault! Trap CPU");
	panic();
}
617 
618 #endif /*CFG_WITH_PAGER*/
619 
620 #ifdef ARM32
621 static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
622 		struct tee_pager_abort_info *ai)
623 {
624 	switch (abort_type) {
625 	case THREAD_ABORT_DATA:
626 		ai->fault_descr = read_dfsr();
627 		ai->va = read_dfar();
628 		break;
629 	case THREAD_ABORT_PREFETCH:
630 		ai->fault_descr = read_ifsr();
631 		ai->va = read_ifar();
632 		break;
633 	default:
634 		ai->fault_descr = 0;
635 		ai->va = regs->elr;
636 		break;
637 	}
638 	ai->abort_type = abort_type;
639 	ai->pc = regs->elr;
640 	ai->regs = regs;
641 }
642 #endif /*ARM32*/
643 
644 #ifdef ARM64
/*
 * Fills in @ai for an ARM64 abort. The exception class in ESR_EL1 is
 * decoded to derive the abort type, so the @abort_type from the vector
 * is unused; FAR_EL1 supplies the faulting address for I/D aborts.
 */
static void set_abort_info(uint32_t abort_type __unused,
		struct thread_abort_regs *regs, struct tee_pager_abort_info *ai)
{
	ai->fault_descr = read_esr_el1();
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_IABT_EL0:
	case ESR_EC_IABT_EL1:
		ai->abort_type = THREAD_ABORT_PREFETCH;
		ai->va = read_far_el1();
		break;
	case ESR_EC_DABT_EL0:
	case ESR_EC_DABT_EL1:
	case ESR_EC_SP_ALIGN:
		ai->abort_type = THREAD_ABORT_DATA;
		ai->va = read_far_el1();
		break;
	default:
		/* Anything else is treated as undefined, faulting at PC */
		ai->abort_type = THREAD_ABORT_UNDEF;
		ai->va = regs->elr;
	}
	ai->pc = regs->elr;
	ai->regs = regs;
}
668 #endif /*ARM64*/
669 
670 #ifdef ARM32
/*
 * Rewrites the saved context so that the abort return resumes in SVC
 * mode at thread_unwind_user_mode() with TEE_ERROR_TARGET_DEAD — i.e.
 * the faulting user TA is terminated and control goes back to TEE Core.
 * r1/r2 presumably carry a panic flag and code to the unwinder — confirm
 * against thread_unwind_user_mode()'s signature.
 */
static void handle_user_ta_panic(struct tee_pager_abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->r1 = true;
	ai->regs->r2 = 0xdeadbeef;
	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
	ai->regs->spsr = read_cpsr();
	ai->regs->spsr &= ~CPSR_MODE_MASK;
	ai->regs->spsr |= CPSR_MODE_SVC;
	/* Inherit the A/I/F interrupt masks active when the abort hit */
	ai->regs->spsr &= ~CPSR_FIA;
	ai->regs->spsr |= read_spsr() & CPSR_FIA;
	/* Select Thumb or ARM mode */
	if (ai->regs->elr & 1)
		ai->regs->spsr |= CPSR_T;
	else
		ai->regs->spsr &= ~CPSR_T;
}
692 #endif /*ARM32*/
693 
694 #ifdef ARM64
/*
 * Rewrites the saved context so that the abort return resumes at
 * thread_unwind_user_mode() in EL1 on the saved kernel stack, with
 * TEE_ERROR_TARGET_DEAD — the faulting user TA is terminated.
 * x1/x2 presumably carry a panic flag and code to the unwinder —
 * confirm against thread_unwind_user_mode()'s signature.
 */
static void handle_user_ta_panic(struct tee_pager_abort_info *ai)
{
	uint32_t daif;

	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->x1 = true;
	ai->regs->x2 = 0xdeadbeef;
	ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
	ai->regs->sp_el0 = thread_get_saved_thread_sp();

	/* Inherit the AIF interrupt masks active when the abort hit */
	daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
	/* XXX what about DAIF_D? */
	ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
}
713 #endif /*ARM64*/
714 
715 void tee_pager_abort_handler(uint32_t abort_type,
716 		struct thread_abort_regs *regs)
717 {
718 	struct tee_pager_abort_info ai;
719 
720 	set_abort_info(abort_type, regs, &ai);
721 
722 	switch (tee_pager_get_fault_type(&ai)) {
723 	case TEE_PAGER_FAULT_TYPE_IGNORE:
724 		break;
725 	case TEE_PAGER_FAULT_TYPE_USER_TA_PANIC:
726 		handle_user_ta_panic(&ai);
727 		break;
728 	case TEE_PAGER_FAULT_TYPE_PAGEABLE:
729 	default:
730 		tee_pager_handle_fault(&ai);
731 		break;
732 	}
733 }
734 
735 #ifdef CFG_WITH_PAGER
736 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
737 {
738 	size_t n;
739 
740 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
741 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
742 
743 	/* setup memory */
744 	for (n = 0; n < npages; n++) {
745 		struct tee_pager_pmem *pmem;
746 		tee_vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
747 		unsigned pgidx = core_mmu_va2idx(&tbl_info, va);
748 		paddr_t pa;
749 		uint32_t attr;
750 
751 		core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);
752 
753 		/* Ignore unmapped pages/blocks */
754 		if (!(attr & TEE_MATTR_VALID_BLOCK))
755 			continue;
756 
757 		pmem = malloc(sizeof(struct tee_pager_pmem));
758 		if (pmem == NULL) {
759 			DMSG("Can't allocate memory");
760 			panic();
761 		}
762 
763 		pmem->pgidx = pgidx;
764 		pmem->area = NULL;
765 
766 		if (unmap) {
767 			/*
768 			 * Note that we're making the page inaccessible
769 			 * with the TEE_MATTR_PHYS_BLOCK attribute to
770 			 * indicate that the descriptor still holds a valid
771 			 * physical address of a page.
772 			 */
773 			core_mmu_set_entry(&tbl_info, pgidx, pa,
774 					   TEE_MATTR_PHYS_BLOCK);
775 		}
776 		tee_pager_npages++;
777 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
778 	}
779 
780 	if (unmap) {
781 		/* Invalidate secure TLB */
782 		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
783 	}
784 }
785 #endif /*CFG_WITH_PAGER*/
786