/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <inttypes.h>
#include <kernel/tee_common_unpg.h>
#include <kernel/tee_common.h>
#include <kernel/thread_defs.h>
#include <kernel/panic.h>
#include <mm/tee_mmu_defs.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_kta_trace.h>
#include <kernel/misc.h>
#include <kernel/tee_misc.h>
#include <mm/tee_pager.h>
#include <mm/tee_mm.h>
#include <mm/core_mmu.h>
#include <tee/arch_svc.h>
#include <arm.h>
#include <tee/tee_cryp_provider.h>
#include <tee_api_defines.h>
#include <utee_defines.h>
#include <trace.h>
#include <util.h>

struct tee_pager_abort_info {
	uint32_t abort_type;
	uint32_t fault_descr;
	vaddr_t va;
	uint32_t pc;
	struct thread_abort_regs *regs;
};

enum tee_pager_fault_type {
	TEE_PAGER_FAULT_TYPE_USER_TA_PANIC,
	TEE_PAGER_FAULT_TYPE_PAGEABLE,
	TEE_PAGER_FAULT_TYPE_IGNORE,
};

#ifdef CFG_WITH_PAGER
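/*
 * struct tee_pager_area - Represents a virtual memory range handled by the
 * pager.
 *
 * @hashes	SHA-256 hash per page, used to verify read-only pages as
 *		they are paged in (NULL for read/write areas)
 * @store	backing store read-only pages are copied from (NULL for
 *		read/write areas, which are zero-initialized instead)
 * @flags	TEE_PAGER_AREA_* flags describing access permissions
 * @mm		the virtual memory range covered by this area
 */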
struct tee_pager_area {
	const uint8_t *hashes;
	const uint8_t *store;
	uint32_t flags;
	tee_mm_entry_t *mm;
	TAILQ_ENTRY(tee_pager_area) link;
};

static TAILQ_HEAD(tee_pager_area_head, tee_pager_area) tee_pager_area_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);

/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @pgidx	index of the entry in tbl_info. The physical address is kept
 *		in the MMU entry itself, so even if the page isn't mapped
 *		there's always an MMU entry holding the physical address.
 *
 * @area	a pointer to the pager area this page currently belongs to
 */
struct tee_pager_pmem {
	unsigned pgidx;
	struct tee_pager_area *area;
	TAILQ_ENTRY(tee_pager_pmem) link;
};

/* The list of physical pages. The first page in the list is the oldest */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);

static struct tee_pager_pmem_head tee_pager_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);

static struct tee_pager_pmem_head tee_pager_rw_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_rw_pmem_head);

/* Number of pages to hide in one pass of tee_pager_hide_pages() */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)

/* Number of registered physical pages, used when hiding pages. */
static size_t tee_pager_npages;

#ifdef CFG_WITH_STATS
static struct tee_pager_stats pager_stats;

static inline void incr_ro_hits(void)
{
	pager_stats.ro_hits++;
}

static inline void incr_rw_hits(void)
{
	pager_stats.rw_hits++;
}

static inline void incr_hidden_hits(void)
{
	pager_stats.hidden_hits++;
}

static inline void incr_zi_released(void)
{
	pager_stats.zi_released++;
}

static inline void incr_npages_all(void)
{
	pager_stats.npages_all++;
}

static inline void set_npages(void)
{
	pager_stats.npages = tee_pager_npages;
}

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	*stats = pager_stats;

	pager_stats.hidden_hits = 0;
	pager_stats.ro_hits = 0;
	pager_stats.rw_hits = 0;
	pager_stats.zi_released = 0;
}

#else /* CFG_WITH_STATS */
static inline void incr_ro_hits(void) { }
static inline void incr_rw_hits(void) { }
static inline void incr_hidden_hits(void) { }
static inline void incr_zi_released(void) { }
static inline void incr_npages_all(void) { }
static inline void set_npages(void) { }

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	memset(stats, 0, sizeof(struct tee_pager_stats));
}
#endif /* CFG_WITH_STATS */

/*
 * Reference to the translation table used to map the virtual memory range
 * covered by the pager.
 */
static struct core_mmu_table_info tbl_info;

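/*
 * Register a pageable area covering @mm. Read-only areas must supply a
 * backing store and per-page hashes; read/write areas must not, as they
 * are zero-initialized when first paged in.
 */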
bool tee_pager_add_area(tee_mm_entry_t *mm, uint32_t flags, const void *store,
		const void *hashes)
{
	struct tee_pager_area *area;
	size_t tbl_va_size;

	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
		tee_mm_get_smem(mm),
		tee_mm_get_smem(mm) + (mm->size << mm->pool->shift),
		flags, store, hashes);

	if (flags & TEE_PAGER_AREA_RO)
		TEE_ASSERT(store && hashes);
	else if (flags & TEE_PAGER_AREA_RW)
		TEE_ASSERT(!store && !hashes);
	else
		panic();

	if (!tbl_info.num_entries) {
		if (!core_mmu_find_table(tee_mm_get_smem(mm), UINT_MAX,
					&tbl_info))
			return false;
		if ((1 << tbl_info.shift) != SMALL_PAGE_SIZE) {
			DMSG("Unsupported page size in translation table %u",
			     1 << tbl_info.shift);
			return false;
		}
	}

	tbl_va_size = (1 << tbl_info.shift) * tbl_info.num_entries;
	if (!core_is_buffer_inside(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
				   tbl_info.va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
			tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
			tbl_info.va_base, tbl_va_size);
		return false;
	}

	area = malloc(sizeof(struct tee_pager_area));
	if (!area)
		return false;

	area->mm = mm;
	area->flags = flags;
	area->store = store;
	area->hashes = hashes;
	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
	return true;
}

static struct tee_pager_area *tee_pager_find_area(vaddr_t va)
{
	struct tee_pager_area *area;

	TAILQ_FOREACH(area, &tee_pager_area_head, link) {
		tee_mm_entry_t *mm = area->mm;
		size_t offset = (va - mm->pool->lo) >> mm->pool->shift;

		if (offset >= mm->offset && offset < (mm->offset + mm->size))
			return area;
	}
	return NULL;
}

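/*
 * Build the MMU attributes for a page belonging to @area: a valid, secure,
 * global, cached, read/write mapping, with execute permission added for
 * executable areas.
 */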
static uint32_t get_area_mattr(struct tee_pager_area *area)
{
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
			TEE_MATTR_CACHE_DEFAULT | TEE_MATTR_SECURE;

	attr |= TEE_MATTR_PRW;
	if (area->flags & TEE_PAGER_AREA_X)
		attr |= TEE_MATTR_PX;

	return attr;
}

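/*
 * Fill the page at @page_va with its contents: pages belonging to an area
 * with a backing store are copied from that store, zero-initialized
 * (read/write) pages are simply cleared.
 */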
static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va)
{
	size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;

	if (area->store) {
		size_t rel_pg_idx = pg_idx - area->mm->offset;
		const void *stored_page = area->store +
					  rel_pg_idx * SMALL_PAGE_SIZE;

		memcpy((void *)page_va, stored_page, SMALL_PAGE_SIZE);
		incr_ro_hits();
	} else {
		memset((void *)page_va, 0, SMALL_PAGE_SIZE);
		incr_rw_hits();
	}
}

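/*
 * Verify the SHA-256 hash of a page just paged in from the backing store.
 * A mismatch means the store was corrupted or tampered with, so the only
 * safe response is to panic.
 */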
static void tee_pager_verify_page(struct tee_pager_area *area, vaddr_t page_va)
{
	size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;

	if (area->store) {
		size_t rel_pg_idx = pg_idx - area->mm->offset;
		const void *hash = area->hashes +
				   rel_pg_idx * TEE_SHA256_HASH_SIZE;

		if (hash_sha256_check(hash, (void *)page_va, SMALL_PAGE_SIZE) !=
				TEE_SUCCESS) {
			EMSG("PH 0x%" PRIxVA " failed", page_va);
			panic();
		}
	}
}

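/*
 * If the page at @page_va is hidden, restore its mapping and move it to
 * the tail of the list so it counts as most recently used. Returns true
 * if the page was hidden.
 */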
static bool tee_pager_unhide_page(vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);

		if (!(attr & TEE_MATTR_HIDDEN_BLOCK))
			continue;

		if (core_mmu_va2idx(&tbl_info, page_va) == pmem->pgidx) {
			/* page is hidden, show and move to back */
			core_mmu_set_entry(&tbl_info, pmem->pgidx, pa,
					   get_area_mattr(pmem->area));

			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);

			/* TODO only invalidate entry touched above */
			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

			incr_hidden_hits();
			return true;
		}
	}

	return false;
}

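/*
 * Hide the oldest TEE_PAGER_NHIDE mapped pages. A hidden page keeps its
 * physical address but faults on access, so pages still in active use
 * reveal themselves through the faults they trigger and get moved to the
 * back of the list.
 */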
static void tee_pager_hide_pages(void)
{
	struct tee_pager_pmem *pmem;
	size_t n = 0;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		if (n >= TEE_PAGER_NHIDE)
			break;
		n++;

		/*
		 * We cannot hide pages when pmem->area is not defined, as
		 * unhiding requires pmem->area to be defined.
		 */
		if (!pmem->area)
			continue;

		core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		core_mmu_set_entry(&tbl_info, pmem->pgidx, pa,
				   TEE_MATTR_HIDDEN_BLOCK);
	}

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

/*
 * Find the mapped pmem backing @page_va, unmap it and move it back to the
 * list of pageable pmems.
 * Return false if the page was not mapped, true if it was.
 */
static bool tee_pager_release_one_zi(vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;
	unsigned pgidx;
	paddr_t pa;
	uint32_t attr;

	pgidx = core_mmu_va2idx(&tbl_info, page_va);
	core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);

#ifdef TEE_PAGER_DEBUG_PRINT
	DMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
#endif

	TAILQ_FOREACH(pmem, &tee_pager_rw_pmem_head, link) {
		if (pmem->pgidx != pgidx)
			continue;

		core_mmu_set_entry(&tbl_info, pgidx, pa, TEE_MATTR_PHYS_BLOCK);
		TAILQ_REMOVE(&tee_pager_rw_pmem_head, pmem, link);
		tee_pager_npages++;
		set_npages();
		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
		incr_zi_released();

		return true;
	}

	return false;
}
#endif /*CFG_WITH_PAGER*/

#ifdef ARM32
/* Returns true if the exception originated from user mode */
static bool tee_pager_is_user_exception(struct tee_pager_abort_info *ai)
{
	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif /*ARM32*/

#ifdef ARM64
/* Returns true if the exception originated from user mode */
static bool tee_pager_is_user_exception(struct tee_pager_abort_info *ai)
{
	uint32_t spsr = ai->regs->spsr;

	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif /*ARM64*/

#ifdef ARM32
/* Returns true if the exception originated from abort mode */
static bool tee_pager_is_abort_in_abort_handler(struct tee_pager_abort_info *ai)
{
	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_ABT;
}
#endif /*ARM32*/

#ifdef ARM64
/* Returns true if the exception originated from abort mode */
static bool tee_pager_is_abort_in_abort_handler(
		struct tee_pager_abort_info *ai __unused)
{
	return false;
}
#endif /*ARM64*/

static __unused const char *abort_type_to_str(uint32_t abort_type)
{
	if (abort_type == THREAD_ABORT_DATA)
		return "data";
	if (abort_type == THREAD_ABORT_PREFETCH)
		return "prefetch";
	return "undef";
}

static __unused void tee_pager_print_detailed_abort(
				struct tee_pager_abort_info *ai __unused,
				const char *ctx __unused)
{
	EMSG_RAW("\n");
	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "\n",
		ctx, abort_type_to_str(ai->abort_type), ai->va);
#ifdef ARM32
	EMSG_RAW(" fsr 0x%08x  ttbr0 0x%08x  ttbr1 0x%08x  cidr 0x%X\n",
		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
		 read_contextidr());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x\n",
		 get_core_pos(), ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x      r4 0x%08x    r8 0x%08x   r12 0x%08x\n",
		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
	EMSG_RAW(" r1 0x%08x      r5 0x%08x    r9 0x%08x    sp 0x%08x\n",
		 ai->regs->r1, ai->regs->r5, ai->regs->r9,
		 read_mode_sp(ai->regs->spsr & CPSR_MODE_MASK));
	EMSG_RAW(" r2 0x%08x      r6 0x%08x   r10 0x%08x    lr 0x%08x\n",
		 ai->regs->r2, ai->regs->r6, ai->regs->r10,
		 read_mode_lr(ai->regs->spsr & CPSR_MODE_MASK));
	EMSG_RAW(" r3 0x%08x      r7 0x%08x   r11 0x%08x    pc 0x%08x\n",
		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif /*ARM32*/
#ifdef ARM64
	EMSG_RAW(" esr 0x%08x  ttbr0 0x%08" PRIx64 "   ttbr1 0x%08" PRIx64 "   cidr 0x%X\n",
		 ai->fault_descr, read_ttbr0_el1(), read_ttbr1_el1(),
		 read_contextidr_el1());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x\n",
		 get_core_pos(), (uint32_t)ai->regs->spsr);
	EMSG_RAW("x0  %016" PRIx64 " x1  %016" PRIx64,
		 ai->regs->x0, ai->regs->x1);
	EMSG_RAW("x2  %016" PRIx64 " x3  %016" PRIx64,
		 ai->regs->x2, ai->regs->x3);
	EMSG_RAW("x4  %016" PRIx64 " x5  %016" PRIx64,
		 ai->regs->x4, ai->regs->x5);
	EMSG_RAW("x6  %016" PRIx64 " x7  %016" PRIx64,
		 ai->regs->x6, ai->regs->x7);
	EMSG_RAW("x8  %016" PRIx64 " x9  %016" PRIx64,
		 ai->regs->x8, ai->regs->x9);
	EMSG_RAW("x10 %016" PRIx64 " x11 %016" PRIx64,
		 ai->regs->x10, ai->regs->x11);
	EMSG_RAW("x12 %016" PRIx64 " x13 %016" PRIx64,
		 ai->regs->x12, ai->regs->x13);
	EMSG_RAW("x14 %016" PRIx64 " x15 %016" PRIx64,
		 ai->regs->x14, ai->regs->x15);
	EMSG_RAW("x16 %016" PRIx64 " x17 %016" PRIx64,
		 ai->regs->x16, ai->regs->x17);
	EMSG_RAW("x18 %016" PRIx64 " x19 %016" PRIx64,
		 ai->regs->x18, ai->regs->x19);
	EMSG_RAW("x20 %016" PRIx64 " x21 %016" PRIx64,
		 ai->regs->x20, ai->regs->x21);
	EMSG_RAW("x22 %016" PRIx64 " x23 %016" PRIx64,
		 ai->regs->x22, ai->regs->x23);
	EMSG_RAW("x24 %016" PRIx64 " x25 %016" PRIx64,
		 ai->regs->x24, ai->regs->x25);
	EMSG_RAW("x26 %016" PRIx64 " x27 %016" PRIx64,
		 ai->regs->x26, ai->regs->x27);
	EMSG_RAW("x28 %016" PRIx64 " x29 %016" PRIx64,
		 ai->regs->x28, ai->regs->x29);
	EMSG_RAW("x30 %016" PRIx64 " elr %016" PRIx64,
		 ai->regs->x30, ai->regs->elr);
	EMSG_RAW("sp_el0 %016" PRIx64, ai->regs->sp_el0);
#endif /*ARM64*/
}

static void tee_pager_print_user_abort(struct tee_pager_abort_info *ai __unused)
{
#ifdef CFG_TEE_CORE_TA_TRACE
	tee_pager_print_detailed_abort(ai, "user TA");
	tee_ta_dump_current();
#endif
}

static void tee_pager_print_abort(struct tee_pager_abort_info *ai __unused)
{
#if (TRACE_LEVEL >= TRACE_INFO)
	tee_pager_print_detailed_abort(ai, "core");
#endif /*TRACE_LEVEL >= TRACE_INFO*/
}

static void tee_pager_print_error_abort(
		struct tee_pager_abort_info *ai __unused)
{
#if (TRACE_LEVEL >= TRACE_INFO)
	/* full verbose log at INFO level and above */
	tee_pager_print_detailed_abort(ai, "core");
#else
#ifdef ARM32
	EMSG("%s-abort at 0x%" PRIxVA "\n"
	     "FSR 0x%x PC 0x%x TTBR0 0x%X CONTEXTIDR 0x%X\n"
	     "CPUID 0x%x CPSR 0x%x (read from SPSR)",
	     abort_type_to_str(ai->abort_type),
	     ai->va, ai->fault_descr, ai->pc, read_ttbr0(), read_contextidr(),
	     read_mpidr(), read_spsr());
#endif /*ARM32*/
#ifdef ARM64
	EMSG("%s-abort at 0x%" PRIxVA "\n"
	     "ESR 0x%x PC 0x%x TTBR0 0x%" PRIx64 " CONTEXTIDR 0x%X\n"
	     "CPUID 0x%" PRIx64 " CPSR 0x%x (read from SPSR)",
	     abort_type_to_str(ai->abort_type),
	     ai->va, ai->fault_descr, ai->pc, read_ttbr0_el1(),
	     read_contextidr_el1(),
	     read_mpidr_el1(), (uint32_t)ai->regs->spsr);
#endif /*ARM64*/
#endif /*TRACE_LEVEL >= TRACE_INFO*/
}

static enum tee_pager_fault_type tee_pager_get_fault_type(
		struct tee_pager_abort_info *ai)
{
	/* In a multithreaded version, this section must be protected */
	if (tee_pager_is_user_exception(ai)) {
		tee_pager_print_user_abort(ai);
		DMSG("[TEE_PAGER] abort in User mode (TA will panic)");
		return TEE_PAGER_FAULT_TYPE_USER_TA_PANIC;
	}

	if (tee_pager_is_abort_in_abort_handler(ai)) {
		tee_pager_print_error_abort(ai);
		EMSG("[PAGER] abort in abort handler (trap CPU)");
		panic();
	}

	if (ai->abort_type == THREAD_ABORT_UNDEF) {
		tee_pager_print_error_abort(ai);
		EMSG("[TEE_PAGER] undefined abort (trap CPU)");
		panic();
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		tee_pager_print_error_abort(ai);
		EMSG("[TEE_PAGER] alignment fault!  (trap CPU)");
		panic();
		break;

	case CORE_MMU_FAULT_ACCESS_BIT:
		tee_pager_print_error_abort(ai);
		EMSG("[TEE_PAGER] access bit fault!  (trap CPU)");
		panic();
		break;

	case CORE_MMU_FAULT_DEBUG_EVENT:
		tee_pager_print_abort(ai);
		DMSG("[TEE_PAGER] Ignoring debug event!");
		return TEE_PAGER_FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_PERMISSION:
		return TEE_PAGER_FAULT_TYPE_PAGEABLE;

	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
		tee_pager_print_abort(ai);
		DMSG("[TEE_PAGER] Ignoring async external abort!");
		return TEE_PAGER_FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_OTHER:
	default:
		tee_pager_print_abort(ai);
		DMSG("[TEE_PAGER] Unhandled fault!");
		return TEE_PAGER_FAULT_TYPE_IGNORE;
	}
}

#ifdef CFG_WITH_PAGER

/* Finds the oldest page and remaps it for the new virtual address */
static struct tee_pager_pmem *tee_pager_get_page(
		struct tee_pager_abort_info *ai,
		struct tee_pager_area *area)
{
	unsigned pgidx = core_mmu_va2idx(&tbl_info, ai->va);
	struct tee_pager_pmem *pmem;
	paddr_t pa;
	uint32_t attr;

	core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);

	assert(!(attr & (TEE_MATTR_VALID_BLOCK | TEE_MATTR_HIDDEN_BLOCK)));

	if (attr & TEE_MATTR_PHYS_BLOCK) {
		/*
		 * There's a pmem entry using this mmu entry, let's use
		 * that entry in the new mapping.
		 */
		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
			if (pmem->pgidx == pgidx)
				break;
		}
		if (!pmem) {
			tee_pager_print_abort(ai);
			DMSG("Couldn't find pmem for pgidx %u", pgidx);
			panic();
		}
	} else {
		pmem = TAILQ_FIRST(&tee_pager_pmem_head);
		if (!pmem) {
			tee_pager_print_abort(ai);
			DMSG("No pmem entries");
			panic();
		}
		core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);
		core_mmu_set_entry(&tbl_info, pmem->pgidx, 0, 0);
	}

	pmem->pgidx = pgidx;
	pmem->area = area;
	core_mmu_set_entry(&tbl_info, pgidx, pa, get_area_mattr(area));

	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
	if (area->store) {
		/* move page to back */
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	} else {
		/* Move page to rw list */
		TEE_ASSERT(tee_pager_npages > 0);
		tee_pager_npages--;
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_rw_pmem_head, pmem, link);
	}

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

#ifdef TEE_PAGER_DEBUG_PRINT
	DMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
	     core_mmu_idx2va(&tbl_info, pgidx), pa);
#endif

	return pmem;
}

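/*
 * Handle a fault in the pageable region: look up the area covering the
 * faulting address, then either unhide the page if it was merely hidden,
 * or claim a physical page and load and verify its contents. Finally hide
 * the oldest pages so future faults keep the usage information current.
 */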
static void tee_pager_handle_fault(struct tee_pager_abort_info *ai)
{
	struct tee_pager_area *area;
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;

#ifdef TEE_PAGER_DEBUG_PRINT
	tee_pager_print_abort(ai);
#endif

	/* check if the access is valid */
	area = tee_pager_find_area(ai->va);
	if (!area) {
		tee_pager_print_abort(ai);
		DMSG("Invalid addr 0x%" PRIxVA, ai->va);
		panic();
	}

	if (!tee_pager_unhide_page(page_va)) {
		/* the page wasn't hidden */
		tee_pager_get_page(ai, area);

		/* load page code & data */
		tee_pager_load_page(area, page_va);
		/* TODO remap read-only if TEE_PAGER_AREA_RO */
		tee_pager_verify_page(area, page_va);
		/* TODO remap executable if TEE_PAGER_AREA_X */

		if (area->flags & TEE_PAGER_AREA_X) {
			cache_maintenance_l1(DCACHE_AREA_CLEAN,
				(void *)page_va, SMALL_PAGE_SIZE);

			cache_maintenance_l1(ICACHE_AREA_INVALIDATE,
				(void *)page_va, SMALL_PAGE_SIZE);
		}
	}

	tee_pager_hide_pages();
	/* end protect (multithreaded version) */
}

#else /*CFG_WITH_PAGER*/

static void tee_pager_handle_fault(struct tee_pager_abort_info *ai)
{
	/*
	 * Until the pager is supported, trap the CPU here.
	 */
	tee_pager_print_error_abort(ai);
	EMSG("Unexpected page fault! Trap CPU");
	panic();
}

#endif /*CFG_WITH_PAGER*/

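/*
 * Decode the fault into a struct tee_pager_abort_info. On ARM32 data and
 * prefetch aborts report status and address through DFSR/DFAR and
 * IFSR/IFAR respectively; on ARM64 everything is derived from ESR_EL1 and
 * FAR_EL1. For undefined aborts the faulting address is the PC itself.
 */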
#ifdef ARM32
static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
		struct tee_pager_abort_info *ai)
{
	switch (abort_type) {
	case THREAD_ABORT_DATA:
		ai->fault_descr = read_dfsr();
		ai->va = read_dfar();
		break;
	case THREAD_ABORT_PREFETCH:
		ai->fault_descr = read_ifsr();
		ai->va = read_ifar();
		break;
	default:
		ai->fault_descr = 0;
		ai->va = regs->elr;
		break;
	}
	ai->abort_type = abort_type;
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM32*/

#ifdef ARM64
static void set_abort_info(uint32_t abort_type __unused,
		struct thread_abort_regs *regs, struct tee_pager_abort_info *ai)
{
	ai->fault_descr = read_esr_el1();
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_IABT_EL0:
	case ESR_EC_IABT_EL1:
		ai->abort_type = THREAD_ABORT_PREFETCH;
		ai->va = read_far_el1();
		break;
	case ESR_EC_DABT_EL0:
	case ESR_EC_DABT_EL1:
	case ESR_EC_SP_ALIGN:
		ai->abort_type = THREAD_ABORT_DATA;
		ai->va = read_far_el1();
		break;
	default:
		ai->abort_type = THREAD_ABORT_UNDEF;
		ai->va = regs->elr;
	}
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM64*/

#ifdef ARM32
static void handle_user_ta_panic(struct tee_pager_abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->r1 = true;
	ai->regs->r2 = 0xdeadbeef;
	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
	ai->regs->spsr = read_cpsr();
	ai->regs->spsr &= ~CPSR_MODE_MASK;
	ai->regs->spsr |= CPSR_MODE_SVC;
	ai->regs->spsr &= ~CPSR_FIA;
	ai->regs->spsr |= read_spsr() & CPSR_FIA;
	/* Select Thumb or ARM mode */
	if (ai->regs->elr & 1)
		ai->regs->spsr |= CPSR_T;
	else
		ai->regs->spsr &= ~CPSR_T;
}
#endif /*ARM32*/

#ifdef ARM64
static void handle_user_ta_panic(struct tee_pager_abort_info *ai)
{
	uint32_t daif;

	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->x1 = true;
	ai->regs->x2 = 0xdeadbeef;
	ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
	ai->regs->sp_el0 = thread_get_saved_thread_sp();

	daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
	/* XXX what about DAIF_D? */
	ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
}
#endif /*ARM64*/

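/*
 * Entry point for all aborts: classify the fault and either ignore it,
 * panic the offending user TA, or try to page in the missing page.
 */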
void tee_pager_abort_handler(uint32_t abort_type,
		struct thread_abort_regs *regs)
{
	struct tee_pager_abort_info ai;

	set_abort_info(abort_type, regs, &ai);

	switch (tee_pager_get_fault_type(&ai)) {
	case TEE_PAGER_FAULT_TYPE_IGNORE:
		break;
	case TEE_PAGER_FAULT_TYPE_USER_TA_PANIC:
		handle_user_ta_panic(&ai);
		break;
	case TEE_PAGER_FAULT_TYPE_PAGEABLE:
	default:
		tee_pager_handle_fault(&ai);
		break;
	}
}

#ifdef CFG_WITH_PAGER
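/*
 * Register the physical pages currently mapped at [vaddr, vaddr + npages *
 * SMALL_PAGE_SIZE) with the pager. With @unmap set, the pages are made
 * inaccessible while their MMU entries keep the physical addresses, so the
 * pages can immediately be reused for paging.
 */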
void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
{
	size_t n;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);

	/* setup memory */
	for (n = 0; n < npages; n++) {
		struct tee_pager_pmem *pmem;
		tee_vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
		unsigned pgidx = core_mmu_va2idx(&tbl_info, va);
		paddr_t pa;
		uint32_t attr;

		core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);

		/* Ignore unmapped pages/blocks */
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		pmem = malloc(sizeof(struct tee_pager_pmem));
		if (!pmem) {
			DMSG("Can't allocate memory");
			panic();
		}

		pmem->pgidx = pgidx;
		pmem->area = NULL;

		if (unmap) {
			/*
			 * Note that we're making the page inaccessible
			 * with the TEE_MATTR_PHYS_BLOCK attribute to
			 * indicate that the descriptor still holds a valid
			 * physical address of a page.
			 */
			core_mmu_set_entry(&tbl_info, pgidx, pa,
					   TEE_MATTR_PHYS_BLOCK);
		}
		tee_pager_npages++;
		incr_npages_all();
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	if (unmap) {
		/* Invalidate secure TLB */
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
	}
}

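/*
 * Release the physical pages backing a zero-initialized range and return
 * them to the pager's pool of pageable pages. Both @vaddr and @size must
 * be page aligned.
 */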
void tee_pager_release_zi(vaddr_t vaddr, size_t size)
{
	bool unmapped = false;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	if ((vaddr & SMALL_PAGE_MASK) || (size & SMALL_PAGE_MASK))
		panic();

	for (; size; vaddr += SMALL_PAGE_SIZE, size -= SMALL_PAGE_SIZE)
		unmapped |= tee_pager_release_one_zi(vaddr);

	/* Invalidate secure TLB */
	if (unmapped)
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	thread_set_exceptions(exceptions);
}

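/*
 * Allocate zero-initialized pageable memory: reserve virtual space from
 * tee_mm_vcore and register it as a read/write area. Physical pages are
 * only supplied on demand, by the fault handler.
 */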
void *tee_pager_request_zi(size_t size)
{
	tee_mm_entry_t *mm;

	if (!size)
		return NULL;

	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
	if (!mm)
		return NULL;

	tee_pager_add_area(mm, TEE_PAGER_AREA_RW, NULL, NULL);

	return (void *)tee_mm_get_smem(mm);
}

#else /*CFG_WITH_PAGER*/

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	memset(stats, 0, sizeof(struct tee_pager_stats));
}

#endif /*CFG_WITH_PAGER*/