/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <kernel/tee_common_unpg.h>
#include <kernel/tee_common.h>
#include <kernel/thread_defs.h>
#include <kernel/panic.h>
#include <mm/tee_mmu_defs.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_kta_trace.h>
#include <kernel/misc.h>
#include <kernel/tee_misc.h>
#include <mm/tee_pager.h>
#include <mm/tee_mm.h>
#include <mm/core_mmu.h>
#include <tee/arch_svc.h>
#include <arm.h>
#include <tee/tee_cryp_provider.h>
#include <tee_api_defines.h>
#include <utee_defines.h>
#include <trace.h>
#include <util.h>

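/*
 * struct tee_pager_abort_info - Everything the handlers need to know
 * about an abort.
 * @abort_type	one of the THREAD_ABORT_* types
 * @fault_descr	content of the fault status/syndrome register
 * @va		faulting virtual address
 * @pc		program counter at the time of the abort
 * @regs	registers saved by the abort exception vector
 */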
struct tee_pager_abort_info {
	uint32_t abort_type;
	uint32_t fault_descr;
	vaddr_t va;
	uint32_t pc;
	struct thread_abort_regs *regs;
};

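/*
 * Classification of an abort:
 * USER_TA_PANIC	abort from user mode, the user TA is made to panic
 * PAGEABLE		translation or permission fault that the pager can
 *			resolve by paging in the page
 * IGNORE		e.g. debug events and async external aborts
 */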
enum tee_pager_fault_type {
	TEE_PAGER_FAULT_TYPE_USER_TA_PANIC,
	TEE_PAGER_FAULT_TYPE_PAGEABLE,
	TEE_PAGER_FAULT_TYPE_IGNORE,
};

#ifdef CFG_WITH_PAGER

#ifndef CFG_DISABLE_CONCURRENT_EXEC
#error "Pager can't be configured together with concurrent execution"
#endif

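/*
 * struct tee_pager_area - Represents a virtual memory range served by the
 * pager.
 * @hashes	SHA-256 hashes of the pages in @store (read-only areas only)
 * @store	backing store holding the original page contents, or NULL
 *		for zero-initialized read/write areas
 * @flags	TEE_PAGER_AREA_* flags
 * @mm		memory map entry describing the virtual range
 */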
struct tee_pager_area {
	const uint8_t *hashes;
	const uint8_t *store;
	uint32_t flags;
	tee_mm_entry_t *mm;
	TAILQ_ENTRY(tee_pager_area) link;
};

static TAILQ_HEAD(tee_pager_area_head, tee_pager_area) tee_pager_area_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);

/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @pgidx	index of the corresponding entry in tbl_info. The physical
 *		address is kept in the MMU entry itself, so even when the
 *		page isn't mapped there's still an MMU entry holding the
 *		physical address.
 *
 * @area	the pager area the page currently belongs to
 */
struct tee_pager_pmem {
	unsigned pgidx;
	struct tee_pager_area *area;
	TAILQ_ENTRY(tee_pager_pmem) link;
};

/* The list of physical pages. The first page in the list is the oldest */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);

static struct tee_pager_pmem_head tee_pager_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);

static struct tee_pager_pmem_head tee_pager_rw_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_rw_pmem_head);

/* Number of pages to hide on each call to tee_pager_hide_pages() */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)

/* Number of registered physical pages, used when hiding pages */
static size_t tee_pager_npages;

#ifdef CFG_WITH_STATS
static struct tee_pager_stats pager_stats;

static inline void incr_ro_hits(void)
{
	pager_stats.ro_hits++;
}

static inline void incr_rw_hits(void)
{
	pager_stats.rw_hits++;
}

static inline void incr_hidden_hits(void)
{
	pager_stats.hidden_hits++;
}

static inline void incr_zi_released(void)
{
	pager_stats.zi_released++;
}

static inline void incr_npages_all(void)
{
	pager_stats.npages_all++;
}

static inline void set_npages(void)
{
	pager_stats.npages = tee_pager_npages;
}

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	*stats = pager_stats;

	pager_stats.hidden_hits = 0;
	pager_stats.ro_hits = 0;
	pager_stats.rw_hits = 0;
	pager_stats.zi_released = 0;
}

#else /* CFG_WITH_STATS */
static inline void incr_ro_hits(void) { }
static inline void incr_rw_hits(void) { }
static inline void incr_hidden_hits(void) { }
static inline void incr_zi_released(void) { }
static inline void incr_npages_all(void) { }
static inline void set_npages(void) { }

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	memset(stats, 0, sizeof(struct tee_pager_stats));
}
#endif /* CFG_WITH_STATS */
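
/*
 * Note that tee_pager_get_stats() is read-and-clear: the hit and release
 * counters restart from zero on each call, while npages and npages_all
 * reflect the current and total page counts and are left untouched.
 */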

/*
 * Reference to the translation table used to map the virtual memory range
 * covered by the pager.
 */
static struct core_mmu_table_info tbl_info;

bool tee_pager_add_area(tee_mm_entry_t *mm, uint32_t flags, const void *store,
		const void *hashes)
{
	struct tee_pager_area *area;
	size_t tbl_va_size;

	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
		tee_mm_get_smem(mm),
		tee_mm_get_smem(mm) + (mm->size << mm->pool->shift),
		flags, store, hashes);

	if (flags & TEE_PAGER_AREA_RO)
		TEE_ASSERT(store && hashes);
	else if (flags & TEE_PAGER_AREA_RW)
		TEE_ASSERT(!store && !hashes);
	else
		panic();

	if (!tbl_info.num_entries) {
		if (!core_mmu_find_table(tee_mm_get_smem(mm), UINT_MAX,
					&tbl_info))
			return false;
		if ((1 << tbl_info.shift) != SMALL_PAGE_SIZE) {
			DMSG("Unsupported page size in translation table %u",
			     1 << tbl_info.shift);
			return false;
		}
	}

	tbl_va_size = (1 << tbl_info.shift) * tbl_info.num_entries;
	if (!core_is_buffer_inside(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
				   tbl_info.va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
			tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
			tbl_info.va_base, tbl_va_size);
		return false;
	}

	area = malloc(sizeof(struct tee_pager_area));
	if (!area)
		return false;

	area->mm = mm;
	area->flags = flags;
	area->store = store;
	area->hashes = hashes;
	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
	return true;
}
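
/*
 * A minimal usage sketch (hypothetical values): a read-only, executable
 * area is registered with its backing store and per-page hashes, e.g.
 *
 *	tee_pager_add_area(mm, TEE_PAGER_AREA_RO | TEE_PAGER_AREA_X,
 *			   store, hashes);
 *
 * while a read/write area passes NULL for both (see tee_pager_request_zi()
 * below).
 */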

static struct tee_pager_area *tee_pager_find_area(vaddr_t va)
{
	struct tee_pager_area *area;

	TAILQ_FOREACH(area, &tee_pager_area_head, link) {
		tee_mm_entry_t *mm = area->mm;
		size_t offset = (va - mm->pool->lo) >> mm->pool->shift;

		if (offset >= mm->offset && offset < (mm->offset + mm->size))
			return area;
	}
	return NULL;
}
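
/*
 * Note that pages are always mapped privileged read/write; the RO/X flags
 * of the area only decide whether the page is also mapped executable (see
 * the TODOs in tee_pager_handle_fault() about remapping read-only).
 */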
static uint32_t get_area_mattr(struct tee_pager_area *area)
{
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
			TEE_MATTR_CACHE_DEFAULT | TEE_MATTR_SECURE;

	attr |= TEE_MATTR_PRW;
	if (area->flags & TEE_PAGER_AREA_X)
		attr |= TEE_MATTR_PX;

	return attr;
}

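/*
 * Fills the freshly mapped page at @page_va: pages of a read-only area are
 * copied from the backing store, pages of a read/write area start out
 * zero-filled.
 */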
static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va)
{
	size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;

	if (area->store) {
		size_t rel_pg_idx = pg_idx - area->mm->offset;
		const void *stored_page = area->store +
					  rel_pg_idx * SMALL_PAGE_SIZE;

		memcpy((void *)page_va, stored_page, SMALL_PAGE_SIZE);
		incr_ro_hits();
	} else {
		memset((void *)page_va, 0, SMALL_PAGE_SIZE);
		incr_rw_hits();
	}
}
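
/*
 * Verifies a loaded read-only page against its expected SHA-256 hash and
 * panics on mismatch; read/write pages have no backing store and nothing
 * to verify.
 */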
static void tee_pager_verify_page(struct tee_pager_area *area, vaddr_t page_va)
{
	size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;

	if (area->store) {
		size_t rel_pg_idx = pg_idx - area->mm->offset;
		const void *hash = area->hashes +
				   rel_pg_idx * TEE_SHA256_HASH_SIZE;

		if (hash_sha256_check(hash, (void *)page_va, SMALL_PAGE_SIZE) !=
				TEE_SUCCESS) {
			EMSG("PH 0x%" PRIxVA " failed", page_va);
			panic();
		}
	}
}
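
/*
 * A hidden page still owns its physical page but is mapped with
 * TEE_MATTR_HIDDEN_BLOCK, so the next access faults. Revealing the page
 * here and moving it to the tail of the list keeps the list roughly
 * ordered by how recently each page was used.
 */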
static bool tee_pager_unhide_page(vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);

		if (!(attr & TEE_MATTR_HIDDEN_BLOCK))
			continue;

		if (core_mmu_va2idx(&tbl_info, page_va) == pmem->pgidx) {
			/* page is hidden, show it and move it to the back */
			core_mmu_set_entry(&tbl_info, pmem->pgidx, pa,
					   get_area_mattr(pmem->area));

			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);

			/* TODO only invalidate entry touched above */
			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

			incr_hidden_hits();
			return true;
		}
	}

	return false;
}

static void tee_pager_hide_pages(void)
{
	struct tee_pager_pmem *pmem;
	size_t n = 0;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		if (n >= TEE_PAGER_NHIDE)
			break;
		n++;

		/*
		 * We cannot hide a page when pmem->area is not defined, as
		 * unhiding requires pmem->area to be defined.
		 */
		if (!pmem->area)
			continue;

		core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		core_mmu_set_entry(&tbl_info, pmem->pgidx, pa,
				   TEE_MATTR_HIDDEN_BLOCK);
	}

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

/*
 * Finds the mapped pmem for @page_va, unmaps it and moves it back to the
 * pool of pageable pmems. Returns true if the page was mapped, false
 * otherwise.
 */
static bool tee_pager_release_one_zi(vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;
	unsigned pgidx;
	paddr_t pa;
	uint32_t attr;

	pgidx = core_mmu_va2idx(&tbl_info, page_va);
	core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);

#ifdef TEE_PAGER_DEBUG_PRINT
	DMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
#endif

	TAILQ_FOREACH(pmem, &tee_pager_rw_pmem_head, link) {
		if (pmem->pgidx != pgidx)
			continue;

		core_mmu_set_entry(&tbl_info, pgidx, pa, TEE_MATTR_PHYS_BLOCK);
		TAILQ_REMOVE(&tee_pager_rw_pmem_head, pmem, link);
		tee_pager_npages++;
		set_npages();
		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
		incr_zi_released();

		return true;
	}

	return false;
}
#endif /*CFG_WITH_PAGER*/

#ifdef ARM32
/* Returns true if the exception originated from user mode */
static bool tee_pager_is_user_exception(struct tee_pager_abort_info *ai)
{
	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif /*ARM32*/

#ifdef ARM64
/* Returns true if the exception originated from user mode */
static bool tee_pager_is_user_exception(struct tee_pager_abort_info *ai)
{
	uint32_t spsr = ai->regs->spsr;

	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif /*ARM64*/

#ifdef ARM32
/* Returns true if the exception originated from abort mode */
static bool tee_pager_is_abort_in_abort_handler(struct tee_pager_abort_info *ai)
{
	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_ABT;
}
#endif /*ARM32*/

#ifdef ARM64
/* Returns true if the exception originated from abort mode */
static bool tee_pager_is_abort_in_abort_handler(
		struct tee_pager_abort_info *ai __unused)
{
	return false;
}
#endif /*ARM64*/

static __unused const char *abort_type_to_str(uint32_t abort_type)
{
	if (abort_type == THREAD_ABORT_DATA)
		return "data";
	if (abort_type == THREAD_ABORT_PREFETCH)
		return "prefetch";
	return "undef";
}

static __unused void tee_pager_print_detailed_abort(
				struct tee_pager_abort_info *ai __unused,
				const char *ctx __unused)
{
	EMSG_RAW("\n");
	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "\n",
		ctx, abort_type_to_str(ai->abort_type), ai->va);
#ifdef ARM32
	EMSG_RAW(" fsr 0x%08x  ttbr0 0x%08x  ttbr1 0x%08x  cidr 0x%X\n",
		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
		 read_contextidr());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x\n",
		 get_core_pos(), ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x      r4 0x%08x    r8 0x%08x   r12 0x%08x\n",
		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
	EMSG_RAW(" r1 0x%08x      r5 0x%08x    r9 0x%08x    sp 0x%08x\n",
		 ai->regs->r1, ai->regs->r5, ai->regs->r9,
		 read_mode_sp(ai->regs->spsr & CPSR_MODE_MASK));
	EMSG_RAW(" r2 0x%08x      r6 0x%08x   r10 0x%08x    lr 0x%08x\n",
		 ai->regs->r2, ai->regs->r6, ai->regs->r10,
		 read_mode_lr(ai->regs->spsr & CPSR_MODE_MASK));
	EMSG_RAW(" r3 0x%08x      r7 0x%08x   r11 0x%08x    pc 0x%08x\n",
		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif /*ARM32*/
#ifdef ARM64
	EMSG_RAW(" esr 0x%08x  ttbr0 0x%08" PRIx64 "   ttbr1 0x%08" PRIx64 "   cidr 0x%X\n",
		 ai->fault_descr, read_ttbr0_el1(), read_ttbr1_el1(),
		 read_contextidr_el1());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x\n",
		 get_core_pos(), (uint32_t)ai->regs->spsr);
	EMSG_RAW("x0  %016" PRIx64 " x1  %016" PRIx64,
		 ai->regs->x0, ai->regs->x1);
	EMSG_RAW("x2  %016" PRIx64 " x3  %016" PRIx64,
		 ai->regs->x2, ai->regs->x3);
	EMSG_RAW("x4  %016" PRIx64 " x5  %016" PRIx64,
		 ai->regs->x4, ai->regs->x5);
	EMSG_RAW("x6  %016" PRIx64 " x7  %016" PRIx64,
		 ai->regs->x6, ai->regs->x7);
	EMSG_RAW("x8  %016" PRIx64 " x9  %016" PRIx64,
		 ai->regs->x8, ai->regs->x9);
	EMSG_RAW("x10 %016" PRIx64 " x11 %016" PRIx64,
		 ai->regs->x10, ai->regs->x11);
	EMSG_RAW("x12 %016" PRIx64 " x13 %016" PRIx64,
		 ai->regs->x12, ai->regs->x13);
	EMSG_RAW("x14 %016" PRIx64 " x15 %016" PRIx64,
		 ai->regs->x14, ai->regs->x15);
	EMSG_RAW("x16 %016" PRIx64 " x17 %016" PRIx64,
		 ai->regs->x16, ai->regs->x17);
	EMSG_RAW("x18 %016" PRIx64 " x19 %016" PRIx64,
		 ai->regs->x18, ai->regs->x19);
	EMSG_RAW("x20 %016" PRIx64 " x21 %016" PRIx64,
		 ai->regs->x20, ai->regs->x21);
	EMSG_RAW("x22 %016" PRIx64 " x23 %016" PRIx64,
		 ai->regs->x22, ai->regs->x23);
	EMSG_RAW("x24 %016" PRIx64 " x25 %016" PRIx64,
		 ai->regs->x24, ai->regs->x25);
	EMSG_RAW("x26 %016" PRIx64 " x27 %016" PRIx64,
		 ai->regs->x26, ai->regs->x27);
	EMSG_RAW("x28 %016" PRIx64 " x29 %016" PRIx64,
		 ai->regs->x28, ai->regs->x29);
	EMSG_RAW("x30 %016" PRIx64 " elr %016" PRIx64,
		 ai->regs->x30, ai->regs->elr);
	EMSG_RAW("sp_el0 %016" PRIx64, ai->regs->sp_el0);
#endif /*ARM64*/
}

static void tee_pager_print_user_abort(struct tee_pager_abort_info *ai __unused)
{
#ifdef CFG_TEE_CORE_TA_TRACE
	tee_pager_print_detailed_abort(ai, "user TA");
	tee_ta_dump_current();
#endif
}

static void tee_pager_print_abort(struct tee_pager_abort_info *ai __unused)
{
#if (TRACE_LEVEL >= TRACE_INFO)
	tee_pager_print_detailed_abort(ai, "core");
#endif /*TRACE_LEVEL >= TRACE_INFO*/
}

static void tee_pager_print_error_abort(
		struct tee_pager_abort_info *ai __unused)
{
#if (TRACE_LEVEL >= TRACE_INFO)
	/* full verbose log at INFO level and above */
	tee_pager_print_detailed_abort(ai, "core");
#else
#ifdef ARM32
	EMSG("%s-abort at 0x%" PRIxVA "\n"
	     "FSR 0x%x PC 0x%x TTBR0 0x%X CONTEXTIDR 0x%X\n"
	     "CPUID 0x%x CPSR 0x%x (read from SPSR)",
	     abort_type_to_str(ai->abort_type),
	     ai->va, ai->fault_descr, ai->pc, read_ttbr0(), read_contextidr(),
	     read_mpidr(), read_spsr());
#endif /*ARM32*/
#ifdef ARM64
	EMSG("%s-abort at 0x%" PRIxVA "\n"
	     "ESR 0x%x PC 0x%x TTBR0 0x%" PRIx64 " CONTEXTIDR 0x%X\n"
	     "CPUID 0x%" PRIx64 " CPSR 0x%x (read from SPSR)",
	     abort_type_to_str(ai->abort_type),
	     ai->va, ai->fault_descr, ai->pc, read_ttbr0_el1(),
	     read_contextidr_el1(),
	     read_mpidr_el1(), (uint32_t)ai->regs->spsr);
#endif /*ARM64*/
#endif /*TRACE_LEVEL >= TRACE_INFO*/
}

static enum tee_pager_fault_type tee_pager_get_fault_type(
		struct tee_pager_abort_info *ai)
{
	/* In a multithreaded configuration this section must be protected */
	if (tee_pager_is_user_exception(ai)) {
		tee_pager_print_user_abort(ai);
		DMSG("[TEE_PAGER] abort in User mode (TA will panic)");
		return TEE_PAGER_FAULT_TYPE_USER_TA_PANIC;
	}

	if (tee_pager_is_abort_in_abort_handler(ai)) {
		tee_pager_print_error_abort(ai);
		EMSG("[PAGER] abort in abort handler (trap CPU)");
		panic();
	}

	if (ai->abort_type == THREAD_ABORT_UNDEF) {
		tee_pager_print_error_abort(ai);
		EMSG("[TEE_PAGER] undefined abort (trap CPU)");
		panic();
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		tee_pager_print_error_abort(ai);
		EMSG("[TEE_PAGER] alignment fault!  (trap CPU)");
		panic();
		break;

	case CORE_MMU_FAULT_ACCESS_BIT:
		tee_pager_print_error_abort(ai);
		EMSG("[TEE_PAGER] access bit fault!  (trap CPU)");
		panic();
		break;

	case CORE_MMU_FAULT_DEBUG_EVENT:
		tee_pager_print_abort(ai);
		DMSG("[TEE_PAGER] Ignoring debug event!");
		return TEE_PAGER_FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_PERMISSION:
		return TEE_PAGER_FAULT_TYPE_PAGEABLE;

	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
		tee_pager_print_abort(ai);
		DMSG("[TEE_PAGER] Ignoring async external abort!");
		return TEE_PAGER_FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_OTHER:
	default:
		tee_pager_print_abort(ai);
		DMSG("[TEE_PAGER] Unhandled fault!");
		return TEE_PAGER_FAULT_TYPE_IGNORE;
	}
}

#ifdef CFG_WITH_PAGER

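/*
 * Page replacement is FIFO with an "accessed" approximation: victims are
 * taken from the head of tee_pager_pmem_head (the oldest pages), and the
 * periodic hiding of pages moves any page that gets touched again back to
 * the tail of the list.
 */
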
/*
 * Finds a physical page for the faulting address: either the page that the
 * MMU entry still holds (TEE_MATTR_PHYS_BLOCK) or the oldest page in the
 * pool, which is then remapped for the new virtual address.
 */
static struct tee_pager_pmem *tee_pager_get_page(
		struct tee_pager_abort_info *ai,
		struct tee_pager_area *area)
{
	unsigned pgidx = core_mmu_va2idx(&tbl_info, ai->va);
	struct tee_pager_pmem *pmem;
	paddr_t pa;
	uint32_t attr;

	core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);

	assert(!(attr & (TEE_MATTR_VALID_BLOCK | TEE_MATTR_HIDDEN_BLOCK)));

	if (attr & TEE_MATTR_PHYS_BLOCK) {
		/*
		 * There's a pmem entry using this mmu entry, let's use
		 * that entry in the new mapping.
		 */
		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
			if (pmem->pgidx == pgidx)
				break;
		}
		if (!pmem) {
			tee_pager_print_abort(ai);
			DMSG("Couldn't find pmem for pgidx %u", pgidx);
			panic();
		}
	} else {
		pmem = TAILQ_FIRST(&tee_pager_pmem_head);
		if (!pmem) {
			tee_pager_print_abort(ai);
			DMSG("No pmem entries");
			panic();
		}
		core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);
		core_mmu_set_entry(&tbl_info, pmem->pgidx, 0, 0);
	}

	pmem->pgidx = pgidx;
	pmem->area = area;
	core_mmu_set_entry(&tbl_info, pgidx, pa, get_area_mattr(area));

	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
	if (area->store) {
		/* move page to back */
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	} else {
		/* Move page to rw list */
		TEE_ASSERT(tee_pager_npages > 0);
		tee_pager_npages--;
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_rw_pmem_head, pmem, link);
	}

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

#ifdef TEE_PAGER_DEBUG_PRINT
	DMSG("Mapped 0x%x -> 0x%x", core_mmu_idx2va(&tbl_info, pgidx), pa);
#endif

	return pmem;
}

static void tee_pager_handle_fault(struct tee_pager_abort_info *ai)
{
	struct tee_pager_area *area;
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;

#ifdef TEE_PAGER_DEBUG_PRINT
	tee_pager_print_abort(ai);
#endif

	/* check if the access is valid */
	area = tee_pager_find_area(ai->va);
	if (!area) {
		tee_pager_print_abort(ai);
		DMSG("Invalid addr 0x%" PRIxVA, ai->va);
		panic();
	}

	if (!tee_pager_unhide_page(page_va)) {
		/* the page wasn't hidden */
		tee_pager_get_page(ai, area);

		/* load page code & data */
		tee_pager_load_page(area, page_va);
		/* TODO remap readonly if TEE_PAGER_AREA_RO */
		tee_pager_verify_page(area, page_va);
		/* TODO remap executable if TEE_PAGER_AREA_X */

		if (area->flags & TEE_PAGER_AREA_X) {
			cache_maintenance_l1(DCACHE_AREA_CLEAN,
				(void *)page_va, SMALL_PAGE_SIZE);

			cache_maintenance_l1(ICACHE_AREA_INVALIDATE,
				(void *)page_va, SMALL_PAGE_SIZE);
		}
	}

	tee_pager_hide_pages();
	/* end protect (multithreaded version) */
}

#else /*CFG_WITH_PAGER*/

static void tee_pager_handle_fault(struct tee_pager_abort_info *ai)
{
	/*
	 * Without pager support a page fault is always fatal: trap the
	 * CPU here.
	 */
	tee_pager_print_error_abort(ai);
	EMSG("Unexpected page fault! Trap CPU");
	panic();
}

#endif /*CFG_WITH_PAGER*/

#ifdef ARM32
static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
		struct tee_pager_abort_info *ai)
{
	switch (abort_type) {
	case THREAD_ABORT_DATA:
		ai->fault_descr = read_dfsr();
		ai->va = read_dfar();
		break;
	case THREAD_ABORT_PREFETCH:
		ai->fault_descr = read_ifsr();
		ai->va = read_ifar();
		break;
	default:
		ai->fault_descr = 0;
		ai->va = regs->elr;
		break;
	}
	ai->abort_type = abort_type;
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM32*/

#ifdef ARM64
static void set_abort_info(uint32_t abort_type __unused,
		struct thread_abort_regs *regs, struct tee_pager_abort_info *ai)
{
	ai->fault_descr = read_esr_el1();
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_IABT_EL0:
	case ESR_EC_IABT_EL1:
		ai->abort_type = THREAD_ABORT_PREFETCH;
		ai->va = read_far_el1();
		break;
	case ESR_EC_DABT_EL0:
	case ESR_EC_DABT_EL1:
	case ESR_EC_SP_ALIGN:
		ai->abort_type = THREAD_ABORT_DATA;
		ai->va = read_far_el1();
		break;
	default:
		ai->abort_type = THREAD_ABORT_UNDEF;
		ai->va = regs->elr;
	}
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM64*/

#ifdef ARM32
static void handle_user_ta_panic(struct tee_pager_abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->r1 = true;
	ai->regs->r2 = 0xdeadbeef;
	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
	ai->regs->spsr = read_cpsr();
	ai->regs->spsr &= ~CPSR_MODE_MASK;
	ai->regs->spsr |= CPSR_MODE_SVC;
	ai->regs->spsr &= ~CPSR_FIA;
	ai->regs->spsr |= read_spsr() & CPSR_FIA;
	/* Select Thumb or ARM mode */
	if (ai->regs->elr & 1)
		ai->regs->spsr |= CPSR_T;
	else
		ai->regs->spsr &= ~CPSR_T;
}
#endif /*ARM32*/

#ifdef ARM64
static void handle_user_ta_panic(struct tee_pager_abort_info *ai)
{
	uint32_t daif;

	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->x1 = true;
	ai->regs->x2 = 0xdeadbeef;
	ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
	ai->regs->sp_el0 = thread_get_saved_thread_sp();

	daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
	/* XXX what about DAIF_D? */
	ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
}
#endif /*ARM64*/

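/*
 * Common entry point from the abort exception vectors: collects the abort
 * information, classifies the abort and either ignores it, panics the
 * current user TA or lets the pager resolve the fault.
 */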
void tee_pager_abort_handler(uint32_t abort_type,
		struct thread_abort_regs *regs)
{
	struct tee_pager_abort_info ai;

	set_abort_info(abort_type, regs, &ai);

	switch (tee_pager_get_fault_type(&ai)) {
	case TEE_PAGER_FAULT_TYPE_IGNORE:
		break;
	case TEE_PAGER_FAULT_TYPE_USER_TA_PANIC:
		handle_user_ta_panic(&ai);
		break;
	case TEE_PAGER_FAULT_TYPE_PAGEABLE:
	default:
		tee_pager_handle_fault(&ai);
		break;
	}
}

#ifdef CFG_WITH_PAGER
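/*
 * Hands the physical pages backing [vaddr, vaddr + npages * SMALL_PAGE_SIZE)
 * over to the pager. With @unmap set, each page is made inaccessible right
 * away (keeping only its physical address in the descriptor) so that it can
 * be reused for paging.
 */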
void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
{
	size_t n;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);

	/* setup memory */
	for (n = 0; n < npages; n++) {
		struct tee_pager_pmem *pmem;
		tee_vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
		unsigned pgidx = core_mmu_va2idx(&tbl_info, va);
		paddr_t pa;
		uint32_t attr;

		core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);

		/* Ignore unmapped pages/blocks */
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		pmem = malloc(sizeof(struct tee_pager_pmem));
		if (!pmem) {
			DMSG("Can't allocate memory");
			panic();
		}

		pmem->pgidx = pgidx;
		pmem->area = NULL;

		if (unmap) {
			/*
			 * Note that we're making the page inaccessible
			 * with the TEE_MATTR_PHYS_BLOCK attribute to
			 * indicate that the descriptor still holds a valid
			 * physical address of a page.
			 */
			core_mmu_set_entry(&tbl_info, pgidx, pa,
					   TEE_MATTR_PHYS_BLOCK);
		}
		tee_pager_npages++;
		incr_npages_all();
		set_npages();
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	if (unmap) {
		/* Invalidate secure TLB */
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
	}
}

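/*
 * Releases the zero-initialized pages backing [vaddr, vaddr + size), page
 * by page, back to the pool of pageable pages. Both vaddr and size must be
 * page aligned.
 */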
void tee_pager_release_zi(vaddr_t vaddr, size_t size)
{
	bool unmapped = false;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	if ((vaddr & SMALL_PAGE_MASK) || (size & SMALL_PAGE_MASK))
		panic();

	for (; size; vaddr += SMALL_PAGE_SIZE, size -= SMALL_PAGE_SIZE)
		unmapped |= tee_pager_release_one_zi(vaddr);

	/* Invalidate secure TLB */
	if (unmapped)
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	thread_set_exceptions(exceptions);
}

void *tee_pager_request_zi(size_t size)
{
	tee_mm_entry_t *mm;

	if (!size)
		return NULL;

	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
	if (!mm)
		return NULL;

	/* Don't leak the range if the area can't be registered */
	if (!tee_pager_add_area(mm, TEE_PAGER_AREA_RW, NULL, NULL)) {
		tee_mm_free(mm);
		return NULL;
	}

	return (void *)tee_mm_get_smem(mm);
}
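
/*
 * A minimal usage sketch (hypothetical sizes): request a demand-paged,
 * zero-initialized buffer and hand it back when done, e.g.
 *
 *	void *buf = tee_pager_request_zi(4 * SMALL_PAGE_SIZE);
 *
 *	if (buf) {
 *		... use buf ...
 *		tee_pager_release_zi((vaddr_t)buf, 4 * SMALL_PAGE_SIZE);
 *	}
 */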

#else /*CFG_WITH_PAGER*/

void tee_pager_get_stats(struct tee_pager_stats *stats)
{
	memset(stats, 0, sizeof(struct tee_pager_stats));
}

#endif /*CFG_WITH_PAGER*/
939