xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision 176c959dcbda2950a2a7b4853a7cf6bc571c1237)
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdlib.h>
#include <inttypes.h>
#include <kernel/tee_common_unpg.h>
#include <kernel/tee_common.h>
#include <kernel/thread_defs.h>
#include <kernel/panic.h>
#include <mm/tee_mmu_defs.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_kta_trace.h>
#include <kernel/misc.h>
#include <kernel/tee_misc.h>
#include <mm/tee_pager.h>
#include <mm/tee_mm.h>
#include <mm/core_mmu.h>
#include <tee/arch_svc.h>
#include <arm.h>
#include <tee/tee_cryp_provider.h>
#include <tee_api_defines.h>
#include <utee_defines.h>
#include <trace.h>
#include <util.h>

struct tee_pager_abort_info {
	uint32_t abort_type;
	uint32_t fault_descr;
	vaddr_t va;
	uint32_t pc;
	struct thread_abort_regs *regs;
};

enum tee_pager_fault_type {
	TEE_PAGER_FAULT_TYPE_USER_TA_PANIC,
	TEE_PAGER_FAULT_TYPE_PAGEABLE,
	TEE_PAGER_FAULT_TYPE_IGNORE,
};

#ifdef CFG_WITH_PAGER
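/*
 * struct tee_pager_area - Represents a paged virtual memory range.
 *
 * @hashes	per page SHA-256 hashes for a read-only area (one
 *		TEE_SHA256_HASH_SIZE entry per page), NULL for a
 *		read/write area
 * @store	backing store a read-only area is paged in from, NULL
 *		for a read/write area (such pages are zero-initialized
 *		instead)
 * @flags	TEE_PAGER_AREA_* flags describing the area
 * @mm		entry in the virtual memory pool covered by the area
 */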
struct tee_pager_area {
	const uint8_t *hashes;
	const uint8_t *store;
	uint32_t flags;
	tee_mm_entry_t *mm;
	TAILQ_ENTRY(tee_pager_area) link;
};

static TAILQ_HEAD(tee_pager_area_head, tee_pager_area) tee_pager_area_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);

/*
 * struct tee_pager_pmem - Represents a physical page used for paging.
 *
 * @pgidx	index of the translation table entry in tbl_info that this
 *		page currently occupies. The entry keeps the physical
 *		address even when the page isn't mapped, so the physical
 *		address can always be recovered from the MMU entry.
 *
 * @area	a pointer to the pager area
 */
struct tee_pager_pmem {
	unsigned pgidx;
	struct tee_pager_area *area;
	TAILQ_ENTRY(tee_pager_pmem) link;
};
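
/*
 * A pager MMU entry is in one of three states:
 *
 * - TEE_MATTR_VALID_BLOCK: the page is mapped and accessible.
 * - TEE_MATTR_HIDDEN_BLOCK: the page content is still there, but the
 *   entry is invalid so the next access faults and the page can be
 *   cheaply "unhidden" again.
 * - TEE_MATTR_PHYS_BLOCK: the entry is invalid but still holds the
 *   physical address, so the page can be reused for another mapping.
 *
 * Hiding a fraction of the pages (see TEE_PAGER_NHIDE below) and moving
 * unhidden pages to the end of the list keeps the list roughly ordered
 * by age, which approximates an LRU replacement policy.
 */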

/* The list of physical pages. The first page in the list is the oldest */
TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);

static struct tee_pager_pmem_head tee_pager_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);

static struct tee_pager_pmem_head tee_pager_rw_pmem_head =
	TAILQ_HEAD_INITIALIZER(tee_pager_rw_pmem_head);

/* Number of pages to hide at a time */
#define TEE_PAGER_NHIDE (tee_pager_npages / 3)

/* Number of registered physical pages, used when hiding pages. */
static size_t tee_pager_npages;

/*
 * Reference to translation table used to map the virtual memory range
 * covered by the pager.
 */
static struct core_mmu_table_info tbl_info;

bool tee_pager_add_area(tee_mm_entry_t *mm, uint32_t flags, const void *store,
		const void *hashes)
{
	struct tee_pager_area *area;
	size_t tbl_va_size;

	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
		tee_mm_get_smem(mm),
		tee_mm_get_smem(mm) + (mm->size << mm->pool->shift),
		flags, store, hashes);

	if (flags & TEE_PAGER_AREA_RO)
		TEE_ASSERT(store && hashes);
	else if (flags & TEE_PAGER_AREA_RW)
		TEE_ASSERT(!store && !hashes);
	else
		panic();

	if (!tbl_info.num_entries) {
		if (!core_mmu_find_table(tee_mm_get_smem(mm), UINT_MAX,
					&tbl_info))
			return false;
		if ((1 << tbl_info.shift) != SMALL_PAGE_SIZE) {
			DMSG("Unsupported page size in translation table %u",
			     1 << tbl_info.shift);
			return false;
		}
	}

	tbl_va_size = (1 << tbl_info.shift) * tbl_info.num_entries;
	if (!core_is_buffer_inside(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
				   tbl_info.va_base, tbl_va_size)) {
		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
			tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
			tbl_info.va_base, tbl_va_size);
		return false;
	}

	area = malloc(sizeof(struct tee_pager_area));
	if (!area)
		return false;

	area->mm = mm;
	area->flags = flags;
	area->store = store;
	area->hashes = hashes;
	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
	return true;
}
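
/*
 * Example (sketch, not from a real caller): registering a read-only,
 * executable area backed by a store and one SHA-256 hash per page. The
 * num_pages, store and hashes values are assumed to be prepared by the
 * caller; the names here are illustrative only.
 *
 *	tee_mm_entry_t *mm = tee_mm_alloc(&tee_mm_vcore,
 *					  num_pages * SMALL_PAGE_SIZE);
 *
 *	if (!mm || !tee_pager_add_area(mm, TEE_PAGER_AREA_RO |
 *				       TEE_PAGER_AREA_X, store, hashes))
 *		panic();
 */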

static struct tee_pager_area *tee_pager_find_area(vaddr_t va)
{
	struct tee_pager_area *area;

	TAILQ_FOREACH(area, &tee_pager_area_head, link) {
		tee_mm_entry_t *mm = area->mm;
		size_t offset = (va - mm->pool->lo) >> mm->pool->shift;

		if (offset >= mm->offset && offset < (mm->offset + mm->size))
			return area;
	}
	return NULL;
}

static uint32_t get_area_mattr(struct tee_pager_area *area)
{
	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
			TEE_MATTR_CACHE_DEFAULT | TEE_MATTR_SECURE;

	attr |= TEE_MATTR_PRW;
	if (area->flags & TEE_PAGER_AREA_X)
		attr |= TEE_MATTR_PX;

	return attr;
}

static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va)
{
	size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;

	if (area->store) {
		size_t rel_pg_idx = pg_idx - area->mm->offset;
		const void *stored_page = area->store +
					  rel_pg_idx * SMALL_PAGE_SIZE;

		memcpy((void *)page_va, stored_page, SMALL_PAGE_SIZE);
	} else {
		memset((void *)page_va, 0, SMALL_PAGE_SIZE);
	}
}

static void tee_pager_verify_page(struct tee_pager_area *area, vaddr_t page_va)
{
	size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;

	if (area->store) {
		size_t rel_pg_idx = pg_idx - area->mm->offset;
		const void *hash = area->hashes +
				   rel_pg_idx * TEE_SHA256_HASH_SIZE;

		if (hash_sha256_check(hash, (void *)page_va, SMALL_PAGE_SIZE) !=
				TEE_SUCCESS) {
			EMSG("PH 0x%" PRIxVA " failed", page_va);
			panic();
		}
	}
}

static bool tee_pager_unhide_page(vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);

		if (!(attr & TEE_MATTR_HIDDEN_BLOCK))
			continue;

		if (core_mmu_va2idx(&tbl_info, page_va) == pmem->pgidx) {
			/* Page is hidden: unhide it and move it to the back */
			core_mmu_set_entry(&tbl_info, pmem->pgidx, pa,
					   get_area_mattr(pmem->area));

			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);

			/* TODO only invalidate entry touched above */
			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

			return true;
		}
	}

	return false;
}

static void tee_pager_hide_pages(void)
{
	struct tee_pager_pmem *pmem;
	size_t n = 0;

	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
		paddr_t pa;
		uint32_t attr;

		if (n >= TEE_PAGER_NHIDE)
			break;
		n++;

		/*
		 * We can't hide a page when pmem->area is NULL, since
		 * unhiding requires pmem->area to be set.
		 */
		if (!pmem->area)
			continue;

		core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		core_mmu_set_entry(&tbl_info, pmem->pgidx, pa,
				   TEE_MATTR_HIDDEN_BLOCK);
	}

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}

/*
 * Find the mapped pmem entry, unmap the page and move the entry back to
 * the list of pageable pmem.
 * Returns false if the page was not mapped, and true if it was.
 */
static bool tee_pager_release_one_zi(vaddr_t page_va)
{
	struct tee_pager_pmem *pmem;
	unsigned pgidx;
	paddr_t pa;
	uint32_t attr;

	pgidx = core_mmu_va2idx(&tbl_info, page_va);
	core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);

#ifdef TEE_PAGER_DEBUG_PRINT
	DMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
#endif

	TAILQ_FOREACH(pmem, &tee_pager_rw_pmem_head, link) {
		if (pmem->pgidx != pgidx)
			continue;

		core_mmu_set_entry(&tbl_info, pgidx, pa, TEE_MATTR_PHYS_BLOCK);
		TAILQ_REMOVE(&tee_pager_rw_pmem_head, pmem, link);
		tee_pager_npages++;
		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);

		return true;
	}

	return false;
}
#endif /*CFG_WITH_PAGER*/

#ifdef ARM32
/* Returns true if the exception originated from user mode */
static bool tee_pager_is_user_exception(struct tee_pager_abort_info *ai)
{
	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif /*ARM32*/

#ifdef ARM64
/* Returns true if the exception originated from user mode */
static bool tee_pager_is_user_exception(struct tee_pager_abort_info *ai)
{
	uint32_t spsr = ai->regs->spsr;

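	/* An exception taken from AArch32 here can only be from user mode */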
	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
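	/* An exception taken from AArch64 EL0 is from user mode */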
	if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif /*ARM64*/

#ifdef ARM32
/* Returns true if the exception originated from abort mode */
static bool tee_pager_is_abort_in_abort_handler(struct tee_pager_abort_info *ai)
{
	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_ABT;
}
#endif /*ARM32*/

#ifdef ARM64
/* Returns true if the exception originated from abort mode */
static bool tee_pager_is_abort_in_abort_handler(
		struct tee_pager_abort_info *ai __unused)
{
	return false;
}
#endif /*ARM64*/

static __unused const char *abort_type_to_str(uint32_t abort_type)
{
	if (abort_type == THREAD_ABORT_DATA)
		return "data";
	if (abort_type == THREAD_ABORT_PREFETCH)
		return "prefetch";
	return "undef";
}

static __unused void tee_pager_print_detailed_abort(
				struct tee_pager_abort_info *ai __unused,
				const char *ctx __unused)
{
	EMSG_RAW("\n");
	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "\n",
		ctx, abort_type_to_str(ai->abort_type), ai->va);
#ifdef ARM32
	EMSG_RAW(" fsr 0x%08x  ttbr0 0x%08x  ttbr1 0x%08x  cidr 0x%X\n",
		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
		 read_contextidr());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x\n",
		 get_core_pos(), ai->regs->spsr);
	EMSG_RAW(" r0 0x%08x      r4 0x%08x    r8 0x%08x   r12 0x%08x\n",
		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
	EMSG_RAW(" r1 0x%08x      r5 0x%08x    r9 0x%08x    sp 0x%08x\n",
		 ai->regs->r1, ai->regs->r5, ai->regs->r9,
		 read_mode_sp(ai->regs->spsr & CPSR_MODE_MASK));
	EMSG_RAW(" r2 0x%08x      r6 0x%08x   r10 0x%08x    lr 0x%08x\n",
		 ai->regs->r2, ai->regs->r6, ai->regs->r10,
		 read_mode_lr(ai->regs->spsr & CPSR_MODE_MASK));
	EMSG_RAW(" r3 0x%08x      r7 0x%08x   r11 0x%08x    pc 0x%08x\n",
		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
#endif /*ARM32*/
#ifdef ARM64
	EMSG_RAW(" esr 0x%08x  ttbr0 0x%08" PRIx64 "   ttbr1 0x%08" PRIx64 "   cidr 0x%X\n",
		 ai->fault_descr, read_ttbr0_el1(), read_ttbr1_el1(),
		 read_contextidr_el1());
	EMSG_RAW(" cpu #%zu          cpsr 0x%08x\n",
		 get_core_pos(), (uint32_t)ai->regs->spsr);
	EMSG_RAW("x0  %016" PRIx64 " x1  %016" PRIx64,
		 ai->regs->x0, ai->regs->x1);
	EMSG_RAW("x2  %016" PRIx64 " x3  %016" PRIx64,
		 ai->regs->x2, ai->regs->x3);
	EMSG_RAW("x4  %016" PRIx64 " x5  %016" PRIx64,
		 ai->regs->x4, ai->regs->x5);
	EMSG_RAW("x6  %016" PRIx64 " x7  %016" PRIx64,
		 ai->regs->x6, ai->regs->x7);
	EMSG_RAW("x8  %016" PRIx64 " x9  %016" PRIx64,
		 ai->regs->x8, ai->regs->x9);
	EMSG_RAW("x10 %016" PRIx64 " x11 %016" PRIx64,
		 ai->regs->x10, ai->regs->x11);
	EMSG_RAW("x12 %016" PRIx64 " x13 %016" PRIx64,
		 ai->regs->x12, ai->regs->x13);
	EMSG_RAW("x14 %016" PRIx64 " x15 %016" PRIx64,
		 ai->regs->x14, ai->regs->x15);
	EMSG_RAW("x16 %016" PRIx64 " x17 %016" PRIx64,
		 ai->regs->x16, ai->regs->x17);
	EMSG_RAW("x18 %016" PRIx64 " x19 %016" PRIx64,
		 ai->regs->x18, ai->regs->x19);
	EMSG_RAW("x20 %016" PRIx64 " x21 %016" PRIx64,
		 ai->regs->x20, ai->regs->x21);
	EMSG_RAW("x22 %016" PRIx64 " x23 %016" PRIx64,
		 ai->regs->x22, ai->regs->x23);
	EMSG_RAW("x24 %016" PRIx64 " x25 %016" PRIx64,
		 ai->regs->x24, ai->regs->x25);
	EMSG_RAW("x26 %016" PRIx64 " x27 %016" PRIx64,
		 ai->regs->x26, ai->regs->x27);
	EMSG_RAW("x28 %016" PRIx64 " x29 %016" PRIx64,
		 ai->regs->x28, ai->regs->x29);
	EMSG_RAW("x30 %016" PRIx64 " elr %016" PRIx64,
		 ai->regs->x30, ai->regs->elr);
	EMSG_RAW("sp_el0 %016" PRIx64, ai->regs->sp_el0);
#endif /*ARM64*/
}

static void tee_pager_print_user_abort(struct tee_pager_abort_info *ai __unused)
{
#ifdef CFG_TEE_CORE_TA_TRACE
	tee_pager_print_detailed_abort(ai, "user TA");
	tee_ta_dump_current();
#endif
}

static void tee_pager_print_abort(struct tee_pager_abort_info *ai __unused)
{
#if (TRACE_LEVEL >= TRACE_INFO)
	tee_pager_print_detailed_abort(ai, "core");
#endif /*TRACE_LEVEL >= TRACE_INFO*/
}

static void tee_pager_print_error_abort(
		struct tee_pager_abort_info *ai __unused)
{
#if (TRACE_LEVEL >= TRACE_INFO)
	/* full verbose log at INFO level */
	tee_pager_print_detailed_abort(ai, "core");
#else
#ifdef ARM32
	EMSG("%s-abort at 0x%" PRIxVA "\n"
	     "FSR 0x%x PC 0x%x TTBR0 0x%X CONTEXTIDR 0x%X\n"
	     "CPUID 0x%x CPSR 0x%x (read from SPSR)",
	     abort_type_to_str(ai->abort_type),
	     ai->va, ai->fault_descr, ai->pc, read_ttbr0(), read_contextidr(),
	     read_mpidr(), read_spsr());
#endif /*ARM32*/
#ifdef ARM64
	EMSG("%s-abort at 0x%" PRIxVA "\n"
	     "ESR 0x%x PC 0x%x TTBR0 0x%" PRIx64 " CONTEXTIDR 0x%X\n"
	     "CPUID 0x%" PRIx64 " CPSR 0x%x (read from SPSR)",
	     abort_type_to_str(ai->abort_type),
	     ai->va, ai->fault_descr, ai->pc, read_ttbr0_el1(),
	     read_contextidr_el1(),
	     read_mpidr_el1(), (uint32_t)ai->regs->spsr);
#endif /*ARM64*/
#endif /*TRACE_LEVEL >= TRACE_INFO*/
}

static enum tee_pager_fault_type tee_pager_get_fault_type(
		struct tee_pager_abort_info *ai)
{
	/* In a multithreaded version this section must be protected */
	if (tee_pager_is_user_exception(ai)) {
		tee_pager_print_user_abort(ai);
		DMSG("[TEE_PAGER] abort in User mode (TA will panic)");
		return TEE_PAGER_FAULT_TYPE_USER_TA_PANIC;
	}

	if (tee_pager_is_abort_in_abort_handler(ai)) {
		tee_pager_print_error_abort(ai);
		EMSG("[PAGER] abort in abort handler (trap CPU)");
		panic();
	}

	if (ai->abort_type == THREAD_ABORT_UNDEF) {
		tee_pager_print_error_abort(ai);
		EMSG("[TEE_PAGER] undefined abort (trap CPU)");
		panic();
	}

	switch (core_mmu_get_fault_type(ai->fault_descr)) {
	case CORE_MMU_FAULT_ALIGNMENT:
		tee_pager_print_error_abort(ai);
		EMSG("[TEE_PAGER] alignment fault! (trap CPU)");
		panic();
		break;

	case CORE_MMU_FAULT_ACCESS_BIT:
		tee_pager_print_error_abort(ai);
		EMSG("[TEE_PAGER] access bit fault! (trap CPU)");
		panic();
		break;

	case CORE_MMU_FAULT_DEBUG_EVENT:
		tee_pager_print_abort(ai);
		DMSG("[TEE_PAGER] Ignoring debug event!");
		return TEE_PAGER_FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_TRANSLATION:
	case CORE_MMU_FAULT_PERMISSION:
		return TEE_PAGER_FAULT_TYPE_PAGEABLE;

	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
		tee_pager_print_abort(ai);
		DMSG("[TEE_PAGER] Ignoring async external abort!");
		return TEE_PAGER_FAULT_TYPE_IGNORE;

	case CORE_MMU_FAULT_OTHER:
	default:
		tee_pager_print_abort(ai);
		DMSG("[TEE_PAGER] Unhandled fault!");
		return TEE_PAGER_FAULT_TYPE_IGNORE;
	}
}

#ifdef CFG_WITH_PAGER

/* Finds the oldest page and remaps it for the new virtual address */
static struct tee_pager_pmem *tee_pager_get_page(
		struct tee_pager_abort_info *ai,
		struct tee_pager_area *area)
{
	unsigned pgidx = core_mmu_va2idx(&tbl_info, ai->va);
	struct tee_pager_pmem *pmem;
	paddr_t pa;
	uint32_t attr;

	core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);

	assert(!(attr & (TEE_MATTR_VALID_BLOCK | TEE_MATTR_HIDDEN_BLOCK)));

	if (attr & TEE_MATTR_PHYS_BLOCK) {
		/*
		 * There's a pmem entry using this MMU entry; let's use
		 * that entry in the new mapping.
		 */
		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
			if (pmem->pgidx == pgidx)
				break;
		}
		if (!pmem) {
			tee_pager_print_abort(ai);
			DMSG("Couldn't find pmem for pgidx %u", pgidx);
			panic();
		}
	} else {
		pmem = TAILQ_FIRST(&tee_pager_pmem_head);
		if (!pmem) {
			tee_pager_print_abort(ai);
			DMSG("No pmem entries");
			panic();
		}
		core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);
		core_mmu_set_entry(&tbl_info, pmem->pgidx, 0, 0);
	}

	pmem->pgidx = pgidx;
	pmem->area = area;
	core_mmu_set_entry(&tbl_info, pgidx, pa, get_area_mattr(area));

	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
	if (area->store) {
		/* move page to back */
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	} else {
		/* Move page to rw list */
		TEE_ASSERT(tee_pager_npages > 0);
		tee_pager_npages--;
		TAILQ_INSERT_TAIL(&tee_pager_rw_pmem_head, pmem, link);
	}

	/* TODO only invalidate entries touched above */
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

#ifdef TEE_PAGER_DEBUG_PRINT
	DMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
	     core_mmu_idx2va(&tbl_info, pgidx), pa);
#endif

	return pmem;
}

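/*
 * Handles a fault in the pageable area:
 *
 * 1. Find the area covering the faulting address; a miss is fatal.
 * 2. If the page was merely hidden, unhiding it is all that's needed.
 * 3. Otherwise grab a physical page (evicting the oldest page if
 *    necessary), load and verify its contents, and for executable
 *    areas clean the D-cache and invalidate the I-cache.
 * 4. Finally hide the oldest pages again so future accesses keep
 *    refreshing the age ordering.
 */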
static void tee_pager_handle_fault(struct tee_pager_abort_info *ai)
{
	struct tee_pager_area *area;
	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;

#ifdef TEE_PAGER_DEBUG_PRINT
	tee_pager_print_abort(ai);
#endif

	/* check if the access is valid */
	area = tee_pager_find_area(ai->va);
	if (!area) {
		tee_pager_print_abort(ai);
		DMSG("Invalid addr 0x%" PRIxVA, ai->va);
		panic();
	}

	if (!tee_pager_unhide_page(page_va)) {
		/* the page wasn't hidden */
		tee_pager_get_page(ai, area);

		/* load page code & data */
		tee_pager_load_page(area, page_va);
		/* TODO remap readonly if TEE_PAGER_AREA_RO */
		tee_pager_verify_page(area, page_va);
		/* TODO remap executable if TEE_PAGER_AREA_X */

		if (area->flags & TEE_PAGER_AREA_X) {
			cache_maintenance_l1(DCACHE_AREA_CLEAN,
				(void *)page_va, SMALL_PAGE_SIZE);

			cache_maintenance_l1(ICACHE_AREA_INVALIDATE,
				(void *)page_va, SMALL_PAGE_SIZE);
		}
	}

	tee_pager_hide_pages();
	/* end protect (multithreaded version) */
}

#else /*CFG_WITH_PAGER*/

static void tee_pager_handle_fault(struct tee_pager_abort_info *ai)
{
	/*
	 * Until the pager is supported, trap the CPU here.
	 */
	tee_pager_print_error_abort(ai);
	EMSG("Unexpected page fault! Trap CPU");
	panic();
}

#endif /*CFG_WITH_PAGER*/

#ifdef ARM32
static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
		struct tee_pager_abort_info *ai)
{
	switch (abort_type) {
	case THREAD_ABORT_DATA:
		ai->fault_descr = read_dfsr();
		ai->va = read_dfar();
		break;
	case THREAD_ABORT_PREFETCH:
		ai->fault_descr = read_ifsr();
		ai->va = read_ifar();
		break;
	default:
		ai->fault_descr = 0;
		ai->va = regs->elr;
		break;
	}
	ai->abort_type = abort_type;
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM32*/

#ifdef ARM64
static void set_abort_info(uint32_t abort_type __unused,
		struct thread_abort_regs *regs, struct tee_pager_abort_info *ai)
{
	ai->fault_descr = read_esr_el1();
	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
	case ESR_EC_IABT_EL0:
	case ESR_EC_IABT_EL1:
		ai->abort_type = THREAD_ABORT_PREFETCH;
		ai->va = read_far_el1();
		break;
	case ESR_EC_DABT_EL0:
	case ESR_EC_DABT_EL1:
	case ESR_EC_SP_ALIGN:
		ai->abort_type = THREAD_ABORT_DATA;
		ai->va = read_far_el1();
		break;
	default:
		ai->abort_type = THREAD_ABORT_UNDEF;
		ai->va = regs->elr;
	}
	ai->pc = regs->elr;
	ai->regs = regs;
}
#endif /*ARM64*/

#ifdef ARM32
static void handle_user_ta_panic(struct tee_pager_abort_info *ai)
{
	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->r1 = true;
	ai->regs->r2 = 0xdeadbeef;
	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
	ai->regs->spsr = read_cpsr();
	ai->regs->spsr &= ~CPSR_MODE_MASK;
	ai->regs->spsr |= CPSR_MODE_SVC;
	ai->regs->spsr &= ~CPSR_FIA;
	ai->regs->spsr |= read_spsr() & CPSR_FIA;
	/* Select Thumb or ARM mode */
	if (ai->regs->elr & 1)
		ai->regs->spsr |= CPSR_T;
	else
		ai->regs->spsr &= ~CPSR_T;
}
#endif /*ARM32*/

#ifdef ARM64
static void handle_user_ta_panic(struct tee_pager_abort_info *ai)
{
	uint32_t daif;

	/*
	 * It was a user exception, stop user execution and return
	 * to TEE Core.
	 */
	ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
	ai->regs->x1 = true;
	ai->regs->x2 = 0xdeadbeef;
	ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
	ai->regs->sp_el0 = thread_get_saved_thread_sp();

	daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
	/* XXX what about DAIF_D? */
	ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
}
#endif /*ARM64*/

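/*
 * Entry point for all aborts: classify the fault, then either ignore it,
 * turn it into a user TA panic, or page the missing page in.
 */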
void tee_pager_abort_handler(uint32_t abort_type,
		struct thread_abort_regs *regs)
{
	struct tee_pager_abort_info ai;

	set_abort_info(abort_type, regs, &ai);

	switch (tee_pager_get_fault_type(&ai)) {
	case TEE_PAGER_FAULT_TYPE_IGNORE:
		break;
	case TEE_PAGER_FAULT_TYPE_USER_TA_PANIC:
		handle_user_ta_panic(&ai);
		break;
	case TEE_PAGER_FAULT_TYPE_PAGEABLE:
	default:
		tee_pager_handle_fault(&ai);
		break;
	}
}

#ifdef CFG_WITH_PAGER
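/*
 * Donates the physical pages backing [vaddr, vaddr + npages *
 * SMALL_PAGE_SIZE) to the pager. With unmap == false the pages stay
 * mapped for now but can be evicted later; with unmap == true they are
 * unmapped right away and only keep their physical address in the MMU
 * entry.
 */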
void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
{
	size_t n;

	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);

	/* setup memory */
	for (n = 0; n < npages; n++) {
		struct tee_pager_pmem *pmem;
		tee_vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
		unsigned pgidx = core_mmu_va2idx(&tbl_info, va);
		paddr_t pa;
		uint32_t attr;

		core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);

		/* Ignore unmapped pages/blocks */
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;

		pmem = malloc(sizeof(struct tee_pager_pmem));
		if (pmem == NULL) {
			DMSG("Can't allocate memory");
			panic();
		}

		pmem->pgidx = pgidx;
		pmem->area = NULL;

		if (unmap) {
			/*
			 * Note that we're making the page inaccessible
			 * with the TEE_MATTR_PHYS_BLOCK attribute to
			 * indicate that the descriptor still holds a valid
			 * physical address of a page.
			 */
			core_mmu_set_entry(&tbl_info, pgidx, pa,
					   TEE_MATTR_PHYS_BLOCK);
		}
		tee_pager_npages++;
		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
	}

	if (unmap) {
		/* Invalidate secure TLB */
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
	}
}

void tee_pager_release_zi(vaddr_t vaddr, size_t size)
{
	bool unmapped = false;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	if ((vaddr & SMALL_PAGE_MASK) || (size & SMALL_PAGE_MASK))
		panic();

	for (; size; vaddr += SMALL_PAGE_SIZE, size -= SMALL_PAGE_SIZE)
		unmapped |= tee_pager_release_one_zi(vaddr);

	/* Invalidate secure TLB */
	if (unmapped)
		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	thread_set_exceptions(exceptions);
}

void *tee_pager_request_zi(size_t size)
{
	tee_mm_entry_t *mm;

	if (!size)
		return NULL;

	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
	if (!mm)
		return NULL;

	tee_pager_add_area(mm, TEE_PAGER_AREA_RW, NULL, NULL);

	return (void *)tee_mm_get_smem(mm);
}
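
/*
 * Example (sketch, the size is illustrative): requesting zero-initialized
 * pageable memory and later releasing the physical pages backing it:
 *
 *	void *p = tee_pager_request_zi(4 * SMALL_PAGE_SIZE);
 *
 *	if (!p)
 *		panic();
 *	...
 *	tee_pager_release_zi((vaddr_t)p, 4 * SMALL_PAGE_SIZE);
 */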
#endif /*CFG_WITH_PAGER*/