xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision 9403c583381528e7fb391e3769644cc9653cfbb6)
1 /*
2  * Copyright (c) 2014, STMicroelectronics International N.V.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <sys/queue.h>
29 #include <stdlib.h>
30 #include <inttypes.h>
31 #include <kernel/tee_common_unpg.h>
32 #include <kernel/tee_common.h>
33 #include <kernel/thread_defs.h>
34 #include <kernel/panic.h>
35 #include <mm/tee_mmu_defs.h>
36 #include <kernel/tee_ta_manager.h>
37 #include <kernel/tee_kta_trace.h>
38 #include <kernel/misc.h>
39 #include <kernel/tee_misc.h>
40 #include <kernel/tz_proc.h>
41 #include <mm/tee_pager.h>
42 #include <mm/tee_mm.h>
43 #include <mm/core_mmu.h>
44 #include <tee/arch_svc.h>
45 #include <arm.h>
46 #include <tee/tee_cryp_provider.h>
47 #include <tee_api_defines.h>
48 #include <utee_defines.h>
49 #include <trace.h>
50 #include <util.h>
51 
52 struct tee_pager_abort_info {
53 	uint32_t abort_type;
54 	uint32_t fault_descr;
55 	vaddr_t va;
56 	uint32_t pc;
57 	struct thread_abort_regs *regs;
58 };
59 
60 enum tee_pager_fault_type {
61 	TEE_PAGER_FAULT_TYPE_USER_TA_PANIC,
62 	TEE_PAGER_FAULT_TYPE_PAGEABLE,
63 	TEE_PAGER_FAULT_TYPE_IGNORE,
64 };
65 
66 #ifdef CFG_WITH_PAGER
67 
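/*
 * struct tee_pager_area - Describes a virtual memory range handled by
 *		the pager.
 * @hashes	per-page SHA-256 hashes of the contents in @store (read-only
 *		areas only)
 * @store	backing store holding the initial page contents, or NULL for
 *		zero-initialized read/write areas
 * @flags	TEE_PAGER_AREA_* flags
 * @mm		tee_mm_entry_t covering the virtual range of the area
 */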
68 struct tee_pager_area {
69 	const uint8_t *hashes;
70 	const uint8_t *store;
71 	uint32_t flags;
72 	tee_mm_entry_t *mm;
73 	TAILQ_ENTRY(tee_pager_area) link;
74 };
75 
76 static TAILQ_HEAD(tee_pager_area_head, tee_pager_area) tee_pager_area_head =
77 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
78 
79 /*
80  * struct tee_pager_pmem - Represents a physical page used for paging.
81  *
 82  * @pgidx	index of the entry in tbl_info. The MMU entry at this index
 83  *		always holds the page's physical address, even when the
 84  *		page isn't mapped.
85  *
 86  * @va_alias	Virtual address where the physical page is always aliased.
 87  *		Used during remapping of the page when the content needs to
 88  *		be updated before it's available at the new location.
89  *
90  * @area	a pointer to the pager area
91  */
92 struct tee_pager_pmem {
93 	unsigned pgidx;
94 	void *va_alias;
95 	struct tee_pager_area *area;
96 	TAILQ_ENTRY(tee_pager_pmem) link;
97 };
98 
99 /* The list of physical pages. The first page in the list is the oldest */
100 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
101 
102 static struct tee_pager_pmem_head tee_pager_pmem_head =
103 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
104 
105 static struct tee_pager_pmem_head tee_pager_rw_pmem_head =
106 	TAILQ_HEAD_INITIALIZER(tee_pager_rw_pmem_head);
107 
108 /* number of pages hidden */
109 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
110 
111 /* Number of registered physical pages, used when hiding pages. */
112 static size_t tee_pager_npages;
113 
114 #ifdef CFG_WITH_STATS
115 static struct tee_pager_stats pager_stats;
116 
117 static inline void incr_ro_hits(void)
118 {
119 	pager_stats.ro_hits++;
120 }
121 
122 static inline void incr_rw_hits(void)
123 {
124 	pager_stats.rw_hits++;
125 }
126 
127 static inline void incr_hidden_hits(void)
128 {
129 	pager_stats.hidden_hits++;
130 }
131 
132 static inline void incr_zi_released(void)
133 {
134 	pager_stats.zi_released++;
135 }
136 
137 static inline void incr_npages_all(void)
138 {
139 	pager_stats.npages_all++;
140 }
141 
142 static inline void set_npages(void)
143 {
144 	pager_stats.npages = tee_pager_npages;
145 }
146 
147 void tee_pager_get_stats(struct tee_pager_stats *stats)
148 {
149 	*stats = pager_stats;
150 
151 	pager_stats.hidden_hits = 0;
152 	pager_stats.ro_hits = 0;
153 	pager_stats.rw_hits = 0;
154 	pager_stats.zi_released = 0;
155 }
156 
157 #else /* CFG_WITH_STATS */
158 static inline void incr_ro_hits(void) { }
159 static inline void incr_rw_hits(void) { }
160 static inline void incr_hidden_hits(void) { }
161 static inline void incr_zi_released(void) { }
162 static inline void incr_npages_all(void) { }
163 static inline void set_npages(void) { }
164 
165 void tee_pager_get_stats(struct tee_pager_stats *stats)
166 {
167 	memset(stats, 0, sizeof(struct tee_pager_stats));
168 }
169 #endif /* CFG_WITH_STATS */
170 
171 /*
172  * Reference to translation table used to map the virtual memory range
173  * covered by the pager.
174  */
175 static struct core_mmu_table_info tbl_info;
176 
177 static unsigned pager_lock = SPINLOCK_UNLOCK;
178 
179 /* Defines the range of the alias area */
180 static tee_mm_entry_t *pager_alias_area;
181 /*
182  * Physical pages are added to the alias area in a stack-like fashion.
183  * @pager_alias_next_free holds the address of the next free entry, or 0
184  * when the alias area is full.
185  */
186 static uintptr_t pager_alias_next_free;
187 
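/*
 * Record the supplied virtual range as the pager alias area. The range
 * must be page aligned and covered by a small-page translation table;
 * any existing mappings in the range are cleared.
 */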
188 void tee_pager_set_alias_area(tee_mm_entry_t *mm)
189 {
190 	struct core_mmu_table_info ti;
191 	size_t tbl_va_size;
192 	unsigned idx;
193 	unsigned last_idx;
194 	vaddr_t smem = tee_mm_get_smem(mm);
195 	size_t nbytes = tee_mm_get_bytes(mm);
196 
197 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
198 
199 	TEE_ASSERT(!pager_alias_area);
200 	if (!core_mmu_find_table(smem, UINT_MAX, &ti)) {
201 		DMSG("Can't find translation table");
202 		panic();
203 	}
204 	if ((1 << ti.shift) != SMALL_PAGE_SIZE) {
205 		DMSG("Unsupported page size in translation table %u",
206 		     1 << ti.shift);
207 		panic();
208 	}
209 
210 	tbl_va_size = (1 << ti.shift) * ti.num_entries;
211 	if (!core_is_buffer_inside(smem, nbytes,
212 				   ti.va_base, tbl_va_size)) {
213 		DMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
214 			smem, nbytes, ti.va_base, tbl_va_size);
215 		panic();
216 	}
217 
218 	TEE_ASSERT(!(smem & SMALL_PAGE_MASK));
219 	TEE_ASSERT(!(nbytes & SMALL_PAGE_MASK));
220 
221 	pager_alias_area = mm;
222 	pager_alias_next_free = smem;
223 
224 	/* Clear all mappings in the alias area */
225 	idx = core_mmu_va2idx(&ti, smem);
226 	last_idx = core_mmu_va2idx(&ti, smem + nbytes);
227 	for (; idx < last_idx; idx++)
228 		core_mmu_set_entry(&ti, idx, 0, 0);
229 }
230 
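/*
 * Map the physical page @pa at the next free slot in the alias area and
 * return its alias virtual address. Slots are handed out in a stack-like
 * fashion; @pager_alias_next_free becomes 0 when the area is exhausted.
 */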
231 static void *pager_add_alias_page(paddr_t pa)
232 {
233 	unsigned idx;
234 	struct core_mmu_table_info ti;
235 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
236 			TEE_MATTR_CACHE_DEFAULT | TEE_MATTR_SECURE |
237 			TEE_MATTR_PRW;
238 
239 	DMSG("0x%" PRIxPA, pa);
240 
241 	TEE_ASSERT(pager_alias_next_free);
242 	if (!core_mmu_find_table(pager_alias_next_free, UINT_MAX, &ti))
243 		panic();
244 	idx = core_mmu_va2idx(&ti, pager_alias_next_free);
245 	core_mmu_set_entry(&ti, idx, pa, attr);
246 	pager_alias_next_free += SMALL_PAGE_SIZE;
247 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
248 				      tee_mm_get_bytes(pager_alias_area)))
249 		pager_alias_next_free = 0;
250 	return (void *)core_mmu_idx2va(&ti, idx);
251 }
252 
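/*
 * Register a pageable area. Read-only areas must supply a backing store
 * and per-page hashes; read/write areas are zero-initialized and must not.
 * Returns false if the area can't be registered, for instance because it
 * doesn't fit inside the translation table used by the pager.
 */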
253 bool tee_pager_add_area(tee_mm_entry_t *mm, uint32_t flags, const void *store,
254 		const void *hashes)
255 {
256 	struct tee_pager_area *area;
257 	size_t tbl_va_size;
258 
259 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
260 		tee_mm_get_smem(mm),
261 		tee_mm_get_smem(mm) + (mm->size << mm->pool->shift),
262 		flags, store, hashes);
263 
264 	if (flags & TEE_PAGER_AREA_RO)
265 		TEE_ASSERT(store && hashes);
266 	else if (flags & TEE_PAGER_AREA_RW)
267 		TEE_ASSERT(!store && !hashes);
268 	else
269 		panic();
270 
271 	if (!tbl_info.num_entries) {
272 		if (!core_mmu_find_table(tee_mm_get_smem(mm), UINT_MAX,
273 					&tbl_info))
274 			return false;
275 		if ((1 << tbl_info.shift) != SMALL_PAGE_SIZE) {
276 			DMSG("Unsupported page size in translation table %u",
277 			     1 << tbl_info.shift);
278 			return false;
279 		}
280 	}
281 
282 	tbl_va_size = (1 << tbl_info.shift) * tbl_info.num_entries;
283 	if (!core_is_buffer_inside(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
284 				   tbl_info.va_base, tbl_va_size)) {
285 		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
286 			tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
287 			tbl_info.va_base, tbl_va_size);
288 		return false;
289 	}
290 
291 	area = malloc(sizeof(struct tee_pager_area));
292 	if (!area)
293 		return false;
294 
295 	area->mm = mm;
296 	area->flags = flags;
297 	area->store = store;
298 	area->hashes = hashes;
299 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
300 	return true;
301 }
302 
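/* Return the registered area covering @va, or NULL if there is none. */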
303 static struct tee_pager_area *tee_pager_find_area(vaddr_t va)
304 {
305 	struct tee_pager_area *area;
306 
307 	TAILQ_FOREACH(area, &tee_pager_area_head, link) {
308 		tee_mm_entry_t *mm = area->mm;
309 		size_t offset = (va - mm->pool->lo) >> mm->pool->shift;
310 
311 		if (offset >= mm->offset && offset < (mm->offset + mm->size))
312 			return area;
313 	}
314 	return NULL;
315 }
316 
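/* Build the MMU mapping attributes for a page belonging to @area. */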
317 static uint32_t get_area_mattr(struct tee_pager_area *area)
318 {
319 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
320 			TEE_MATTR_CACHE_DEFAULT | TEE_MATTR_SECURE |
321 			TEE_MATTR_PR;
322 
323 	if (!(area->flags & TEE_PAGER_AREA_RO))
324 		attr |= TEE_MATTR_PW;
325 	if (area->flags & TEE_PAGER_AREA_X)
326 		attr |= TEE_MATTR_PX;
327 
328 	return attr;
329 }
330 
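/*
 * Populate the page at @va_alias: copy it from the backing store for
 * areas that have one, otherwise zero-fill it.
 */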
331 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
332 			void *va_alias)
333 {
334 	size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;
335 
336 	if (area->store) {
337 		size_t rel_pg_idx = pg_idx - area->mm->offset;
338 		const void *stored_page = area->store +
339 					  rel_pg_idx * SMALL_PAGE_SIZE;
340 
341 		memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
342 		incr_ro_hits();
343 	} else {
344 		memset(va_alias, 0, SMALL_PAGE_SIZE);
345 		incr_rw_hits();
346 	}
347 }
348 
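/*
 * For areas with a backing store, verify the just loaded page against its
 * SHA-256 hash and panic on mismatch.
 */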
349 static void tee_pager_verify_page(struct tee_pager_area *area, vaddr_t page_va,
350 			void *va_alias)
351 {
352 	size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;
353 
354 	if (area->store) {
355 		size_t rel_pg_idx = pg_idx - area->mm->offset;
356 		const void *hash = area->hashes +
357 				   rel_pg_idx * TEE_SHA256_HASH_SIZE;
358 
359 		if (hash_sha256_check(hash, va_alias, SMALL_PAGE_SIZE) !=
360 				TEE_SUCCESS) {
361 			EMSG("PH 0x%" PRIxVA " failed", page_va);
362 			panic();
363 		}
364 	}
365 }
366 
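/*
 * If the page covering @page_va is present but hidden, restore its
 * mapping attributes, move it to the back of the list and return true.
 * Return false if the page wasn't hidden.
 */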
367 static bool tee_pager_unhide_page(vaddr_t page_va)
368 {
369 	struct tee_pager_pmem *pmem;
370 
371 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
372 		paddr_t pa;
373 		uint32_t attr;
374 
375 		core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);
376 
377 		if (!(attr & TEE_MATTR_HIDDEN_BLOCK))
378 			continue;
379 
380 		if (core_mmu_va2idx(&tbl_info, page_va) == pmem->pgidx) {
381 			/* page is hidden, show and move to back */
382 			core_mmu_set_entry(&tbl_info, pmem->pgidx, pa,
383 					   get_area_mattr(pmem->area));
384 
385 			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
386 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
387 
388 			/* TODO only invalidate entry touched above */
389 			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
390 
391 			incr_hidden_hits();
392 			return true;
393 		}
394 	}
395 
396 	return false;
397 }
398 
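/*
 * Hide up to TEE_PAGER_NHIDE of the oldest mapped pages so that the next
 * access to them faults and refreshes their position in the list.
 */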
399 static void tee_pager_hide_pages(void)
400 {
401 	struct tee_pager_pmem *pmem;
402 	size_t n = 0;
403 
404 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
405 		paddr_t pa;
406 		uint32_t attr;
407 
408 		if (n >= TEE_PAGER_NHIDE)
409 			break;
410 		n++;
411 
412 		/*
413 		 * We cannot hide pages when pmem->area is not defined, as
414 		 * unhiding requires pmem->area to be set.
415 		 */
416 		if (!pmem->area)
417 			continue;
418 
419 		core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);
420 		if (!(attr & TEE_MATTR_VALID_BLOCK))
421 			continue;
422 
423 		core_mmu_set_entry(&tbl_info, pmem->pgidx, pa,
424 				   TEE_MATTR_HIDDEN_BLOCK);
425 
426 	}
427 
428 	/* TODO only invalidate entries touched above */
429 	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
430 }
431 
432 /*
433  * Find the mapped pmem, unmap the page and move the pmem back to the
 *    pageable pmem list.
434  * Return false if page was not mapped, and true if page was mapped.
435  */
436 static bool tee_pager_release_one_zi(vaddr_t page_va)
437 {
438 	struct tee_pager_pmem *pmem;
439 	unsigned pgidx;
440 	paddr_t pa;
441 	uint32_t attr;
442 
443 	pgidx = core_mmu_va2idx(&tbl_info, page_va);
444 	core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);
445 
446 #ifdef TEE_PAGER_DEBUG_PRINT
447 	DMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
448 #endif
449 
450 	TAILQ_FOREACH(pmem, &tee_pager_rw_pmem_head, link) {
451 		if (pmem->pgidx != pgidx)
452 			continue;
453 
454 		core_mmu_set_entry(&tbl_info, pgidx, pa, TEE_MATTR_PHYS_BLOCK);
455 		TAILQ_REMOVE(&tee_pager_rw_pmem_head, pmem, link);
456 		tee_pager_npages++;
457 		set_npages();
458 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
459 		incr_zi_released();
460 
461 
462 		return true;
463 	}
464 
465 	return false;
466 }
467 #endif /*CFG_WITH_PAGER*/
468 
469 #ifdef ARM32
470 /* Returns true if the exception originated from user mode */
471 static bool tee_pager_is_user_exception(struct tee_pager_abort_info *ai)
472 {
473 	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
474 }
475 #endif /*ARM32*/
476 
477 #ifdef ARM64
478 /* Returns true if the exception originated from user mode */
479 static bool tee_pager_is_user_exception(struct tee_pager_abort_info *ai)
480 {
481 	uint32_t spsr = ai->regs->spsr;
482 
483 	if (spsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
484 		return true;
485 	if (((spsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
486 	    SPSR_64_MODE_EL0)
487 		return true;
488 	return false;
489 }
490 #endif /*ARM64*/
491 
492 #ifdef ARM32
493 /* Returns true if the exception originated from abort mode */
494 static bool tee_pager_is_abort_in_abort_handler(struct tee_pager_abort_info *ai)
495 {
496 	return (ai->regs->spsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_ABT;
497 }
498 #endif /*ARM32*/
499 
500 #ifdef ARM64
501 /* Returns true if the exception originated from abort mode */
502 static bool tee_pager_is_abort_in_abort_handler(
503 		struct tee_pager_abort_info *ai __unused)
504 {
505 	return false;
506 }
507 #endif /*ARM64*/
508 
509 static __unused const char *abort_type_to_str(uint32_t abort_type)
510 {
511 	if (abort_type == THREAD_ABORT_DATA)
512 		return "data";
513 	if (abort_type == THREAD_ABORT_PREFETCH)
514 		return "prefetch";
515 	return "undef";
516 }
517 
518 static __unused void tee_pager_print_detailed_abort(
519 				struct tee_pager_abort_info *ai __unused,
520 				const char *ctx __unused)
521 {
522 	EMSG_RAW("\n");
523 	EMSG_RAW("%s %s-abort at address 0x%" PRIxVA "\n",
524 		ctx, abort_type_to_str(ai->abort_type), ai->va);
525 #ifdef ARM32
526 	EMSG_RAW(" fsr 0x%08x  ttbr0 0x%08x  ttbr1 0x%08x  cidr 0x%X\n",
527 		 ai->fault_descr, read_ttbr0(), read_ttbr1(),
528 		 read_contextidr());
529 	EMSG_RAW(" cpu #%zu          cpsr 0x%08x\n",
530 		 get_core_pos(), ai->regs->spsr);
531 	EMSG_RAW(" r0 0x%08x      r4 0x%08x    r8 0x%08x   r12 0x%08x\n",
532 		 ai->regs->r0, ai->regs->r4, ai->regs->r8, ai->regs->ip);
533 	EMSG_RAW(" r1 0x%08x      r5 0x%08x    r9 0x%08x    sp 0x%08x\n",
534 		 ai->regs->r1, ai->regs->r5, ai->regs->r9,
535 		 read_mode_sp(ai->regs->spsr & CPSR_MODE_MASK));
536 	EMSG_RAW(" r2 0x%08x      r6 0x%08x   r10 0x%08x    lr 0x%08x\n",
537 		 ai->regs->r2, ai->regs->r6, ai->regs->r10,
538 		 read_mode_lr(ai->regs->spsr & CPSR_MODE_MASK));
539 	EMSG_RAW(" r3 0x%08x      r7 0x%08x   r11 0x%08x    pc 0x%08x\n",
540 		 ai->regs->r3, ai->regs->r7, ai->regs->r11, ai->pc);
541 #endif /*ARM32*/
542 #ifdef ARM64
543 	EMSG_RAW(" esr 0x%08x  ttbr0 0x%08" PRIx64 "   ttbr1 0x%08" PRIx64 "   cidr 0x%X\n",
544 		 ai->fault_descr, read_ttbr0_el1(), read_ttbr1_el1(),
545 		 read_contextidr_el1());
546 	EMSG_RAW(" cpu #%zu          cpsr 0x%08x\n",
547 		 get_core_pos(), (uint32_t)ai->regs->spsr);
548 	EMSG_RAW("x0  %016" PRIx64 " x1  %016" PRIx64,
549 		 ai->regs->x0, ai->regs->x1);
550 	EMSG_RAW("x2  %016" PRIx64 " x3  %016" PRIx64,
551 		 ai->regs->x2, ai->regs->x3);
552 	EMSG_RAW("x4  %016" PRIx64 " x5  %016" PRIx64,
553 		 ai->regs->x4, ai->regs->x5);
554 	EMSG_RAW("x6  %016" PRIx64 " x7  %016" PRIx64,
555 		 ai->regs->x6, ai->regs->x7);
556 	EMSG_RAW("x8  %016" PRIx64 " x9  %016" PRIx64,
557 		 ai->regs->x8, ai->regs->x9);
558 	EMSG_RAW("x10 %016" PRIx64 " x11 %016" PRIx64,
559 		 ai->regs->x10, ai->regs->x11);
560 	EMSG_RAW("x12 %016" PRIx64 " x13 %016" PRIx64,
561 		 ai->regs->x12, ai->regs->x13);
562 	EMSG_RAW("x14 %016" PRIx64 " x15 %016" PRIx64,
563 		 ai->regs->x14, ai->regs->x15);
564 	EMSG_RAW("x16 %016" PRIx64 " x17 %016" PRIx64,
565 		 ai->regs->x16, ai->regs->x17);
566 	EMSG_RAW("x18 %016" PRIx64 " x19 %016" PRIx64,
567 		 ai->regs->x18, ai->regs->x19);
568 	EMSG_RAW("x20 %016" PRIx64 " x21 %016" PRIx64,
569 		 ai->regs->x20, ai->regs->x21);
570 	EMSG_RAW("x22 %016" PRIx64 " x23 %016" PRIx64,
571 		 ai->regs->x22, ai->regs->x23);
572 	EMSG_RAW("x24 %016" PRIx64 " x25 %016" PRIx64,
573 		 ai->regs->x24, ai->regs->x25);
574 	EMSG_RAW("x26 %016" PRIx64 " x27 %016" PRIx64,
575 		 ai->regs->x26, ai->regs->x27);
576 	EMSG_RAW("x28 %016" PRIx64 " x29 %016" PRIx64,
577 		 ai->regs->x28, ai->regs->x29);
578 	EMSG_RAW("x30 %016" PRIx64 " elr %016" PRIx64,
579 		 ai->regs->x30, ai->regs->elr);
580 	EMSG_RAW("sp_el0 %016" PRIx64, ai->regs->sp_el0);
581 #endif /*ARM64*/
582 }
583 
584 static void tee_pager_print_user_abort(struct tee_pager_abort_info *ai __unused)
585 {
586 #ifdef CFG_TEE_CORE_TA_TRACE
587 	tee_pager_print_detailed_abort(ai, "user TA");
588 	tee_ta_dump_current();
589 #endif
590 }
591 
592 static void tee_pager_print_abort(struct tee_pager_abort_info *ai __unused)
593 {
594 #if (TRACE_LEVEL >= TRACE_INFO)
595 	tee_pager_print_detailed_abort(ai, "core");
596 #endif /*TRACE_LEVEL >= TRACE_INFO*/
597 }
598 
599 static void tee_pager_print_error_abort(
600 		struct tee_pager_abort_info *ai __unused)
601 {
602 #if (TRACE_LEVEL >= TRACE_INFO)
603 	/* full verbose log at DEBUG level */
604 	tee_pager_print_detailed_abort(ai, "core");
605 #else
606 #ifdef ARM32
607 	EMSG("%s-abort at 0x%" PRIxVA "\n"
608 	     "FSR 0x%x PC 0x%x TTBR0 0x%X CONTEXTIDR 0x%X\n"
609 	     "CPUID 0x%x CPSR 0x%x (read from SPSR)",
610 	     abort_type_to_str(ai->abort_type),
611 	     ai->va, ai->fault_descr, ai->pc, read_ttbr0(), read_contextidr(),
612 	     read_mpidr(), read_spsr());
613 #endif /*ARM32*/
614 #ifdef ARM64
615 	EMSG("%s-abort at 0x%" PRIxVA "\n"
616 	     "ESR 0x%x PC 0x%x TTBR0 0x%" PRIx64 " CONTEXTIDR 0x%X\n"
617 	     "CPUID 0x%" PRIx64 " CPSR 0x%x (read from SPSR)",
618 	     abort_type_to_str(ai->abort_type),
619 	     ai->va, ai->fault_descr, ai->pc, read_ttbr0_el1(),
620 	     read_contextidr_el1(),
621 	     read_mpidr_el1(), (uint32_t)ai->regs->spsr);
622 #endif /*ARM64*/
623 #endif /*TRACE_LEVEL >= TRACE_INFO*/
624 }
625 
626 static enum tee_pager_fault_type tee_pager_get_fault_type(
627 		struct tee_pager_abort_info *ai)
628 {
629 	if (tee_pager_is_user_exception(ai)) {
630 		tee_pager_print_user_abort(ai);
631 		DMSG("[TEE_PAGER] abort in User mode (TA will panic)");
632 		return TEE_PAGER_FAULT_TYPE_USER_TA_PANIC;
633 	}
634 
635 	if (tee_pager_is_abort_in_abort_handler(ai)) {
636 		tee_pager_print_error_abort(ai);
637 		EMSG("[PAGER] abort in abort handler (trap CPU)");
638 		panic();
639 	}
640 
641 	if (ai->abort_type == THREAD_ABORT_UNDEF) {
642 		tee_pager_print_error_abort(ai);
643 		EMSG("[TEE_PAGER] undefined abort (trap CPU)");
644 		panic();
645 	}
646 
647 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
648 	case CORE_MMU_FAULT_ALIGNMENT:
649 		tee_pager_print_error_abort(ai);
650 		EMSG("[TEE_PAGER] alignment fault!  (trap CPU)");
651 		panic();
652 		break;
653 
654 	case CORE_MMU_FAULT_ACCESS_BIT:
655 		tee_pager_print_error_abort(ai);
656 		EMSG("[TEE_PAGER] access bit fault!  (trap CPU)");
657 		panic();
658 		break;
659 
660 	case CORE_MMU_FAULT_DEBUG_EVENT:
661 		tee_pager_print_abort(ai);
662 		DMSG("[TEE_PAGER] Ignoring debug event!");
663 		return TEE_PAGER_FAULT_TYPE_IGNORE;
664 
665 	case CORE_MMU_FAULT_TRANSLATION:
666 	case CORE_MMU_FAULT_WRITE_PERMISSION:
667 	case CORE_MMU_FAULT_READ_PERMISSION:
668 		return TEE_PAGER_FAULT_TYPE_PAGEABLE;
669 
670 	case CORE_MMU_FAULT_ASYNC_EXTERNAL:
671 		tee_pager_print_abort(ai);
672 		DMSG("[TEE_PAGER] Ignoring async external abort!");
673 		return TEE_PAGER_FAULT_TYPE_IGNORE;
674 
675 	case CORE_MMU_FAULT_OTHER:
676 	default:
677 		tee_pager_print_abort(ai);
678 		DMSG("[TEE_PAGER] Unhandled fault!");
679 		return TEE_PAGER_FAULT_TYPE_IGNORE;
680 	}
681 }
682 
683 
684 #ifdef CFG_WITH_PAGER
685 
686 /* Finds the oldest page and remaps it for the new virtual address */
687 static bool tee_pager_get_page(struct tee_pager_abort_info *ai,
688 			struct tee_pager_area *area,
689 			struct tee_pager_pmem **pmem_ret, paddr_t *pa_ret)
690 {
691 	unsigned pgidx = core_mmu_va2idx(&tbl_info, ai->va);
692 	struct tee_pager_pmem *pmem;
693 	paddr_t pa;
694 	uint32_t attr;
695 
696 	core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);
697 
698 	assert(!(attr & (TEE_MATTR_VALID_BLOCK | TEE_MATTR_HIDDEN_BLOCK)));
699 
700 	if (attr & TEE_MATTR_PHYS_BLOCK) {
701 		/*
702 		 * There's a pmem entry using this MMU entry, let's use
703 		 * that entry in the new mapping.
704 		 */
705 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
706 			if (pmem->pgidx == pgidx)
707 				break;
708 		}
709 		if (!pmem) {
710 			DMSG("Couldn't find pmem for pgidx %u", pgidx);
711 			return false;
712 		}
713 	} else {
714 		pmem = TAILQ_FIRST(&tee_pager_pmem_head);
715 		if (!pmem) {
716 			DMSG("No pmem entries");
717 			return false;
718 		}
719 		core_mmu_get_entry(&tbl_info, pmem->pgidx, &pa, &attr);
720 		core_mmu_set_entry(&tbl_info, pmem->pgidx, 0, 0);
721 	}
722 
723 	pmem->pgidx = pgidx;
724 	pmem->area = area;
725 	core_mmu_set_entry(&tbl_info, pgidx, pa, TEE_MATTR_PHYS_BLOCK);
726 
727 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
728 	if (area->store) {
729 		/* move page to back */
730 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
731 	} else {
732 		/* Move page to rw list */
733 		TEE_ASSERT(tee_pager_npages > 0);
734 		tee_pager_npages--;
735 		set_npages();
736 		TAILQ_INSERT_TAIL(&tee_pager_rw_pmem_head, pmem, link);
737 	}
738 
739 	/* TODO only invalidate entries touched above */
740 	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
741 
742 	*pmem_ret = pmem;
743 	*pa_ret = pa;
744 	return true;
745 }
746 
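/*
 * Return true if the faulting access is already satisfied by the current
 * mapping, i.e. the fault has been handled by another core. Panic on a
 * genuine permission violation.
 */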
747 static bool pager_check_access(struct tee_pager_abort_info *ai)
748 {
749 	unsigned pgidx = core_mmu_va2idx(&tbl_info, ai->va);
750 	uint32_t attr;
751 
752 	core_mmu_get_entry(&tbl_info, pgidx, NULL, &attr);
753 
754 	/* Not mapped */
755 	if (!(attr & TEE_MATTR_VALID_BLOCK))
756 		return false;
757 
758 	/* Not readable, should not happen */
759 	if (!(attr & TEE_MATTR_PR)) {
760 		tee_pager_print_error_abort(ai);
761 		panic();
762 	}
763 
764 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
765 	case CORE_MMU_FAULT_TRANSLATION:
766 	case CORE_MMU_FAULT_READ_PERMISSION:
767 		if (ai->abort_type == THREAD_ABORT_PREFETCH &&
768 		    !(attr & TEE_MATTR_PX)) {
769 			/* Attempting to execute from a non-executable page */
770 			tee_pager_print_error_abort(ai);
771 			panic();
772 		}
773 		/* Since the page is mapped now it's OK */
774 		return true;
775 	case CORE_MMU_FAULT_WRITE_PERMISSION:
776 		if (!(attr & TEE_MATTR_PW)) {
777 			/* Attempting to write to an RO page */
778 			tee_pager_print_error_abort(ai);
779 			panic();
780 		}
781 		return true;
782 	default:
783 		/* Some fault we can't deal with */
784 		tee_pager_print_error_abort(ai);
785 		panic();
786 	}
787 
788 }
789 
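/*
 * Page in the page covering the faulting address: unhide it if it is only
 * hidden, otherwise pick a physical page, populate and verify it through
 * the aliased mapping, and finally map it with the area's attributes.
 */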
790 static void tee_pager_handle_fault(struct tee_pager_abort_info *ai)
791 {
792 	struct tee_pager_area *area;
793 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
794 	uint32_t exceptions;
795 
796 #ifdef TEE_PAGER_DEBUG_PRINT
797 	tee_pager_print_abort(ai);
798 #endif
799 
800 	/* check if the access is valid */
801 	area = tee_pager_find_area(ai->va);
802 	if (!area) {
803 		tee_pager_print_abort(ai);
804 		DMSG("Invalid addr 0x%" PRIxVA, ai->va);
805 		panic();
806 	}
807 
808 	/*
809 	 * Below we're updating page mappings that can affect several active
810 	 * CPUs at once. We end up here because a thread tries to access some
811 	 * memory that isn't available. We have to be careful when making
812 	 * that memory available as other threads may succeed in accessing
813 	 * that address the moment after we've made it available.
814 	 *
815 	 * That means that we can't just map the memory and populate the
816 	 * page, instead we use the aliased mapping to populate the page
817 	 * and once everything is ready we map it.
818 	 */
819 	exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
820 	cpu_spin_lock(&pager_lock);
821 
822 	if (!tee_pager_unhide_page(page_va)) {
823 		struct tee_pager_pmem *pmem = NULL;
824 		paddr_t pa = 0;
825 
826 		/*
827 		 * The page wasn't hidden, but some other core may have
828 		 * updated the table entry before we got here.
829 		 */
830 		if (pager_check_access(ai)) {
831 			/*
832 			 * The access is permitted by the mapping, so we're
833 			 * done here: the fault has already been dealt with
834 			 * by another core.
835 			 */
836 			goto out;
837 		}
838 
839 		if (!tee_pager_get_page(ai, area, &pmem, &pa)) {
840 			tee_pager_print_abort(ai);
841 			panic();
842 		}
843 
844 		/* load page code & data */
845 		tee_pager_load_page(area, page_va, pmem->va_alias);
846 		tee_pager_verify_page(area, page_va, pmem->va_alias);
847 
848 		/*
849 		 * We've updated the page using the aliased mapping and
850 		 * some cache maintenance is now needed if it's an
851 		 * executable page.
852 		 *
853 		 * Since the d-cache is a physically-indexed,
854 		 * physically-tagged (PIPT) cache we can clean the aliased
855 		 * address instead of the real virtual address.
856 		 *
857 		 * The i-cache can also be PIPT, but may be something else
858 		 * too; to keep it simple we invalidate the entire i-cache.
859 		 * As a future optimization we may invalidate only the
860 		 * aliased area if it is a PIPT cache, else the entire cache.
861 		 */
862 		if (area->flags & TEE_PAGER_AREA_X) {
863 			/*
864 			 * Doing these operations to LoUIS (Level of
865 			 * unification, Inner Shareable) would be enough
866 			 */
867 			cache_maintenance_l1(DCACHE_AREA_CLEAN,
868 				pmem->va_alias, SMALL_PAGE_SIZE);
869 
870 			cache_maintenance_l1(ICACHE_INVALIDATE, NULL, 0);
871 		}
872 
873 		core_mmu_set_entry(&tbl_info, pmem->pgidx, pa,
874 				   get_area_mattr(area));
875 
876 #ifdef TEE_PAGER_DEBUG_PRINT
877 		DMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
878 		     core_mmu_idx2va(&tbl_info, pmem->pgidx), pa);
879 #endif
880 
881 	}
882 
883 	tee_pager_hide_pages();
884 out:
885 	cpu_spin_unlock(&pager_lock);
886 	thread_unmask_exceptions(exceptions);
887 }
888 
889 #else /*CFG_WITH_PAGER*/
890 
891 static void tee_pager_handle_fault(struct tee_pager_abort_info *ai)
892 {
893 	/*
894 	 * Until PAGER is supported, trap CPU here.
895 	 */
896 	tee_pager_print_error_abort(ai);
897 	EMSG("Unexpected page fault! Trap CPU");
898 	panic();
899 }
900 
901 #endif /*CFG_WITH_PAGER*/
902 
903 #ifdef ARM32
904 static void set_abort_info(uint32_t abort_type, struct thread_abort_regs *regs,
905 		struct tee_pager_abort_info *ai)
906 {
907 	switch (abort_type) {
908 	case THREAD_ABORT_DATA:
909 		ai->fault_descr = read_dfsr();
910 		ai->va = read_dfar();
911 		break;
912 	case THREAD_ABORT_PREFETCH:
913 		ai->fault_descr = read_ifsr();
914 		ai->va = read_ifar();
915 		break;
916 	default:
917 		ai->fault_descr = 0;
918 		ai->va = regs->elr;
919 		break;
920 	}
921 	ai->abort_type = abort_type;
922 	ai->pc = regs->elr;
923 	ai->regs = regs;
924 }
925 #endif /*ARM32*/
926 
927 #ifdef ARM64
928 static void set_abort_info(uint32_t abort_type __unused,
929 		struct thread_abort_regs *regs, struct tee_pager_abort_info *ai)
930 {
931 	ai->fault_descr = read_esr_el1();
932 	switch ((ai->fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
933 	case ESR_EC_IABT_EL0:
934 	case ESR_EC_IABT_EL1:
935 		ai->abort_type = THREAD_ABORT_PREFETCH;
936 		ai->va = read_far_el1();
937 		break;
938 	case ESR_EC_DABT_EL0:
939 	case ESR_EC_DABT_EL1:
940 	case ESR_EC_SP_ALIGN:
941 		ai->abort_type = THREAD_ABORT_DATA;
942 		ai->va = read_far_el1();
943 		break;
944 	default:
945 		ai->abort_type = THREAD_ABORT_UNDEF;
946 		ai->va = regs->elr;
947 	}
948 	ai->pc = regs->elr;
949 	ai->regs = regs;
950 }
951 #endif /*ARM64*/
952 
953 #ifdef ARM32
954 static void handle_user_ta_panic(struct tee_pager_abort_info *ai)
955 {
956 	/*
957 	 * It was a user exception, stop user execution and return
958 	 * to TEE Core.
959 	 */
960 	ai->regs->r0 = TEE_ERROR_TARGET_DEAD;
961 	ai->regs->r1 = true;
962 	ai->regs->r2 = 0xdeadbeef;
963 	ai->regs->elr = (uint32_t)thread_unwind_user_mode;
964 	ai->regs->spsr = read_cpsr();
965 	ai->regs->spsr &= ~CPSR_MODE_MASK;
966 	ai->regs->spsr |= CPSR_MODE_SVC;
967 	ai->regs->spsr &= ~CPSR_FIA;
968 	ai->regs->spsr |= read_spsr() & CPSR_FIA;
969 	/* Select Thumb or ARM mode */
970 	if (ai->regs->elr & 1)
971 		ai->regs->spsr |= CPSR_T;
972 	else
973 		ai->regs->spsr &= ~CPSR_T;
974 }
975 #endif /*ARM32*/
976 
977 #ifdef ARM64
978 static void handle_user_ta_panic(struct tee_pager_abort_info *ai)
979 {
980 	uint32_t daif;
981 
982 	/*
983 	 * It was a user exception, stop user execution and return
984 	 * to TEE Core.
985 	 */
986 	ai->regs->x0 = TEE_ERROR_TARGET_DEAD;
987 	ai->regs->x1 = true;
988 	ai->regs->x2 = 0xdeadbeef;
989 	ai->regs->elr = (vaddr_t)thread_unwind_user_mode;
990 	ai->regs->sp_el0 = thread_get_saved_thread_sp();
991 
992 	daif = (ai->regs->spsr >> SPSR_32_AIF_SHIFT) & SPSR_32_AIF_MASK;
993 	/* XXX what about DAIF_D? */
994 	ai->regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, daif);
995 }
996 #endif /*ARM64*/
997 
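/*
 * Dispatch an abort: collect the fault information and either ignore the
 * fault, panic the offending user TA, or handle it as a pageable fault.
 */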
998 void tee_pager_abort_handler(uint32_t abort_type,
999 		struct thread_abort_regs *regs)
1000 {
1001 	struct tee_pager_abort_info ai;
1002 
1003 	set_abort_info(abort_type, regs, &ai);
1004 
1005 	switch (tee_pager_get_fault_type(&ai)) {
1006 	case TEE_PAGER_FAULT_TYPE_IGNORE:
1007 		break;
1008 	case TEE_PAGER_FAULT_TYPE_USER_TA_PANIC:
1009 		handle_user_ta_panic(&ai);
1010 		break;
1011 	case TEE_PAGER_FAULT_TYPE_PAGEABLE:
1012 	default:
1013 		tee_pager_handle_fault(&ai);
1014 		break;
1015 	}
1016 }
1017 
1018 #ifdef CFG_WITH_PAGER
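/*
 * Register the physical pages currently backing @npages small pages at
 * @vaddr with the pager. With @unmap set the pages are made inaccessible
 * and become free pages available for paging; otherwise they stay mapped
 * and are assigned to the area covering them.
 */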
1019 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1020 {
1021 	size_t n;
1022 
1023 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1024 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1025 
1026 	/* setup memory */
1027 	for (n = 0; n < npages; n++) {
1028 		struct tee_pager_pmem *pmem;
1029 		tee_vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1030 		unsigned pgidx = core_mmu_va2idx(&tbl_info, va);
1031 		paddr_t pa;
1032 		uint32_t attr;
1033 
1034 		core_mmu_get_entry(&tbl_info, pgidx, &pa, &attr);
1035 
1036 		/* Ignore unmapped pages/blocks */
1037 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1038 			continue;
1039 
1040 		pmem = malloc(sizeof(struct tee_pager_pmem));
1041 		if (pmem == NULL) {
1042 			DMSG("Can't allocate memory");
1043 			panic();
1044 		}
1045 
1046 		pmem->pgidx = pgidx;
1047 		pmem->va_alias = pager_add_alias_page(pa);
1048 
1049 		if (unmap) {
1050 			/*
1051 			 * Note that we're making the page inaccessible
1052 			 * with the TEE_MATTR_PHYS_BLOCK attribute to
1053 			 * indicate that the descriptor still holds a valid
1054 			 * physical address of a page.
1055 			 */
1056 			pmem->area = NULL;
1057 			core_mmu_set_entry(&tbl_info, pgidx, pa,
1058 					   TEE_MATTR_PHYS_BLOCK);
1059 		} else {
1060 			/*
1061 			 * The page is still mapped, let's assign the area
1062 			 * and update the protection bits accordingly.
1063 			 */
1064 			pmem->area = tee_pager_find_area(va);
1065 			core_mmu_set_entry(&tbl_info, pgidx, pa,
1066 					   get_area_mattr(pmem->area));
1067 		}
1068 
1069 		tee_pager_npages++;
1070 		incr_npages_all();
1071 		set_npages();
1072 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1073 	}
1074 
1075 	/* Invalidate secure TLB */
1076 	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
1077 }
1078 
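/*
 * Release the physical pages backing the zero-initialized range
 * [vaddr, vaddr + size) back to the pager so they can be reused.
 * Both @vaddr and @size must be page aligned.
 */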
1079 void tee_pager_release_zi(vaddr_t vaddr, size_t size)
1080 {
1081 	bool unmapped = false;
1082 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
1083 
1084 	if ((vaddr & SMALL_PAGE_MASK) || (size & SMALL_PAGE_MASK))
1085 		panic();
1086 
1087 	for (; size; vaddr += SMALL_PAGE_SIZE, size -= SMALL_PAGE_SIZE)
1088 		unmapped |= tee_pager_release_one_zi(vaddr);
1089 
1090 	/* Invalidate secure TLB */
1091 	if (unmapped)
1092 		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
1093 
1094 	thread_set_exceptions(exceptions);
1095 }
1096 
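/*
 * Allocate a pageable, zero-initialized buffer of at least @size bytes
 * from the tee_mm_vcore pool and register it as a read/write area.
 * Returns NULL on failure or if @size is 0.
 */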
1097 void *tee_pager_request_zi(size_t size)
1098 {
1099 	tee_mm_entry_t *mm;
1100 
1101 	if (!size)
1102 		return NULL;
1103 
1104 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1105 	if (!mm)
1106 		return NULL;
1107 
1108 	tee_pager_add_area(mm, TEE_PAGER_AREA_RW, NULL, NULL);
1109 
1110 	return (void *)tee_mm_get_smem(mm);
1111 }
1112 
1113 #else /*CFG_WITH_PAGER*/
1114 
1115 void tee_pager_get_stats(struct tee_pager_stats *stats)
1116 {
1117 	memset(stats, 0, sizeof(struct tee_pager_stats));
1118 }
1119 
1120 #endif /*CFG_WITH_PAGER*/
1121