xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision b8d220d24434dd394e4adb70d93ecf4f3945c220)
1 /*
2  * Copyright (c) 2016, Linaro Limited
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  * this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  * this list of conditions and the following disclaimer in the documentation
14  * and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/queue.h>
30 #include <kernel/abort.h>
31 #include <kernel/panic.h>
32 #include <kernel/tee_misc.h>
33 #include <kernel/tee_ta_manager.h>
34 #include <kernel/thread.h>
35 #include <kernel/tz_proc.h>
36 #include <mm/core_mmu.h>
37 #include <mm/tee_mm.h>
38 #include <mm/tee_mmu_defs.h>
39 #include <mm/tee_pager.h>
40 #include <types_ext.h>
41 #include <stdlib.h>
#include <assert.h>
#include <string.h>
42 #include <tee_api_defines.h>
43 #include <tee/tee_cryp_provider.h>
44 #include <trace.h>
45 #include <utee_defines.h>
46 #include <util.h>
47 
48 struct tee_pager_area {
49 	const uint8_t *hashes;
50 	const uint8_t *store;
51 	uint32_t flags;
52 	tee_mm_entry_t *mm;
53 	TAILQ_ENTRY(tee_pager_area) link;
54 };
55 
56 static TAILQ_HEAD(tee_pager_area_head, tee_pager_area) tee_pager_area_head =
57 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
58 
59 /*
60  * struct tee_pager_pmem - Represents a physical page used for paging.
61  *
62  * @pgidx	an index of the entry in tee_pager_tbl_info. The physical
63  *		address is always kept in that MMU entry, so even if the
64  *		page isn't mapped the entry still holds the physical address.
65  *
66  * @va_alias	Virtual address where the physical page always is aliased.
67  *		Used during remapping of the page when the content needs to
68  *		be updated before it's made available at the new location.
69  *
70  * @area	a pointer to the pager area
71  */
72 struct tee_pager_pmem {
73 	unsigned pgidx;
74 	void *va_alias;
75 	struct tee_pager_area *area;
76 	TAILQ_ENTRY(tee_pager_pmem) link;
77 };
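
/*
 * Illustrative sketch (editor's addition, not part of the upstream code):
 * given a pmem entry, the virtual address it currently shadows and the
 * physical address kept in the table entry can be recovered with the same
 * core_mmu helpers used throughout this file:
 *
 *	vaddr_t va = core_mmu_idx2va(&tee_pager_tbl_info, pmem->pgidx);
 *	paddr_t pa;
 *	uint32_t attr;
 *
 *	core_mmu_get_entry(&tee_pager_tbl_info, pmem->pgidx, &pa, &attr);
 *
 * Even when the page is unmapped the entry still holds the physical
 * address (see the TEE_MATTR_PHYS_BLOCK handling below), while attr tells
 * whether the page is currently mapped, hidden or merely parked.
 */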
78 
79 /* The list of physical pages. The first page in the list is the oldest */
80 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
81 
82 static struct tee_pager_pmem_head tee_pager_pmem_head =
83 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
84 
85 static struct tee_pager_pmem_head tee_pager_rw_pmem_head =
86 	TAILQ_HEAD_INITIALIZER(tee_pager_rw_pmem_head);
87 
88 /* Number of pages to hide at a time */
89 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
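
/*
 * Editor's note (illustrative): with, say, 12 registered physical pages,
 * TEE_PAGER_NHIDE evaluates to 4, i.e. up to 4 of the oldest pages are
 * made inaccessible on each call to tee_pager_hide_pages() so that a new
 * access to them faults and refreshes their position in the LRU list.
 */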
90 
91 /* Number of registered physical pages, used when hiding pages. */
92 static size_t tee_pager_npages;
93 
94 #ifdef CFG_WITH_STATS
95 static struct tee_pager_stats pager_stats;
96 
97 static inline void incr_ro_hits(void)
98 {
99 	pager_stats.ro_hits++;
100 }
101 
102 static inline void incr_rw_hits(void)
103 {
104 	pager_stats.rw_hits++;
105 }
106 
107 static inline void incr_hidden_hits(void)
108 {
109 	pager_stats.hidden_hits++;
110 }
111 
112 static inline void incr_zi_released(void)
113 {
114 	pager_stats.zi_released++;
115 }
116 
117 static inline void incr_npages_all(void)
118 {
119 	pager_stats.npages_all++;
120 }
121 
122 static inline void set_npages(void)
123 {
124 	pager_stats.npages = tee_pager_npages;
125 }
126 
127 void tee_pager_get_stats(struct tee_pager_stats *stats)
128 {
129 	*stats = pager_stats;
130 
131 	pager_stats.hidden_hits = 0;
132 	pager_stats.ro_hits = 0;
133 	pager_stats.rw_hits = 0;
134 	pager_stats.zi_released = 0;
135 }
136 
137 #else /* CFG_WITH_STATS */
138 static inline void incr_ro_hits(void) { }
139 static inline void incr_rw_hits(void) { }
140 static inline void incr_hidden_hits(void) { }
141 static inline void incr_zi_released(void) { }
142 static inline void incr_npages_all(void) { }
143 static inline void set_npages(void) { }
144 
145 void tee_pager_get_stats(struct tee_pager_stats *stats)
146 {
147 	memset(stats, 0, sizeof(struct tee_pager_stats));
148 }
149 #endif /* CFG_WITH_STATS */
150 
151 struct core_mmu_table_info tee_pager_tbl_info;
152 
153 static unsigned pager_lock = SPINLOCK_UNLOCK;
154 
155 /* Defines the range of the alias area */
156 static tee_mm_entry_t *pager_alias_area;
157 /*
158  * Physical pages are added to the alias area in a stack-like fashion.
159  * @pager_alias_next_free holds the address of the next free entry,
160  * or 0 when the alias area has been exhausted.
161  */
162 static uintptr_t pager_alias_next_free;
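
/*
 * Illustrative example (editor's addition): with an alias area starting
 * at smem and covering N small pages, pager_add_alias_page() hands out
 *
 *	smem, smem + SMALL_PAGE_SIZE, ..., smem + (N - 1) * SMALL_PAGE_SIZE
 *
 * in that order and then sets @pager_alias_next_free to 0, after which
 * any further call would trip the TEE_ASSERT() in pager_add_alias_page().
 */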
163 
164 void tee_pager_set_alias_area(tee_mm_entry_t *mm)
165 {
166 	struct core_mmu_table_info ti;
167 	size_t tbl_va_size;
168 	unsigned idx;
169 	unsigned last_idx;
170 	vaddr_t smem = tee_mm_get_smem(mm);
171 	size_t nbytes = tee_mm_get_bytes(mm);
172 
173 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
174 
175 	TEE_ASSERT(!pager_alias_area);
176 	if (!core_mmu_find_table(smem, UINT_MAX, &ti)) {
177 		DMSG("Can't find translation table");
178 		panic();
179 	}
180 	if ((1 << ti.shift) != SMALL_PAGE_SIZE) {
181 		DMSG("Unsupported page size in translation table %u",
182 		     1 << ti.shift);
183 		panic();
184 	}
185 
186 	tbl_va_size = (1 << ti.shift) * ti.num_entries;
187 	if (!core_is_buffer_inside(smem, nbytes,
188 				   ti.va_base, tbl_va_size)) {
189 		DMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
190 			smem, nbytes, ti.va_base, tbl_va_size);
191 		panic();
192 	}
193 
194 	TEE_ASSERT(!(smem & SMALL_PAGE_MASK));
195 	TEE_ASSERT(!(nbytes & SMALL_PAGE_MASK));
196 
197 	pager_alias_area = mm;
198 	pager_alias_next_free = smem;
199 
200 	/* Clear all mapping in the alias area */
201 	idx = core_mmu_va2idx(&ti, smem);
202 	last_idx = core_mmu_va2idx(&ti, smem + nbytes);
203 	for (; idx < last_idx; idx++)
204 		core_mmu_set_entry(&ti, idx, 0, 0);
205 }
206 
207 static void *pager_add_alias_page(paddr_t pa)
208 {
209 	unsigned idx;
210 	struct core_mmu_table_info ti;
211 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
212 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
213 			TEE_MATTR_SECURE | TEE_MATTR_PRW;
214 
215 	DMSG("0x%" PRIxPA, pa);
216 
217 	TEE_ASSERT(pager_alias_next_free);
218 	if (!core_mmu_find_table(pager_alias_next_free, UINT_MAX, &ti))
219 		panic();
220 	idx = core_mmu_va2idx(&ti, pager_alias_next_free);
221 	core_mmu_set_entry(&ti, idx, pa, attr);
222 	pager_alias_next_free += SMALL_PAGE_SIZE;
223 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
224 				      tee_mm_get_bytes(pager_alias_area)))
225 		pager_alias_next_free = 0;
226 	return (void *)core_mmu_idx2va(&ti, idx);
227 }
228 
229 bool tee_pager_add_area(tee_mm_entry_t *mm, uint32_t flags, const void *store,
230 		const void *hashes)
231 {
232 	struct tee_pager_area *area;
233 	size_t tbl_va_size;
234 
235 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
236 		tee_mm_get_smem(mm),
237 		tee_mm_get_smem(mm) + (mm->size << mm->pool->shift),
238 		flags, store, hashes);
239 
240 	if (flags & TEE_PAGER_AREA_RO)
241 		TEE_ASSERT(store && hashes);
242 	else if (flags & TEE_PAGER_AREA_RW)
243 		TEE_ASSERT(!store && !hashes);
244 	else
245 		panic();
246 
247 	if (!tee_pager_tbl_info.num_entries) {
248 		if (!core_mmu_find_table(tee_mm_get_smem(mm), UINT_MAX,
249 					&tee_pager_tbl_info))
250 			return false;
251 		if ((1 << tee_pager_tbl_info.shift) != SMALL_PAGE_SIZE) {
252 			DMSG("Unsupported page size in translation table %u",
253 			     1 << tee_pager_tbl_info.shift);
254 			return false;
255 		}
256 	}
257 
258 	tbl_va_size = (1 << tee_pager_tbl_info.shift) *
259 			tee_pager_tbl_info.num_entries;
260 	if (!core_is_buffer_inside(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
261 				   tee_pager_tbl_info.va_base, tbl_va_size)) {
262 		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
263 			tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
264 			tee_pager_tbl_info.va_base, tbl_va_size);
265 		return false;
266 	}
267 
268 	area = malloc(sizeof(struct tee_pager_area));
269 	if (!area)
270 		return false;
271 
272 	area->mm = mm;
273 	area->flags = flags;
274 	area->store = store;
275 	area->hashes = hashes;
276 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
277 	return true;
278 }
279 
280 static struct tee_pager_area *tee_pager_find_area(vaddr_t va)
281 {
282 	struct tee_pager_area *area;
283 
284 	TAILQ_FOREACH(area, &tee_pager_area_head, link) {
285 		tee_mm_entry_t *mm = area->mm;
286 		size_t offset = (va - mm->pool->lo) >> mm->pool->shift;
287 
288 		if (offset >= mm->offset && offset < (mm->offset + mm->size))
289 			return area;
290 	}
291 	return NULL;
292 }
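
/*
 * Illustrative example (editor's addition), assuming a hypothetical pool
 * with pool->lo = 0x10100000 and pool->shift = SMALL_PAGE_SHIFT (4 KiB
 * granules): a faulting va of 0x10145000 gives
 *
 *	offset = (0x10145000 - 0x10100000) >> 12 = 0x45
 *
 * so an area whose mm entry covers offsets [0x40, 0x60) (mm->offset =
 * 0x40, mm->size = 0x20) is the one returned by tee_pager_find_area().
 */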
293 
294 static uint32_t get_area_mattr(struct tee_pager_area *area)
295 {
296 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
297 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
298 			TEE_MATTR_SECURE | TEE_MATTR_PR;
299 
300 	if (!(area->flags & TEE_PAGER_AREA_RO))
301 		attr |= TEE_MATTR_PW;
302 	if (area->flags & TEE_PAGER_AREA_X)
303 		attr |= TEE_MATTR_PX;
304 
305 	return attr;
306 }
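
/*
 * Editor's note (illustrative): the attributes produced per area type are
 *
 *	TEE_PAGER_AREA_RO | TEE_PAGER_AREA_X	read-only, executable
 *	TEE_PAGER_AREA_RO			read-only, no execute
 *	TEE_PAGER_AREA_RW			read-write, no execute
 *
 * all of them valid, global, secure and cached mappings.
 */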
307 
308 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
309 			void *va_alias)
310 {
311 	size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;
312 
313 	if (area->store) {
314 		size_t rel_pg_idx = pg_idx - area->mm->offset;
315 		const void *stored_page = area->store +
316 					  rel_pg_idx * SMALL_PAGE_SIZE;
317 
318 		memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
319 		incr_ro_hits();
320 	} else {
321 		memset(va_alias, 0, SMALL_PAGE_SIZE);
322 		incr_rw_hits();
323 	}
324 }
325 
326 static void tee_pager_verify_page(struct tee_pager_area *area, vaddr_t page_va,
327 			void *va_alias)
328 {
329 	size_t pg_idx = (page_va - area->mm->pool->lo) >> SMALL_PAGE_SHIFT;
330 
331 	if (area->store) {
332 		size_t rel_pg_idx = pg_idx - area->mm->offset;
333 		const void *hash = area->hashes +
334 				   rel_pg_idx * TEE_SHA256_HASH_SIZE;
335 
336 		if (hash_sha256_check(hash, va_alias, SMALL_PAGE_SIZE) !=
337 				TEE_SUCCESS) {
338 			EMSG("PH 0x%" PRIxVA " failed", page_va);
339 			panic();
340 		}
341 	}
342 }
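
/*
 * Editor's note (illustrative): for a backed (read-only) area the hashes
 * buffer is laid out as one SHA-256 digest per backed page, in page
 * order, so the hash of page i (relative to mm->offset) is found at
 *
 *	area->hashes + i * TEE_SHA256_HASH_SIZE
 *
 * which is how tee_pager_verify_page() indexes it above.
 */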
343 
344 static bool tee_pager_unhide_page(vaddr_t page_va)
345 {
346 	struct tee_pager_pmem *pmem;
347 
348 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
349 		paddr_t pa;
350 		uint32_t attr;
351 
352 		core_mmu_get_entry(&tee_pager_tbl_info, pmem->pgidx,
353 				   &pa, &attr);
354 
355 		if (!(attr & TEE_MATTR_HIDDEN_BLOCK))
356 			continue;
357 
358 		if (core_mmu_va2idx(&tee_pager_tbl_info, page_va) ==
359 		    pmem->pgidx) {
360 			/* page is hidden, show and move to back */
361 			core_mmu_set_entry(&tee_pager_tbl_info, pmem->pgidx, pa,
362 					   get_area_mattr(pmem->area));
363 
364 			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
365 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
366 
367 			/* TODO only invalidate entry touched above */
368 			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
369 
370 			incr_hidden_hits();
371 			return true;
372 		}
373 	}
374 
375 	return false;
376 }
377 
378 static void tee_pager_hide_pages(void)
379 {
380 	struct tee_pager_pmem *pmem;
381 	size_t n = 0;
382 
383 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
384 		paddr_t pa;
385 		uint32_t attr;
386 
387 		if (n >= TEE_PAGER_NHIDE)
388 			break;
389 		n++;
390 
391 		/*
392 		 * We cannot hide pages when pmem->area is not defined, as
393 		 * unhiding requires pmem->area to be set.
394 		 */
395 		if (!pmem->area)
396 			continue;
397 
398 		core_mmu_get_entry(&tee_pager_tbl_info, pmem->pgidx,
399 				   &pa, &attr);
400 		if (!(attr & TEE_MATTR_VALID_BLOCK))
401 			continue;
402 
403 		core_mmu_set_entry(&tee_pager_tbl_info, pmem->pgidx, pa,
404 				   TEE_MATTR_HIDDEN_BLOCK);
405 
406 	}
407 
408 	/* TODO only invalidate entries touched above */
409 	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
410 }
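
/*
 * Editor's note (illustrative): a paged entry in tee_pager_tbl_info is in
 * one of three states, distinguished only by its attribute bits:
 *
 *	TEE_MATTR_VALID_BLOCK	mapped and accessible
 *	TEE_MATTR_HIDDEN_BLOCK	temporarily made inaccessible by
 *				tee_pager_hide_pages(), restored cheaply by
 *				tee_pager_unhide_page()
 *	TEE_MATTR_PHYS_BLOCK	unmapped, but the descriptor still records
 *				the physical address of the page
 *
 * A fault on a hidden page therefore only costs a permission update plus
 * TLB maintenance, while a fault on an unmapped page costs a full page
 * load (or zero-fill) through the alias mapping.
 */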
411 
412 /*
413  * Find mapped pmem, hide it and move it back to the pageable pmem list.
414  * Return false if the page was not mapped, true if it was.
415  */
416 static bool tee_pager_release_one_zi(vaddr_t page_va)
417 {
418 	struct tee_pager_pmem *pmem;
419 	unsigned pgidx;
420 	paddr_t pa;
421 	uint32_t attr;
422 
423 	pgidx = core_mmu_va2idx(&tee_pager_tbl_info, page_va);
424 	core_mmu_get_entry(&tee_pager_tbl_info, pgidx, &pa, &attr);
425 
426 #ifdef TEE_PAGER_DEBUG_PRINT
427 	DMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
428 #endif
429 
430 	TAILQ_FOREACH(pmem, &tee_pager_rw_pmem_head, link) {
431 		if (pmem->pgidx != pgidx)
432 			continue;
433 
434 		core_mmu_set_entry(&tee_pager_tbl_info, pgidx, pa,
435 				   TEE_MATTR_PHYS_BLOCK);
436 		TAILQ_REMOVE(&tee_pager_rw_pmem_head, pmem, link);
437 		tee_pager_npages++;
438 		set_npages();
439 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
440 		incr_zi_released();
441 
442 
443 		return true;
444 	}
445 
446 	return false;
447 }
448 
449 /* Finds the oldest page and remaps it for the new virtual address */
450 static bool tee_pager_get_page(struct abort_info *ai,
451 			struct tee_pager_area *area,
452 			struct tee_pager_pmem **pmem_ret, paddr_t *pa_ret)
453 {
454 	unsigned pgidx = core_mmu_va2idx(&tee_pager_tbl_info, ai->va);
455 	struct tee_pager_pmem *pmem;
456 	paddr_t pa;
457 	uint32_t attr;
458 
459 	core_mmu_get_entry(&tee_pager_tbl_info, pgidx, &pa, &attr);
460 
461 	assert(!(attr & (TEE_MATTR_VALID_BLOCK | TEE_MATTR_HIDDEN_BLOCK)));
462 
463 	if (attr & TEE_MATTR_PHYS_BLOCK) {
464 		/*
465 		 * There's a pmem entry using this mmu entry, let's use
466 		 * that entry in the new mapping.
467 		 */
468 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
469 			if (pmem->pgidx == pgidx)
470 				break;
471 		}
472 		if (!pmem) {
473 			DMSG("Couldn't find pmem for pgidx %u", pgidx);
474 			return false;
475 		}
476 	} else {
477 		pmem = TAILQ_FIRST(&tee_pager_pmem_head);
478 		if (!pmem) {
479 			DMSG("No pmem entries");
480 			return false;
481 		}
482 		core_mmu_get_entry(&tee_pager_tbl_info, pmem->pgidx,
483 				   &pa, &attr);
484 		core_mmu_set_entry(&tee_pager_tbl_info, pmem->pgidx, 0, 0);
485 	}
486 
487 	pmem->pgidx = pgidx;
488 	pmem->area = area;
489 	core_mmu_set_entry(&tee_pager_tbl_info, pgidx, pa,
490 			   TEE_MATTR_PHYS_BLOCK);
491 
492 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
493 	if (area->store) {
494 		/* move page to back */
495 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
496 	} else {
497 		/* Move page to rw list */
498 		TEE_ASSERT(tee_pager_npages > 0);
499 		tee_pager_npages--;
500 		set_npages();
501 		TAILQ_INSERT_TAIL(&tee_pager_rw_pmem_head, pmem, link);
502 	}
503 
504 	/* TODO only invalidate entries touched above */
505 	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
506 
507 	*pmem_ret = pmem;
508 	*pa_ret = pa;
509 	return true;
510 }
511 
512 static bool pager_check_access(struct abort_info *ai)
513 {
514 	unsigned pgidx = core_mmu_va2idx(&tee_pager_tbl_info, ai->va);
515 	uint32_t attr;
516 
517 	core_mmu_get_entry(&tee_pager_tbl_info, pgidx, NULL, &attr);
518 
519 	/* Not mapped */
520 	if (!(attr & TEE_MATTR_VALID_BLOCK))
521 		return false;
522 
523 	/* Not readable, should not happen */
524 	if (!(attr & TEE_MATTR_PR)) {
525 		abort_print_error(ai);
526 		panic();
527 	}
528 
529 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
530 	case CORE_MMU_FAULT_TRANSLATION:
531 	case CORE_MMU_FAULT_READ_PERMISSION:
532 		if (ai->abort_type == ABORT_TYPE_PREFETCH &&
533 		    !(attr & TEE_MATTR_PX)) {
534 			/* Attempting to execute from an NOX page */
535 			abort_print_error(ai);
536 			panic();
537 		}
538 		/* Since the page is mapped now it's OK */
539 		return true;
540 	case CORE_MMU_FAULT_WRITE_PERMISSION:
541 		if (!(attr & TEE_MATTR_PW)) {
542 			/* Attempting to write to an RO page */
543 			abort_print_error(ai);
544 			panic();
545 		}
546 		return true;
547 	default:
548 		/* Some fault we can't deal with */
549 		abort_print_error(ai);
550 		panic();
551 	}
552 
553 }
554 
555 void tee_pager_handle_fault(struct abort_info *ai)
556 {
557 	struct tee_pager_area *area;
558 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
559 	uint32_t exceptions;
560 
561 #ifdef TEE_PAGER_DEBUG_PRINT
562 	abort_print(ai);
563 #endif
564 
565 	/* check if the access is valid */
566 	area = tee_pager_find_area(ai->va);
567 	if (!area) {
568 		abort_print_error(ai);
569 		DMSG("Invalid addr 0x%" PRIxVA, ai->va);
570 		panic();
571 	}
572 
573 	/*
574 	 * The page table updates below can affect several active CPUs at
575 	 * the same time. We end up here because a thread tries to access
576 	 * memory that isn't available. We have to be careful when making
577 	 * that memory available as other threads may succeed in accessing
578 	 * that address the moment after we've made it available.
579 	 *
580 	 * That means that we can't just map the memory and populate the
581 	 * page, instead we use the aliased mapping to populate the page
582 	 * and once everything is ready we map it.
583 	 */
584 	exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
585 	cpu_spin_lock(&pager_lock);
586 
587 	if (!tee_pager_unhide_page(page_va)) {
588 		struct tee_pager_pmem *pmem = NULL;
589 		paddr_t pa = 0;
590 
591 		/*
592 		 * The page wasn't hidden, but some other core may have
593 		 * updated the table entry before we got here.
594 		 */
595 		if (pager_check_access(ai)) {
596 			/*
597 			 * Kind of access is OK with the mapping, we're
598 			 * done here because the fault has already been
599 			 * dealt with by another core.
600 			 */
601 			goto out;
602 		}
603 
604 		if (!tee_pager_get_page(ai, area, &pmem, &pa)) {
605 			abort_print(ai);
606 			panic();
607 		}
608 
609 		/* load page code & data */
610 		tee_pager_load_page(area, page_va, pmem->va_alias);
611 		tee_pager_verify_page(area, page_va, pmem->va_alias);
612 
613 		/*
614 		 * We've updated the page using the aliased mapping and
615 		 * some cache maintenence is now needed if it's an
616 		 * some cache maintenance is now needed if it's an
617 		 *
618 		 * Since the d-cache is a Physically-indexed,
619 		 * physically-tagged (PIPT) cache we can clean the aliased
620 		 * address instead of the real virtual address.
621 		 *
622 		 * The i-cache can also be PIPT, but it may be something else
623 		 * too; to keep it simple we invalidate the entire i-cache.
624 		 * As a future optimization we may invalidate only the
625 		 * aliased area if it is a PIPT cache, else the entire cache.
626 		 */
627 		if (area->flags & TEE_PAGER_AREA_X) {
628 			/*
629 			 * Doing these operations to LoUIS (Level of
630 			 * unification, Inner Shareable) would be enough
631 			 */
632 			cache_maintenance_l1(DCACHE_AREA_CLEAN,
633 				pmem->va_alias, SMALL_PAGE_SIZE);
634 
635 			cache_maintenance_l1(ICACHE_INVALIDATE, NULL, 0);
636 		}
637 
638 		core_mmu_set_entry(&tee_pager_tbl_info, pmem->pgidx, pa,
639 				   get_area_mattr(area));
640 
641 #ifdef TEE_PAGER_DEBUG_PRINT
642 		DMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
643 		     core_mmu_idx2va(&tee_pager_tbl_info, pmem->pgidx), pa);
644 #endif
645 
646 	}
647 
648 	tee_pager_hide_pages();
649 out:
650 	cpu_spin_unlock(&pager_lock);
651 	thread_unmask_exceptions(exceptions);
652 }
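
/*
 * Editor's note (illustrative): the fault path above boils down to
 *
 *	mask IRQ, take the pager spinlock
 *	if the faulting page was merely hidden	-> unhide it
 *	else if another core already fixed the entry	-> nothing to do
 *	else	pick or steal a physical page, fill and verify it through
 *		the alias mapping, do cache maintenance if executable,
 *		then publish the final mapping
 *	hide the oldest pages again, unlock
 *
 * The aliased mapping exists precisely so that the page only becomes
 * visible at its real virtual address once its content is complete and,
 * for backed pages, verified.
 */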
653 
654 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
655 {
656 	size_t n;
657 
658 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
659 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
660 
661 	/* setup memory */
662 	for (n = 0; n < npages; n++) {
663 		struct tee_pager_pmem *pmem;
664 		tee_vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
665 		unsigned pgidx = core_mmu_va2idx(&tee_pager_tbl_info, va);
666 		paddr_t pa;
667 		uint32_t attr;
668 
669 		core_mmu_get_entry(&tee_pager_tbl_info, pgidx, &pa, &attr);
670 
671 		/* Ignore unmapped pages/blocks */
672 		if (!(attr & TEE_MATTR_VALID_BLOCK))
673 			continue;
674 
675 		pmem = malloc(sizeof(struct tee_pager_pmem));
676 		if (pmem == NULL) {
677 			DMSG("Can't allocate memory");
678 			panic();
679 		}
680 
681 		pmem->pgidx = pgidx;
682 		pmem->va_alias = pager_add_alias_page(pa);
683 
684 		if (unmap) {
685 			/*
686 			 * Note that we're making the page inaccessible
687 			 * with the TEE_MATTR_PHYS_BLOCK attribute to
688 			 * indicate that the descriptor still holds a valid
689 			 * physical address of a page.
690 			 */
691 			pmem->area = NULL;
692 			core_mmu_set_entry(&tee_pager_tbl_info, pgidx, pa,
693 					   TEE_MATTR_PHYS_BLOCK);
694 		} else {
695 			/*
696 			 * The page is still mapped, let's assign the area
697 			 * and update the protection bits accordingly.
698 			 */
699 			pmem->area = tee_pager_find_area(va);
700 			core_mmu_set_entry(&tee_pager_tbl_info, pgidx, pa,
701 					   get_area_mattr(pmem->area));
702 		}
703 
704 		tee_pager_npages++;
705 		incr_npages_all();
706 		set_npages();
707 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
708 	}
709 
710 	/* Invalidate secure TLB */
711 	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
712 }
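
/*
 * Illustrative sketch (editor's addition): a rough outline of how boot
 * code might wire the pager up with the functions in this file. The
 * function and parameter names below are hypothetical placeholders, not
 * taken from the real boot code, so this is kept under #if 0.
 */
#if 0
static void example_pager_init(tee_mm_entry_t *alias_mm,
			       tee_mm_entry_t *ro_mm,
			       const void *store, const void *hashes,
			       vaddr_t phys_pages_va, size_t nphys_pages)
{
	/* 1. Reserve a VA range used only for the aliased mappings */
	tee_pager_set_alias_area(alias_mm);

	/* 2. Declare a read-only, executable area backed by store + hashes */
	if (!tee_pager_add_area(ro_mm, TEE_PAGER_AREA_RO | TEE_PAGER_AREA_X,
				store, hashes))
		panic();

	/* 3. Donate physical pages to the pager, unmapping them */
	tee_pager_add_pages(phys_pages_va, nphys_pages, true /* unmap */);
}
#endif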
713 
714 void tee_pager_release_zi(vaddr_t vaddr, size_t size)
715 {
716 	bool unmapped = false;
717 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
718 
719 	if ((vaddr & SMALL_PAGE_MASK) || (size & SMALL_PAGE_MASK))
720 		panic();
721 
722 	for (; size; vaddr += SMALL_PAGE_SIZE, size -= SMALL_PAGE_SIZE)
723 		unmapped |= tee_pager_release_one_zi(vaddr);
724 
725 	/* Invalidate secure TLB */
726 	if (unmapped)
727 		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
728 
729 	thread_set_exceptions(exceptions);
730 }
731 
732 void *tee_pager_request_zi(size_t size)
733 {
734 	tee_mm_entry_t *mm;
735 
736 	if (!size)
737 		return NULL;
738 
739 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
740 	if (!mm)
741 		return NULL;
742 
743 	tee_pager_add_area(mm, TEE_PAGER_AREA_RW, NULL, NULL);
744 
745 	return (void *)tee_mm_get_smem(mm);
746 }
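
/*
 * Illustrative sketch (editor's addition): how a caller might use the
 * zero-initialized (ZI) paging interface above. example_zi_usage() and
 * the buffer size are hypothetical; the tee_pager_* calls are the ones
 * defined in this file.
 */
#if 0
static void example_zi_usage(void)
{
	size_t size = 4 * SMALL_PAGE_SIZE;
	/* Carves out a page-aligned VA range and registers it as a RW area */
	uint8_t *buf = tee_pager_request_zi(size);

	if (!buf)
		return;

	/* Pages are demand-faulted in as zero-filled on first access */
	memset(buf, 0xa5, size);

	/* Hand the physical pages back to the pager when done */
	tee_pager_release_zi((vaddr_t)buf, size);
}
#endif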
747