xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision 13616e886edc3680c03cdd455fa53fee5a9cba6d)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2021, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <io.h>
10 #include <keep.h>
11 #include <kernel/abort.h>
12 #include <kernel/asan.h>
13 #include <kernel/cache_helpers.h>
14 #include <kernel/linker.h>
15 #include <kernel/panic.h>
16 #include <kernel/spinlock.h>
17 #include <kernel/tee_misc.h>
18 #include <kernel/tee_ta_manager.h>
19 #include <kernel/thread.h>
20 #include <kernel/tlb_helpers.h>
21 #include <kernel/user_mode_ctx.h>
22 #include <mm/core_memprot.h>
23 #include <mm/fobj.h>
24 #include <mm/tee_mm.h>
25 #include <mm/tee_pager.h>
26 #include <stdlib.h>
27 #include <sys/queue.h>
28 #include <tee_api_defines.h>
29 #include <trace.h>
30 #include <types_ext.h>
31 #include <utee_defines.h>
32 #include <util.h>
33 
34 
35 static struct tee_pager_area_head tee_pager_area_head =
36 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
37 
38 #define INVALID_PGIDX		UINT_MAX
39 #define PMEM_FLAG_DIRTY		BIT(0)
40 #define PMEM_FLAG_HIDDEN	BIT(1)
41 
42 /*
43  * struct tee_pager_pmem - Represents a physical page used for paging.
44  *
45  * @flags	flags defined by PMEM_FLAG_* above
46  * @fobj_pgidx	index of the page in the @fobj
47  * @fobj	File object of which a page is made visible.
48  * @va_alias	Virtual address where the physical page always is aliased.
49  *		Used during remapping of the page when the content need to
50  *		be updated before it's available at the new location.
51  */
52 struct tee_pager_pmem {
53 	unsigned int flags;
54 	unsigned int fobj_pgidx;
55 	struct fobj *fobj;
56 	void *va_alias;
57 	TAILQ_ENTRY(tee_pager_pmem) link;
58 };
59 
60 /* The list of physical pages. The first page in the list is the oldest */
61 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
62 
63 static struct tee_pager_pmem_head tee_pager_pmem_head =
64 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
65 
66 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
67 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
68 
69 /* number of pages hidden */
70 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
71 
72 /* Number of registered physical pages, used hiding pages. */
73 static size_t tee_pager_npages;
74 
75 /* This area covers the IVs for all fobjs with paged IVs */
76 static struct tee_pager_area *pager_iv_area;
77 /* Used by make_iv_available(), see make_iv_available() for details. */
78 static struct tee_pager_pmem *pager_spare_pmem;
79 
80 #ifdef CFG_WITH_STATS
81 static struct tee_pager_stats pager_stats;
82 
83 static inline void incr_ro_hits(void)
84 {
85 	pager_stats.ro_hits++;
86 }
87 
88 static inline void incr_rw_hits(void)
89 {
90 	pager_stats.rw_hits++;
91 }
92 
93 static inline void incr_hidden_hits(void)
94 {
95 	pager_stats.hidden_hits++;
96 }
97 
98 static inline void incr_zi_released(void)
99 {
100 	pager_stats.zi_released++;
101 }
102 
103 static inline void incr_npages_all(void)
104 {
105 	pager_stats.npages_all++;
106 }
107 
108 static inline void set_npages(void)
109 {
110 	pager_stats.npages = tee_pager_npages;
111 }
112 
113 void tee_pager_get_stats(struct tee_pager_stats *stats)
114 {
115 	*stats = pager_stats;
116 
117 	pager_stats.hidden_hits = 0;
118 	pager_stats.ro_hits = 0;
119 	pager_stats.rw_hits = 0;
120 	pager_stats.zi_released = 0;
121 }
122 
123 #else /* CFG_WITH_STATS */
124 static inline void incr_ro_hits(void) { }
125 static inline void incr_rw_hits(void) { }
126 static inline void incr_hidden_hits(void) { }
127 static inline void incr_zi_released(void) { }
128 static inline void incr_npages_all(void) { }
129 static inline void set_npages(void) { }
130 
131 void tee_pager_get_stats(struct tee_pager_stats *stats)
132 {
133 	memset(stats, 0, sizeof(struct tee_pager_stats));
134 }
135 #endif /* CFG_WITH_STATS */
136 
137 #define TBL_NUM_ENTRIES	(CORE_MMU_PGDIR_SIZE / SMALL_PAGE_SIZE)
138 #define TBL_LEVEL	CORE_MMU_PGDIR_LEVEL
139 #define TBL_SHIFT	SMALL_PAGE_SHIFT
140 
141 #define EFFECTIVE_VA_SIZE \
142 	(ROUNDUP(VCORE_START_VA + TEE_RAM_VA_SIZE, CORE_MMU_PGDIR_SIZE) - \
143 	 ROUNDDOWN(VCORE_START_VA, CORE_MMU_PGDIR_SIZE))
144 
145 static struct pager_table {
146 	struct pgt pgt;
147 	struct core_mmu_table_info tbl_info;
148 } *pager_tables;
149 static unsigned int num_pager_tables;
150 
151 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
152 
153 /* Defines the range of the alias area */
154 static tee_mm_entry_t *pager_alias_area;
155 /*
156  * Physical pages are added in a stack like fashion to the alias area,
157  * @pager_alias_next_free gives the address of next free entry if
158  * @pager_alias_next_free is != 0
159  */
160 static uintptr_t pager_alias_next_free;
161 
162 #ifdef CFG_TEE_CORE_DEBUG
163 #define pager_lock(ai) pager_lock_dldetect(__func__, __LINE__, ai)
164 
165 static uint32_t pager_lock_dldetect(const char *func, const int line,
166 				    struct abort_info *ai)
167 {
168 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
169 	unsigned int retries = 0;
170 	unsigned int reminder = 0;
171 
172 	while (!cpu_spin_trylock(&pager_spinlock)) {
173 		retries++;
174 		if (!retries) {
175 			/* wrapped, time to report */
176 			trace_printf(func, line, TRACE_ERROR, true,
177 				     "possible spinlock deadlock reminder %u",
178 				     reminder);
179 			if (reminder < UINT_MAX)
180 				reminder++;
181 			if (ai)
182 				abort_print(ai);
183 		}
184 	}
185 
186 	return exceptions;
187 }
188 #else
189 static uint32_t pager_lock(struct abort_info __unused *ai)
190 {
191 	return cpu_spin_lock_xsave(&pager_spinlock);
192 }
193 #endif
194 
195 static uint32_t pager_lock_check_stack(size_t stack_size)
196 {
197 	if (stack_size) {
198 		int8_t buf[stack_size];
199 		size_t n;
200 
201 		/*
202 		 * Make sure to touch all pages of the stack that we expect
203 		 * to use with this lock held. We need to take eventual
204 		 * page faults before the lock is taken or we'll deadlock
205 		 * the pager. The pages that are populated in this way will
206 		 * eventually be released at certain save transitions of
207 		 * the thread.
208 		 */
209 		for (n = 0; n < stack_size; n += SMALL_PAGE_SIZE)
210 			io_write8((vaddr_t)buf + n, 1);
211 		io_write8((vaddr_t)buf + stack_size - 1, 1);
212 	}
213 
214 	return pager_lock(NULL);
215 }
216 
217 static void pager_unlock(uint32_t exceptions)
218 {
219 	cpu_spin_unlock_xrestore(&pager_spinlock, exceptions);
220 }
221 
222 void *tee_pager_phys_to_virt(paddr_t pa)
223 {
224 	struct core_mmu_table_info ti;
225 	unsigned idx;
226 	uint32_t a;
227 	paddr_t p;
228 	vaddr_t v;
229 	size_t n;
230 
231 	/*
232 	 * Most addresses are mapped lineary, try that first if possible.
233 	 */
234 	if (!tee_pager_get_table_info(pa, &ti))
235 		return NULL; /* impossible pa */
236 	idx = core_mmu_va2idx(&ti, pa);
237 	core_mmu_get_entry(&ti, idx, &p, &a);
238 	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
239 		return (void *)core_mmu_idx2va(&ti, idx);
240 
241 	n = 0;
242 	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_VA_START);
243 	while (true) {
244 		while (idx < TBL_NUM_ENTRIES) {
245 			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
246 			if (v >= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE))
247 				return NULL;
248 
249 			core_mmu_get_entry(&pager_tables[n].tbl_info,
250 					   idx, &p, &a);
251 			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
252 				return (void *)v;
253 			idx++;
254 		}
255 
256 		n++;
257 		if (n >= num_pager_tables)
258 			return NULL;
259 		idx = 0;
260 	}
261 
262 	return NULL;
263 }
264 
265 static bool pmem_is_hidden(struct tee_pager_pmem *pmem)
266 {
267 	return pmem->flags & PMEM_FLAG_HIDDEN;
268 }
269 
270 static bool pmem_is_dirty(struct tee_pager_pmem *pmem)
271 {
272 	return pmem->flags & PMEM_FLAG_DIRTY;
273 }
274 
275 static bool pmem_is_covered_by_area(struct tee_pager_pmem *pmem,
276 				    struct tee_pager_area *area)
277 {
278 	if (pmem->fobj != area->fobj)
279 		return false;
280 	if (pmem->fobj_pgidx < area->fobj_pgoffs)
281 		return false;
282 	if ((pmem->fobj_pgidx - area->fobj_pgoffs) >=
283 	    (area->size >> SMALL_PAGE_SHIFT))
284 		return false;
285 
286 	return true;
287 }
288 
289 static size_t pmem_get_area_tblidx(struct tee_pager_pmem *pmem,
290 				   struct tee_pager_area *area)
291 {
292 	size_t tbloffs = (area->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT;
293 
294 	return pmem->fobj_pgidx - area->fobj_pgoffs + tbloffs;
295 }
296 
297 static struct pager_table *find_pager_table_may_fail(vaddr_t va)
298 {
299 	size_t n;
300 	const vaddr_t mask = CORE_MMU_PGDIR_MASK;
301 
302 	if (!pager_tables)
303 		return NULL;
304 
305 	n = ((va & ~mask) - pager_tables[0].tbl_info.va_base) >>
306 	    CORE_MMU_PGDIR_SHIFT;
307 	if (n >= num_pager_tables)
308 		return NULL;
309 
310 	assert(va >= pager_tables[n].tbl_info.va_base &&
311 	       va <= (pager_tables[n].tbl_info.va_base | mask));
312 
313 	return pager_tables + n;
314 }
315 
316 static struct pager_table *find_pager_table(vaddr_t va)
317 {
318 	struct pager_table *pt = find_pager_table_may_fail(va);
319 
320 	assert(pt);
321 	return pt;
322 }
323 
324 bool tee_pager_get_table_info(vaddr_t va, struct core_mmu_table_info *ti)
325 {
326 	struct pager_table *pt = find_pager_table_may_fail(va);
327 
328 	if (!pt)
329 		return false;
330 
331 	*ti = pt->tbl_info;
332 	return true;
333 }
334 
335 static struct core_mmu_table_info *find_table_info(vaddr_t va)
336 {
337 	return &find_pager_table(va)->tbl_info;
338 }
339 
340 static struct pgt *find_core_pgt(vaddr_t va)
341 {
342 	return &find_pager_table(va)->pgt;
343 }
344 
345 void tee_pager_set_alias_area(tee_mm_entry_t *mm)
346 {
347 	struct pager_table *pt;
348 	unsigned idx;
349 	vaddr_t smem = tee_mm_get_smem(mm);
350 	size_t nbytes = tee_mm_get_bytes(mm);
351 	vaddr_t v;
352 	uint32_t a = 0;
353 
354 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
355 
356 	assert(!pager_alias_area);
357 	pager_alias_area = mm;
358 	pager_alias_next_free = smem;
359 
360 	/* Clear all mapping in the alias area */
361 	pt = find_pager_table(smem);
362 	idx = core_mmu_va2idx(&pt->tbl_info, smem);
363 	while (pt <= (pager_tables + num_pager_tables - 1)) {
364 		while (idx < TBL_NUM_ENTRIES) {
365 			v = core_mmu_idx2va(&pt->tbl_info, idx);
366 			if (v >= (smem + nbytes))
367 				goto out;
368 
369 			core_mmu_get_entry(&pt->tbl_info, idx, NULL, &a);
370 			core_mmu_set_entry(&pt->tbl_info, idx, 0, 0);
371 			if (a & TEE_MATTR_VALID_BLOCK)
372 				pgt_dec_used_entries(&pt->pgt);
373 			idx++;
374 		}
375 
376 		pt++;
377 		idx = 0;
378 	}
379 
380 out:
381 	tlbi_mva_range(smem, nbytes, SMALL_PAGE_SIZE);
382 }
383 
384 static size_t tbl_usage_count(struct core_mmu_table_info *ti)
385 {
386 	size_t n;
387 	uint32_t a = 0;
388 	size_t usage = 0;
389 
390 	for (n = 0; n < ti->num_entries; n++) {
391 		core_mmu_get_entry(ti, n, NULL, &a);
392 		if (a & TEE_MATTR_VALID_BLOCK)
393 			usage++;
394 	}
395 	return usage;
396 }
397 
398 static void area_get_entry(struct tee_pager_area *area, size_t idx,
399 			   paddr_t *pa, uint32_t *attr)
400 {
401 	assert(area->pgt);
402 	assert(idx < TBL_NUM_ENTRIES);
403 	core_mmu_get_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
404 }
405 
406 static void area_set_entry(struct tee_pager_area *area, size_t idx,
407 			   paddr_t pa, uint32_t attr)
408 {
409 	assert(area->pgt);
410 	assert(idx < TBL_NUM_ENTRIES);
411 	core_mmu_set_entry_primitive(area->pgt->tbl, TBL_LEVEL, idx, pa, attr);
412 }
413 
414 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
415 {
416 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
417 }
418 
419 static vaddr_t area_idx2va(struct tee_pager_area *area, size_t idx)
420 {
421 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
422 }
423 
424 static void area_tlbi_page_va(struct tee_pager_area *area, vaddr_t va)
425 {
426 #if defined(CFG_PAGED_USER_TA)
427 	assert(area->pgt);
428 	if (area->pgt->ctx) {
429 		uint32_t asid = to_user_mode_ctx(area->pgt->ctx)->vm_info.asid;
430 
431 		tlbi_mva_asid(va, asid);
432 		return;
433 	}
434 #endif
435 	tlbi_mva_allasid(va);
436 }
437 
438 static void area_tlbi_entry(struct tee_pager_area *area, size_t idx)
439 {
440 	area_tlbi_page_va(area, area_idx2va(area, idx));
441 }
442 
443 static void pmem_assign_fobj_page(struct tee_pager_pmem *pmem,
444 				  struct tee_pager_area *area, vaddr_t va)
445 {
446 	struct tee_pager_pmem *p = NULL;
447 	unsigned int fobj_pgidx = 0;
448 
449 	assert(!pmem->fobj && pmem->fobj_pgidx == INVALID_PGIDX);
450 
451 	fobj_pgidx = area_va2idx(area, va) + area->fobj_pgoffs -
452 		     ((area->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT);
453 
454 	TAILQ_FOREACH(p, &tee_pager_pmem_head, link)
455 		assert(p->fobj != area->fobj || p->fobj_pgidx != fobj_pgidx);
456 
457 	pmem->fobj = area->fobj;
458 	pmem->fobj_pgidx = fobj_pgidx;
459 }
460 
461 static void pmem_clear(struct tee_pager_pmem *pmem)
462 {
463 	pmem->fobj = NULL;
464 	pmem->fobj_pgidx = INVALID_PGIDX;
465 	pmem->flags = 0;
466 }
467 
468 static void pmem_unmap(struct tee_pager_pmem *pmem, struct pgt *only_this_pgt)
469 {
470 	struct tee_pager_area *area = NULL;
471 	size_t tblidx = 0;
472 	uint32_t a = 0;
473 
474 	TAILQ_FOREACH(area, &pmem->fobj->areas, fobj_link) {
475 		/*
476 		 * If only_this_pgt points to a pgt then the pgt of this
477 		 * area has to match or we'll skip over it.
478 		 */
479 		if (only_this_pgt && area->pgt != only_this_pgt)
480 			continue;
481 		if (!area->pgt || !pmem_is_covered_by_area(pmem, area))
482 			continue;
483 		tblidx = pmem_get_area_tblidx(pmem, area);
484 		area_get_entry(area, tblidx, NULL, &a);
485 		if (a & TEE_MATTR_VALID_BLOCK) {
486 			area_set_entry(area, tblidx, 0, 0);
487 			pgt_dec_used_entries(area->pgt);
488 			area_tlbi_entry(area, tblidx);
489 		}
490 	}
491 }
492 
493 void tee_pager_early_init(void)
494 {
495 	size_t n = 0;
496 
497 	num_pager_tables = EFFECTIVE_VA_SIZE / CORE_MMU_PGDIR_SIZE;
498 	pager_tables = calloc(num_pager_tables, sizeof(*pager_tables));
499 	if (!pager_tables)
500 		panic("Cannot allocate pager_tables");
501 
502 	/*
503 	 * Note that this depends on add_pager_vaspace() adding vaspace
504 	 * after end of memory.
505 	 */
506 	for (n = 0; n < num_pager_tables; n++) {
507 		if (!core_mmu_find_table(NULL, VCORE_START_VA +
508 					 n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
509 					 &pager_tables[n].tbl_info))
510 			panic("can't find mmu tables");
511 
512 		if (pager_tables[n].tbl_info.shift != TBL_SHIFT)
513 			panic("Unsupported page size in translation table");
514 		assert(pager_tables[n].tbl_info.num_entries == TBL_NUM_ENTRIES);
515 		assert(pager_tables[n].tbl_info.level == TBL_LEVEL);
516 
517 		pager_tables[n].pgt.tbl = pager_tables[n].tbl_info.table;
518 		pgt_set_used_entries(&pager_tables[n].pgt,
519 				tbl_usage_count(&pager_tables[n].tbl_info));
520 	}
521 }
522 
523 static void *pager_add_alias_page(paddr_t pa)
524 {
525 	unsigned idx;
526 	struct core_mmu_table_info *ti;
527 	/* Alias pages mapped without write permission: runtime will care */
528 	uint32_t attr = TEE_MATTR_VALID_BLOCK |
529 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
530 			TEE_MATTR_SECURE | TEE_MATTR_PR;
531 
532 	DMSG("0x%" PRIxPA, pa);
533 
534 	ti = find_table_info(pager_alias_next_free);
535 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
536 	core_mmu_set_entry(ti, idx, pa, attr);
537 	pgt_inc_used_entries(find_core_pgt(pager_alias_next_free));
538 	pager_alias_next_free += SMALL_PAGE_SIZE;
539 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
540 				      tee_mm_get_bytes(pager_alias_area)))
541 		pager_alias_next_free = 0;
542 	return (void *)core_mmu_idx2va(ti, idx);
543 }
544 
545 static void area_insert(struct tee_pager_area_head *head,
546 			struct tee_pager_area *area,
547 			struct tee_pager_area *a_prev)
548 {
549 	uint32_t exceptions = pager_lock_check_stack(8);
550 
551 	if (a_prev)
552 		TAILQ_INSERT_AFTER(head, a_prev, area, link);
553 	else
554 		TAILQ_INSERT_HEAD(head, area, link);
555 	TAILQ_INSERT_TAIL(&area->fobj->areas, area, fobj_link);
556 
557 	pager_unlock(exceptions);
558 }
559 DECLARE_KEEP_PAGER(area_insert);
560 
561 void tee_pager_add_core_area(vaddr_t base, enum tee_pager_area_type type,
562 			     struct fobj *fobj)
563 {
564 	struct tee_pager_area *area = NULL;
565 	uint32_t flags = 0;
566 	size_t fobj_pgoffs = 0;
567 	vaddr_t b = base;
568 	size_t s = 0;
569 	size_t s2 = 0;
570 
571 	assert(fobj);
572 	s = fobj->num_pages * SMALL_PAGE_SIZE;
573 
574 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : type %d", base, base + s, type);
575 
576 	if (base & SMALL_PAGE_MASK || !s) {
577 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, s);
578 		panic();
579 	}
580 
581 	switch (type) {
582 	case PAGER_AREA_TYPE_RO:
583 		flags = TEE_MATTR_PRX;
584 		break;
585 	case PAGER_AREA_TYPE_RW:
586 	case PAGER_AREA_TYPE_LOCK:
587 		flags = TEE_MATTR_PRW;
588 		break;
589 	default:
590 		panic();
591 	}
592 
593 	while (s) {
594 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
595 		area = calloc(1, sizeof(*area));
596 		if (!area)
597 			panic("alloc_area");
598 
599 		area->fobj = fobj_get(fobj);
600 		area->fobj_pgoffs = fobj_pgoffs;
601 		area->type = type;
602 		area->pgt = find_core_pgt(b);
603 		area->base = b;
604 		area->size = s2;
605 		area->flags = flags;
606 		area_insert(&tee_pager_area_head, area, NULL);
607 
608 		b += s2;
609 		s -= s2;
610 		fobj_pgoffs += s2 / SMALL_PAGE_SIZE;
611 	}
612 }
613 
614 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
615 					vaddr_t va)
616 {
617 	struct tee_pager_area *area;
618 
619 	if (!areas)
620 		return NULL;
621 
622 	TAILQ_FOREACH(area, areas, link) {
623 		if (core_is_buffer_inside(va, 1, area->base, area->size))
624 			return area;
625 	}
626 	return NULL;
627 }
628 
629 #ifdef CFG_PAGED_USER_TA
630 static struct tee_pager_area *find_uta_area(vaddr_t va)
631 {
632 	struct ts_ctx *ctx = thread_get_tsd()->ctx;
633 
634 	if (!is_user_mode_ctx(ctx))
635 		return NULL;
636 	return find_area(to_user_mode_ctx(ctx)->areas, va);
637 }
638 #else
639 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
640 {
641 	return NULL;
642 }
643 #endif /*CFG_PAGED_USER_TA*/
644 
645 
646 static uint32_t get_area_mattr(uint32_t area_flags)
647 {
648 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
649 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
650 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
651 
652 	return attr;
653 }
654 
655 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
656 {
657 	struct core_mmu_table_info *ti;
658 	paddr_t pa;
659 	unsigned idx;
660 
661 	ti = find_table_info((vaddr_t)pmem->va_alias);
662 	idx = core_mmu_va2idx(ti, (vaddr_t)pmem->va_alias);
663 	core_mmu_get_entry(ti, idx, &pa, NULL);
664 	return pa;
665 }
666 
667 #ifdef CFG_PAGED_USER_TA
668 static void unlink_area(struct tee_pager_area_head *area_head,
669 			struct tee_pager_area *area)
670 {
671 	uint32_t exceptions = pager_lock_check_stack(64);
672 
673 	TAILQ_REMOVE(area_head, area, link);
674 	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);
675 
676 	pager_unlock(exceptions);
677 }
678 DECLARE_KEEP_PAGER(unlink_area);
679 
680 static void free_area(struct tee_pager_area *area)
681 {
682 	fobj_put(area->fobj);
683 	free(area);
684 }
685 
686 static TEE_Result pager_add_um_area(struct user_mode_ctx *uctx, vaddr_t base,
687 				    struct fobj *fobj, uint32_t prot)
688 {
689 	struct tee_pager_area *a_prev = NULL;
690 	struct tee_pager_area *area = NULL;
691 	vaddr_t b = base;
692 	size_t fobj_pgoffs = 0;
693 	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;
694 
695 	if (!uctx->areas) {
696 		uctx->areas = malloc(sizeof(*uctx->areas));
697 		if (!uctx->areas)
698 			return TEE_ERROR_OUT_OF_MEMORY;
699 		TAILQ_INIT(uctx->areas);
700 	}
701 
702 	area = TAILQ_FIRST(uctx->areas);
703 	while (area) {
704 		if (core_is_buffer_intersect(b, s, area->base,
705 					     area->size))
706 			return TEE_ERROR_BAD_PARAMETERS;
707 		if (b < area->base)
708 			break;
709 		a_prev = area;
710 		area = TAILQ_NEXT(area, link);
711 	}
712 
713 	while (s) {
714 		size_t s2;
715 
716 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
717 		area = calloc(1, sizeof(*area));
718 		if (!area)
719 			return TEE_ERROR_OUT_OF_MEMORY;
720 
721 		/* Table info will be set when the context is activated. */
722 		area->fobj = fobj_get(fobj);
723 		area->fobj_pgoffs = fobj_pgoffs;
724 		area->type = PAGER_AREA_TYPE_RW;
725 		area->base = b;
726 		area->size = s2;
727 		area->flags = prot;
728 
729 		area_insert(uctx->areas, area, a_prev);
730 
731 		a_prev = area;
732 		b += s2;
733 		s -= s2;
734 		fobj_pgoffs += s2 / SMALL_PAGE_SIZE;
735 	}
736 
737 	return TEE_SUCCESS;
738 }
739 
740 TEE_Result tee_pager_add_um_area(struct user_mode_ctx *uctx, vaddr_t base,
741 				 struct fobj *fobj, uint32_t prot)
742 {
743 	TEE_Result res = TEE_SUCCESS;
744 	struct thread_specific_data *tsd = thread_get_tsd();
745 	struct tee_pager_area *area = NULL;
746 	struct core_mmu_table_info dir_info = { NULL };
747 
748 	if (uctx->ts_ctx != tsd->ctx) {
749 		/*
750 		 * Changes are to an utc that isn't active. Just add the
751 		 * areas page tables will be dealt with later.
752 		 */
753 		return pager_add_um_area(uctx, base, fobj, prot);
754 	}
755 
756 	/*
757 	 * Assign page tables before adding areas to be able to tell which
758 	 * are newly added and should be removed in case of failure.
759 	 */
760 	tee_pager_assign_um_tables(uctx);
761 	res = pager_add_um_area(uctx, base, fobj, prot);
762 	if (res) {
763 		struct tee_pager_area *next_a;
764 
765 		/* Remove all added areas */
766 		TAILQ_FOREACH_SAFE(area, uctx->areas, link, next_a) {
767 			if (!area->pgt) {
768 				unlink_area(uctx->areas, area);
769 				free_area(area);
770 			}
771 		}
772 		return res;
773 	}
774 
775 	/*
776 	 * Assign page tables to the new areas and make sure that the page
777 	 * tables are registered in the upper table.
778 	 */
779 	tee_pager_assign_um_tables(uctx);
780 	core_mmu_get_user_pgdir(&dir_info);
781 	TAILQ_FOREACH(area, uctx->areas, link) {
782 		paddr_t pa;
783 		size_t idx;
784 		uint32_t attr;
785 
786 		idx = core_mmu_va2idx(&dir_info, area->pgt->vabase);
787 		core_mmu_get_entry(&dir_info, idx, &pa, &attr);
788 
789 		/*
790 		 * Check if the page table already is used, if it is, it's
791 		 * already registered.
792 		 */
793 		if (area->pgt->num_used_entries) {
794 			assert(attr & TEE_MATTR_TABLE);
795 			assert(pa == virt_to_phys(area->pgt->tbl));
796 			continue;
797 		}
798 
799 		attr = TEE_MATTR_SECURE | TEE_MATTR_TABLE;
800 		pa = virt_to_phys(area->pgt->tbl);
801 		assert(pa);
802 		/*
803 		 * Note that the update of the table entry is guaranteed to
804 		 * be atomic.
805 		 */
806 		core_mmu_set_entry(&dir_info, idx, pa, attr);
807 	}
808 
809 	return TEE_SUCCESS;
810 }
811 
812 static void split_area(struct tee_pager_area_head *area_head,
813 		       struct tee_pager_area *area, struct tee_pager_area *a2,
814 		       vaddr_t va)
815 {
816 	uint32_t exceptions = pager_lock_check_stack(64);
817 	size_t diff = va - area->base;
818 
819 	a2->fobj = fobj_get(area->fobj);
820 	a2->fobj_pgoffs = area->fobj_pgoffs + diff / SMALL_PAGE_SIZE;
821 	a2->type = area->type;
822 	a2->flags = area->flags;
823 	a2->base = va;
824 	a2->size = area->size - diff;
825 	a2->pgt = area->pgt;
826 	area->size = diff;
827 
828 	TAILQ_INSERT_AFTER(area_head, area, a2, link);
829 	TAILQ_INSERT_AFTER(&area->fobj->areas, area, a2, fobj_link);
830 
831 	pager_unlock(exceptions);
832 }
833 DECLARE_KEEP_PAGER(split_area);
834 
835 TEE_Result tee_pager_split_um_region(struct user_mode_ctx *uctx, vaddr_t va)
836 {
837 	struct tee_pager_area *area = NULL;
838 	struct tee_pager_area *a2 = NULL;
839 
840 	if (va & SMALL_PAGE_MASK)
841 		return TEE_ERROR_BAD_PARAMETERS;
842 
843 	TAILQ_FOREACH(area, uctx->areas, link) {
844 		if (va == area->base || va == area->base + area->size)
845 			return TEE_SUCCESS;
846 		if (va > area->base && va < area->base + area->size) {
847 			a2 = calloc(1, sizeof(*a2));
848 			if (!a2)
849 				return TEE_ERROR_OUT_OF_MEMORY;
850 			split_area(uctx->areas, area, a2, va);
851 			return TEE_SUCCESS;
852 		}
853 	}
854 
855 	return TEE_SUCCESS;
856 }
857 
858 static void merge_area_with_next(struct tee_pager_area_head *area_head,
859 				 struct tee_pager_area *a,
860 				 struct tee_pager_area *a_next)
861 {
862 	uint32_t exceptions = pager_lock_check_stack(64);
863 
864 	TAILQ_REMOVE(area_head, a_next, link);
865 	TAILQ_REMOVE(&a_next->fobj->areas, a_next, fobj_link);
866 	a->size += a_next->size;
867 
868 	pager_unlock(exceptions);
869 }
870 DECLARE_KEEP_PAGER(merge_area_with_next);
871 
872 void tee_pager_merge_um_region(struct user_mode_ctx *uctx, vaddr_t va,
873 			       size_t len)
874 {
875 	struct tee_pager_area *a_next = NULL;
876 	struct tee_pager_area *a = NULL;
877 	vaddr_t end_va = 0;
878 
879 	if ((va | len) & SMALL_PAGE_MASK)
880 		return;
881 	if (ADD_OVERFLOW(va, len, &end_va))
882 		return;
883 
884 	for (a = TAILQ_FIRST(uctx->areas);; a = a_next) {
885 		a_next = TAILQ_NEXT(a, link);
886 		if (!a_next)
887 			return;
888 
889 		/* Try merging with the area just before va */
890 		if (a->base + a->size < va)
891 			continue;
892 
893 		/*
894 		 * If a->base is well past our range we're done.
895 		 * Note that if it's just the page after our range we'll
896 		 * try to merge.
897 		 */
898 		if (a->base > end_va)
899 			return;
900 
901 		if (a->base + a->size != a_next->base)
902 			continue;
903 		if (a->fobj != a_next->fobj || a->type != a_next->type ||
904 		    a->flags != a_next->flags || a->pgt != a_next->pgt)
905 			continue;
906 		if (a->fobj_pgoffs + a->size / SMALL_PAGE_SIZE !=
907 		    a_next->fobj_pgoffs)
908 			continue;
909 
910 		merge_area_with_next(uctx->areas, a, a_next);
911 		free_area(a_next);
912 		a_next = a;
913 	}
914 }
915 
916 static void rem_area(struct tee_pager_area_head *area_head,
917 		     struct tee_pager_area *area)
918 {
919 	struct tee_pager_pmem *pmem;
920 	size_t last_pgoffs = area->fobj_pgoffs +
921 			     (area->size >> SMALL_PAGE_SHIFT) - 1;
922 	uint32_t exceptions;
923 	size_t idx = 0;
924 	uint32_t a = 0;
925 
926 	exceptions = pager_lock_check_stack(64);
927 
928 	TAILQ_REMOVE(area_head, area, link);
929 	TAILQ_REMOVE(&area->fobj->areas, area, fobj_link);
930 
931 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
932 		if (pmem->fobj != area->fobj ||
933 		    pmem->fobj_pgidx < area->fobj_pgoffs ||
934 		    pmem->fobj_pgidx > last_pgoffs)
935 			continue;
936 
937 		idx = pmem_get_area_tblidx(pmem, area);
938 		area_get_entry(area, idx, NULL, &a);
939 		if (!(a & TEE_MATTR_VALID_BLOCK))
940 			continue;
941 
942 		area_set_entry(area, idx, 0, 0);
943 		area_tlbi_entry(area, idx);
944 		pgt_dec_used_entries(area->pgt);
945 	}
946 
947 	pager_unlock(exceptions);
948 
949 	free_area(area);
950 }
951 DECLARE_KEEP_PAGER(rem_area);
952 
953 void tee_pager_rem_um_region(struct user_mode_ctx *uctx, vaddr_t base,
954 			     size_t size)
955 {
956 	struct tee_pager_area *area;
957 	struct tee_pager_area *next_a;
958 	size_t s = ROUNDUP(size, SMALL_PAGE_SIZE);
959 
960 	TAILQ_FOREACH_SAFE(area, uctx->areas, link, next_a) {
961 		if (core_is_buffer_inside(area->base, area->size, base, s))
962 			rem_area(uctx->areas, area);
963 	}
964 	tlbi_asid(uctx->vm_info.asid);
965 }
966 
967 void tee_pager_rem_um_areas(struct user_mode_ctx *uctx)
968 {
969 	struct tee_pager_area *area = NULL;
970 
971 	if (!uctx->areas)
972 		return;
973 
974 	while (true) {
975 		area = TAILQ_FIRST(uctx->areas);
976 		if (!area)
977 			break;
978 		unlink_area(uctx->areas, area);
979 		free_area(area);
980 	}
981 
982 	free(uctx->areas);
983 }
984 
985 static bool __maybe_unused same_context(struct tee_pager_pmem *pmem)
986 {
987 	struct tee_pager_area *a = TAILQ_FIRST(&pmem->fobj->areas);
988 	void *ctx = a->pgt->ctx;
989 
990 	do {
991 		a = TAILQ_NEXT(a, fobj_link);
992 		if (!a)
993 			return true;
994 	} while (a->pgt->ctx == ctx);
995 
996 	return false;
997 }
998 
999 bool tee_pager_set_um_area_attr(struct user_mode_ctx *uctx, vaddr_t base,
1000 				size_t size, uint32_t flags)
1001 {
1002 	bool ret = false;
1003 	vaddr_t b = base;
1004 	size_t s = size;
1005 	size_t s2 = 0;
1006 	struct tee_pager_area *area = find_area(uctx->areas, b);
1007 	uint32_t exceptions = 0;
1008 	struct tee_pager_pmem *pmem = NULL;
1009 	uint32_t a = 0;
1010 	uint32_t f = 0;
1011 	uint32_t mattr = 0;
1012 	uint32_t f2 = 0;
1013 	size_t tblidx = 0;
1014 
1015 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
1016 	if (f & TEE_MATTR_UW)
1017 		f |= TEE_MATTR_PW;
1018 	mattr = get_area_mattr(f);
1019 
1020 	exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1021 
1022 	while (s) {
1023 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
1024 		if (!area || area->base != b || area->size != s2) {
1025 			ret = false;
1026 			goto out;
1027 		}
1028 		b += s2;
1029 		s -= s2;
1030 
1031 		if (area->flags == f)
1032 			goto next_area;
1033 
1034 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1035 			if (!pmem_is_covered_by_area(pmem, area))
1036 				continue;
1037 
1038 			tblidx = pmem_get_area_tblidx(pmem, area);
1039 			area_get_entry(area, tblidx, NULL, &a);
1040 			if (a == f)
1041 				continue;
1042 			area_set_entry(area, tblidx, 0, 0);
1043 			area_tlbi_entry(area, tblidx);
1044 
1045 			pmem->flags &= ~PMEM_FLAG_HIDDEN;
1046 			if (pmem_is_dirty(pmem))
1047 				f2 = mattr;
1048 			else
1049 				f2 = mattr & ~(TEE_MATTR_UW | TEE_MATTR_PW);
1050 			area_set_entry(area, tblidx, get_pmem_pa(pmem), f2);
1051 			if (!(a & TEE_MATTR_VALID_BLOCK))
1052 				pgt_inc_used_entries(area->pgt);
1053 			/*
1054 			 * Make sure the table update is visible before
1055 			 * continuing.
1056 			 */
1057 			dsb_ishst();
1058 
1059 			/*
1060 			 * Here's a problem if this page already is shared.
1061 			 * We need do icache invalidate for each context
1062 			 * in which it is shared. In practice this will
1063 			 * never happen.
1064 			 */
1065 			if (flags & TEE_MATTR_UX) {
1066 				void *va = (void *)area_idx2va(area, tblidx);
1067 
1068 				/* Assert that the pmem isn't shared. */
1069 				assert(same_context(pmem));
1070 
1071 				dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
1072 				icache_inv_user_range(va, SMALL_PAGE_SIZE);
1073 			}
1074 		}
1075 
1076 		area->flags = f;
1077 next_area:
1078 		area = TAILQ_NEXT(area, link);
1079 	}
1080 
1081 	ret = true;
1082 out:
1083 	pager_unlock(exceptions);
1084 	return ret;
1085 }
1086 
1087 DECLARE_KEEP_PAGER(tee_pager_set_um_area_attr);
1088 #endif /*CFG_PAGED_USER_TA*/
1089 
1090 void tee_pager_invalidate_fobj(struct fobj *fobj)
1091 {
1092 	struct tee_pager_pmem *pmem;
1093 	uint32_t exceptions;
1094 
1095 	exceptions = pager_lock_check_stack(64);
1096 
1097 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
1098 		if (pmem->fobj == fobj)
1099 			pmem_clear(pmem);
1100 
1101 	pager_unlock(exceptions);
1102 }
1103 DECLARE_KEEP_PAGER(tee_pager_invalidate_fobj);
1104 
1105 static struct tee_pager_pmem *pmem_find(struct tee_pager_area *area,
1106 					unsigned int tblidx)
1107 {
1108 	struct tee_pager_pmem *pmem = NULL;
1109 
1110 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link)
1111 		if (pmem->fobj == area->fobj &&
1112 		    pmem_get_area_tblidx(pmem, area) == tblidx)
1113 			return pmem;
1114 
1115 	return NULL;
1116 }
1117 
1118 static bool tee_pager_unhide_page(struct tee_pager_area *area, vaddr_t page_va)
1119 {
1120 	unsigned int tblidx = area_va2idx(area, page_va);
1121 	struct tee_pager_pmem *pmem = pmem_find(area, tblidx);
1122 	uint32_t a = get_area_mattr(area->flags);
1123 	uint32_t attr = 0;
1124 	paddr_t pa = 0;
1125 
1126 	if (!pmem)
1127 		return false;
1128 
1129 	area_get_entry(area, tblidx, NULL, &attr);
1130 	if (attr & TEE_MATTR_VALID_BLOCK)
1131 		return false;
1132 
1133 	/*
1134 	 * The page is hidden, or not not mapped yet. Unhide the page and
1135 	 * move it to the tail.
1136 	 *
1137 	 * Since the page isn't mapped there doesn't exist a valid TLB entry
1138 	 * for this address, so no TLB invalidation is required after setting
1139 	 * the new entry. A DSB is needed though, to make the write visible.
1140 	 *
1141 	 * For user executable pages it's more complicated. Those pages can
1142 	 * be shared between multiple TA mappings and thus populated by
1143 	 * another TA. The reference manual states that:
1144 	 *
1145 	 * "instruction cache maintenance is required only after writing
1146 	 * new data to a physical address that holds an instruction."
1147 	 *
1148 	 * So for hidden pages we would not need to invalidate i-cache, but
1149 	 * for newly populated pages we do. Since we don't know which we
1150 	 * have to assume the worst and always invalidate the i-cache. We
1151 	 * don't need to clean the d-cache though, since that has already
1152 	 * been done earlier.
1153 	 *
1154 	 * Additional bookkeeping to tell if the i-cache invalidation is
1155 	 * needed or not is left as a future optimization.
1156 	 */
1157 
1158 	/* If it's not a dirty block, then it should be read only. */
1159 	if (!pmem_is_dirty(pmem))
1160 		a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1161 
1162 	pa = get_pmem_pa(pmem);
1163 	pmem->flags &= ~PMEM_FLAG_HIDDEN;
1164 	if (area->flags & TEE_MATTR_UX) {
1165 		void *va = (void *)area_idx2va(area, tblidx);
1166 
1167 		/* Set a temporary read-only mapping */
1168 		assert(!(a & (TEE_MATTR_UW | TEE_MATTR_PW)));
1169 		area_set_entry(area, tblidx, pa, a & ~TEE_MATTR_UX);
1170 		dsb_ishst();
1171 
1172 		icache_inv_user_range(va, SMALL_PAGE_SIZE);
1173 
1174 		/* Set the final mapping */
1175 		area_set_entry(area, tblidx, pa, a);
1176 		area_tlbi_entry(area, tblidx);
1177 	} else {
1178 		area_set_entry(area, tblidx, pa, a);
1179 		dsb_ishst();
1180 	}
1181 	pgt_inc_used_entries(area->pgt);
1182 
1183 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1184 	TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1185 	incr_hidden_hits();
1186 	return true;
1187 }
1188 
1189 static void tee_pager_hide_pages(void)
1190 {
1191 	struct tee_pager_pmem *pmem = NULL;
1192 	size_t n = 0;
1193 
1194 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1195 		if (n >= TEE_PAGER_NHIDE)
1196 			break;
1197 		n++;
1198 
1199 		/* we cannot hide pages when pmem->fobj is not defined. */
1200 		if (!pmem->fobj)
1201 			continue;
1202 
1203 		if (pmem_is_hidden(pmem))
1204 			continue;
1205 
1206 		pmem->flags |= PMEM_FLAG_HIDDEN;
1207 		pmem_unmap(pmem, NULL);
1208 	}
1209 }
1210 
1211 static unsigned int __maybe_unused
1212 num_areas_with_pmem(struct tee_pager_pmem *pmem)
1213 {
1214 	struct tee_pager_area *a = NULL;
1215 	unsigned int num_matches = 0;
1216 
1217 	TAILQ_FOREACH(a, &pmem->fobj->areas, fobj_link)
1218 		if (pmem_is_covered_by_area(pmem, a))
1219 			num_matches++;
1220 
1221 	return num_matches;
1222 }
1223 
1224 /*
1225  * Find mapped pmem, hide and move to pageble pmem.
1226  * Return false if page was not mapped, and true if page was mapped.
1227  */
1228 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
1229 				       vaddr_t page_va)
1230 {
1231 	struct tee_pager_pmem *pmem;
1232 	size_t tblidx = 0;
1233 	size_t pgidx = area_va2idx(area, page_va) + area->fobj_pgoffs -
1234 		       ((area->base & CORE_MMU_PGDIR_MASK) >> SMALL_PAGE_SHIFT);
1235 
1236 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
1237 		if (pmem->fobj != area->fobj || pmem->fobj_pgidx != pgidx)
1238 			continue;
1239 
1240 		/*
1241 		 * Locked pages may not be shared. We're asserting that the
1242 		 * number of areas using this pmem is one and only one as
1243 		 * we're about to unmap it.
1244 		 */
1245 		assert(num_areas_with_pmem(pmem) == 1);
1246 
1247 		tblidx = pmem_get_area_tblidx(pmem, area);
1248 		area_set_entry(area, tblidx, 0, 0);
1249 		pgt_dec_used_entries(area->pgt);
1250 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
1251 		pmem_clear(pmem);
1252 		tee_pager_npages++;
1253 		set_npages();
1254 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
1255 		incr_zi_released();
1256 		return true;
1257 	}
1258 
1259 	return false;
1260 }
1261 
1262 static void pager_deploy_page(struct tee_pager_pmem *pmem,
1263 			      struct tee_pager_area *area, vaddr_t page_va,
1264 			      bool clean_user_cache, bool writable)
1265 {
1266 	unsigned int tblidx = area_va2idx(area, page_va);
1267 	uint32_t attr = get_area_mattr(area->flags);
1268 	struct core_mmu_table_info *ti = NULL;
1269 	uint8_t *va_alias = pmem->va_alias;
1270 	paddr_t pa = get_pmem_pa(pmem);
1271 	unsigned int idx_alias = 0;
1272 	uint32_t attr_alias = 0;
1273 	paddr_t pa_alias = 0;
1274 
1275 	/* Ensure we are allowed to write to aliased virtual page */
1276 	ti = find_table_info((vaddr_t)va_alias);
1277 	idx_alias = core_mmu_va2idx(ti, (vaddr_t)va_alias);
1278 	core_mmu_get_entry(ti, idx_alias, &pa_alias, &attr_alias);
1279 	if (!(attr_alias & TEE_MATTR_PW)) {
1280 		attr_alias |= TEE_MATTR_PW;
1281 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
1282 		tlbi_mva_allasid((vaddr_t)va_alias);
1283 	}
1284 
1285 	asan_tag_access(va_alias, va_alias + SMALL_PAGE_SIZE);
1286 	if (fobj_load_page(pmem->fobj, pmem->fobj_pgidx, va_alias)) {
1287 		EMSG("PH 0x%" PRIxVA " failed", page_va);
1288 		panic();
1289 	}
1290 	switch (area->type) {
1291 	case PAGER_AREA_TYPE_RO:
1292 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1293 		incr_ro_hits();
1294 		/* Forbid write to aliases for read-only (maybe exec) pages */
1295 		attr_alias &= ~TEE_MATTR_PW;
1296 		core_mmu_set_entry(ti, idx_alias, pa_alias, attr_alias);
1297 		tlbi_mva_allasid((vaddr_t)va_alias);
1298 		break;
1299 	case PAGER_AREA_TYPE_RW:
1300 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1301 		if (writable && (attr & (TEE_MATTR_PW | TEE_MATTR_UW)))
1302 			pmem->flags |= PMEM_FLAG_DIRTY;
1303 		incr_rw_hits();
1304 		break;
1305 	case PAGER_AREA_TYPE_LOCK:
1306 		/* Move page to lock list */
1307 		if (tee_pager_npages <= 0)
1308 			panic("Running out of pages");
1309 		tee_pager_npages--;
1310 		set_npages();
1311 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
1312 		break;
1313 	default:
1314 		panic();
1315 	}
1316 	asan_tag_no_access(va_alias, va_alias + SMALL_PAGE_SIZE);
1317 
1318 	if (!writable)
1319 		attr &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
1320 
1321 	/*
1322 	 * We've updated the page using the aliased mapping and
1323 	 * some cache maintenance is now needed if it's an
1324 	 * executable page.
1325 	 *
1326 	 * Since the d-cache is a Physically-indexed,
1327 	 * physically-tagged (PIPT) cache we can clean either the
1328 	 * aliased address or the real virtual address. In this
1329 	 * case we choose the real virtual address.
1330 	 *
1331 	 * The i-cache can also be PIPT, but may be something else
1332 	 * too like VIPT. The current code requires the caches to
1333 	 * implement the IVIPT extension, that is:
1334 	 * "instruction cache maintenance is required only after
1335 	 * writing new data to a physical address that holds an
1336 	 * instruction."
1337 	 *
1338 	 * To portably invalidate the icache the page has to
1339 	 * be mapped at the final virtual address but not
1340 	 * executable.
1341 	 */
1342 	if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1343 		uint32_t mask = TEE_MATTR_PX | TEE_MATTR_UX |
1344 				TEE_MATTR_PW | TEE_MATTR_UW;
1345 		void *va = (void *)page_va;
1346 
1347 		/* Set a temporary read-only mapping */
1348 		area_set_entry(area, tblidx, pa, attr & ~mask);
1349 		area_tlbi_entry(area, tblidx);
1350 
1351 		dcache_clean_range_pou(va, SMALL_PAGE_SIZE);
1352 		if (clean_user_cache)
1353 			icache_inv_user_range(va, SMALL_PAGE_SIZE);
1354 		else
1355 			icache_inv_range(va, SMALL_PAGE_SIZE);
1356 
1357 		/* Set the final mapping */
1358 		area_set_entry(area, tblidx, pa, attr);
1359 		area_tlbi_entry(area, tblidx);
1360 	} else {
1361 		area_set_entry(area, tblidx, pa, attr);
1362 		/*
1363 		 * No need to flush TLB for this entry, it was
1364 		 * invalid. We should use a barrier though, to make
1365 		 * sure that the change is visible.
1366 		 */
1367 		dsb_ishst();
1368 	}
1369 	pgt_inc_used_entries(area->pgt);
1370 
1371 	FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA, page_va, pa);
1372 }
1373 
1374 static void make_dirty_page(struct tee_pager_pmem *pmem,
1375 			    struct tee_pager_area *area, unsigned int tblidx,
1376 			    paddr_t pa, vaddr_t page_va)
1377 {
1378 	assert(area->flags & (TEE_MATTR_UW | TEE_MATTR_PW));
1379 	assert(!(pmem->flags & PMEM_FLAG_DIRTY));
1380 
1381 	FMSG("Dirty %#"PRIxVA, page_va);
1382 	pmem->flags |= PMEM_FLAG_DIRTY;
1383 	area_set_entry(area, tblidx, pa, get_area_mattr(area->flags));
1384 	area_tlbi_page_va(area, page_va);
1385 }
1386 
1387 /*
1388  * This function takes a reference to a page (@fobj + fobj_pgidx) and makes
1389  * the corresponding IV available.
1390  *
1391  * In case the page needs to be saved the IV must be writable, consequently
1392  * is the page holding the IV made dirty. If the page instead only is to
1393  * be verified it's enough that the page holding the IV is readonly and
1394  * thus doesn't have to be made dirty too.
1395  *
1396  * This function depends on pager_spare_pmem pointing to a free pmem when
1397  * entered. In case the page holding the needed IV isn't mapped this spare
1398  * pmem is used to map the page. If this function has used pager_spare_pmem
1399  * and assigned it to NULL it must be reassigned with a new free pmem
1400  * before this function can be called again.
1401  */
1402 static void make_iv_available(struct fobj *fobj, unsigned int fobj_pgidx,
1403 			      bool writable)
1404 {
1405 	struct tee_pager_area *area = pager_iv_area;
1406 	struct tee_pager_pmem *pmem = NULL;
1407 	unsigned int tblidx = 0;
1408 	vaddr_t page_va = 0;
1409 	uint32_t attr = 0;
1410 	paddr_t pa = 0;
1411 
1412 	page_va = fobj_get_iv_vaddr(fobj, fobj_pgidx) & ~SMALL_PAGE_MASK;
1413 	if (!page_va)
1414 		return;
1415 
1416 	assert(area && area->type == PAGER_AREA_TYPE_RW);
1417 	assert(pager_spare_pmem);
1418 	assert(core_is_buffer_inside(page_va, 1, area->base, area->size));
1419 
1420 	tblidx = area_va2idx(area, page_va);
1421 	/*
1422 	 * We don't care if tee_pager_unhide_page() succeeds or not, we're
1423 	 * still checking the attributes afterwards.
1424 	 */
1425 	tee_pager_unhide_page(area, page_va);
1426 	area_get_entry(area, tblidx, &pa, &attr);
1427 	if (!(attr & TEE_MATTR_VALID_BLOCK)) {
1428 		/*
1429 		 * We're using the spare pmem to map the IV corresponding
1430 		 * to another page.
1431 		 */
1432 		pmem = pager_spare_pmem;
1433 		pager_spare_pmem = NULL;
1434 		pmem_assign_fobj_page(pmem, area, page_va);
1435 
1436 		if (writable)
1437 			pmem->flags |= PMEM_FLAG_DIRTY;
1438 
1439 		pager_deploy_page(pmem, area, page_va,
1440 				  false /*!clean_user_cache*/, writable);
1441 	} else if (writable && !(attr & TEE_MATTR_PW)) {
1442 		pmem = pmem_find(area, tblidx);
1443 		/* Note that pa is valid since TEE_MATTR_VALID_BLOCK is set */
1444 		make_dirty_page(pmem, area, tblidx, pa, page_va);
1445 	}
1446 }
1447 
1448 static void pager_get_page(struct tee_pager_area *area, struct abort_info *ai,
1449 			   bool clean_user_cache)
1450 {
1451 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1452 	unsigned int tblidx = area_va2idx(area, page_va);
1453 	struct tee_pager_pmem *pmem = NULL;
1454 	bool writable = false;
1455 	uint32_t attr = 0;
1456 
1457 	/*
1458 	 * Get a pmem to load code and data into, also make sure
1459 	 * the corresponding IV page is available.
1460 	 */
1461 	while (true) {
1462 		pmem = TAILQ_FIRST(&tee_pager_pmem_head);
1463 		if (!pmem) {
1464 			EMSG("No pmem entries");
1465 			abort_print(ai);
1466 			panic();
1467 		}
1468 
1469 		if (pmem->fobj) {
1470 			pmem_unmap(pmem, NULL);
1471 			if (pmem_is_dirty(pmem)) {
1472 				uint8_t *va = pmem->va_alias;
1473 
1474 				make_iv_available(pmem->fobj, pmem->fobj_pgidx,
1475 						  true /*writable*/);
1476 				asan_tag_access(va, va + SMALL_PAGE_SIZE);
1477 				if (fobj_save_page(pmem->fobj, pmem->fobj_pgidx,
1478 						   pmem->va_alias))
1479 					panic("fobj_save_page");
1480 				asan_tag_no_access(va, va + SMALL_PAGE_SIZE);
1481 
1482 				pmem_clear(pmem);
1483 
1484 				/*
1485 				 * If the spare pmem was used by
1486 				 * make_iv_available() we need to replace
1487 				 * it with the just freed pmem.
1488 				 *
1489 				 * See make_iv_available() for details.
1490 				 */
1491 				if (!pager_spare_pmem) {
1492 					TAILQ_REMOVE(&tee_pager_pmem_head,
1493 						     pmem, link);
1494 					pager_spare_pmem = pmem;
1495 					pmem = NULL;
1496 				}
1497 
1498 				/*
1499 				 * Check if the needed virtual page was
1500 				 * made available as a side effect of the
1501 				 * call to make_iv_available() above. If so
1502 				 * we're done.
1503 				 */
1504 				area_get_entry(area, tblidx, NULL, &attr);
1505 				if (attr & TEE_MATTR_VALID_BLOCK)
1506 					return;
1507 
1508 				/*
1509 				 * The freed pmem was used to replace the
1510 				 * consumed pager_spare_pmem above. Restart
1511 				 * to find another pmem.
1512 				 */
1513 				if (!pmem)
1514 					continue;
1515 			}
1516 		}
1517 
1518 		TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
1519 		pmem_clear(pmem);
1520 
1521 		pmem_assign_fobj_page(pmem, area, page_va);
1522 		make_iv_available(pmem->fobj, pmem->fobj_pgidx,
1523 				  false /*!writable*/);
1524 		if (pager_spare_pmem)
1525 			break;
1526 
1527 		/*
1528 		 * The spare pmem was used by make_iv_available(). We need
1529 		 * to replace it with the just freed pmem. And get another
1530 		 * pmem.
1531 		 *
1532 		 * See make_iv_available() for details.
1533 		 */
1534 		pmem_clear(pmem);
1535 		pager_spare_pmem = pmem;
1536 	}
1537 
1538 	/*
1539 	 * PAGER_AREA_TYPE_LOCK are always writable while PAGER_AREA_TYPE_RO
1540 	 * are never writable.
1541 	 *
1542 	 * Pages from PAGER_AREA_TYPE_RW starts read-only to be
1543 	 * able to tell when they are updated and should be tagged
1544 	 * as dirty.
1545 	 */
1546 	if (area->type == PAGER_AREA_TYPE_LOCK ||
1547 	    (area->type == PAGER_AREA_TYPE_RW && abort_is_write_fault(ai)))
1548 		writable = true;
1549 	else
1550 		writable = false;
1551 
1552 	pager_deploy_page(pmem, area, page_va, clean_user_cache, writable);
1553 }
1554 
1555 static bool pager_update_permissions(struct tee_pager_area *area,
1556 			struct abort_info *ai, bool *handled)
1557 {
1558 	unsigned int pgidx = area_va2idx(area, ai->va);
1559 	struct tee_pager_pmem *pmem = NULL;
1560 	uint32_t attr = 0;
1561 	paddr_t pa = 0;
1562 
1563 	*handled = false;
1564 
1565 	area_get_entry(area, pgidx, &pa, &attr);
1566 
1567 	/* Not mapped */
1568 	if (!(attr & TEE_MATTR_VALID_BLOCK))
1569 		return false;
1570 
1571 	/* Not readable, should not happen */
1572 	if (abort_is_user_exception(ai)) {
1573 		if (!(attr & TEE_MATTR_UR))
1574 			return true;
1575 	} else {
1576 		if (!(attr & TEE_MATTR_PR)) {
1577 			abort_print_error(ai);
1578 			panic();
1579 		}
1580 	}
1581 
1582 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
1583 	case CORE_MMU_FAULT_TRANSLATION:
1584 	case CORE_MMU_FAULT_READ_PERMISSION:
1585 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
1586 			/* Check attempting to execute from an NOX page */
1587 			if (abort_is_user_exception(ai)) {
1588 				if (!(attr & TEE_MATTR_UX))
1589 					return true;
1590 			} else {
1591 				if (!(attr & TEE_MATTR_PX)) {
1592 					abort_print_error(ai);
1593 					panic();
1594 				}
1595 			}
1596 		}
1597 		/* Since the page is mapped now it's OK */
1598 		break;
1599 	case CORE_MMU_FAULT_WRITE_PERMISSION:
1600 		/* Check attempting to write to an RO page */
1601 		pmem = pmem_find(area, pgidx);
1602 		if (!pmem)
1603 			panic();
1604 		if (abort_is_user_exception(ai)) {
1605 			if (!(area->flags & TEE_MATTR_UW))
1606 				return true;
1607 			if (!(attr & TEE_MATTR_UW))
1608 				make_dirty_page(pmem, area, pgidx, pa,
1609 						ai->va & ~SMALL_PAGE_MASK);
1610 		} else {
1611 			if (!(area->flags & TEE_MATTR_PW)) {
1612 				abort_print_error(ai);
1613 				panic();
1614 			}
1615 			if (!(attr & TEE_MATTR_PW))
1616 				make_dirty_page(pmem, area, pgidx, pa,
1617 						ai->va & ~SMALL_PAGE_MASK);
1618 		}
1619 		/* Since permissions has been updated now it's OK */
1620 		break;
1621 	default:
1622 		/* Some fault we can't deal with */
1623 		if (abort_is_user_exception(ai))
1624 			return true;
1625 		abort_print_error(ai);
1626 		panic();
1627 	}
1628 	*handled = true;
1629 	return true;
1630 }
1631 
1632 #ifdef CFG_TEE_CORE_DEBUG
1633 static void stat_handle_fault(void)
1634 {
1635 	static size_t num_faults;
1636 	static size_t min_npages = SIZE_MAX;
1637 	static size_t total_min_npages = SIZE_MAX;
1638 
1639 	num_faults++;
1640 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
1641 		DMSG("nfaults %zu npages %zu (min %zu)",
1642 		     num_faults, tee_pager_npages, min_npages);
1643 		min_npages = tee_pager_npages; /* reset */
1644 	}
1645 	if (tee_pager_npages < min_npages)
1646 		min_npages = tee_pager_npages;
1647 	if (tee_pager_npages < total_min_npages)
1648 		total_min_npages = tee_pager_npages;
1649 }
1650 #else
1651 static void stat_handle_fault(void)
1652 {
1653 }
1654 #endif
1655 
1656 bool tee_pager_handle_fault(struct abort_info *ai)
1657 {
1658 	struct tee_pager_area *area;
1659 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
1660 	uint32_t exceptions;
1661 	bool ret;
1662 	bool clean_user_cache = false;
1663 
1664 #ifdef TEE_PAGER_DEBUG_PRINT
1665 	if (!abort_is_user_exception(ai))
1666 		abort_print(ai);
1667 #endif
1668 
1669 	/*
1670 	 * We're updating pages that can affect several active CPUs at a
1671 	 * time below. We end up here because a thread tries to access some
1672 	 * memory that isn't available. We have to be careful when making
1673 	 * that memory available as other threads may succeed in accessing
1674 	 * that address the moment after we've made it available.
1675 	 *
1676 	 * That means that we can't just map the memory and populate the
1677 	 * page, instead we use the aliased mapping to populate the page
1678 	 * and once everything is ready we map it.
1679 	 */
1680 	exceptions = pager_lock(ai);
1681 
1682 	stat_handle_fault();
1683 
1684 	/* check if the access is valid */
1685 	if (abort_is_user_exception(ai)) {
1686 		area = find_uta_area(ai->va);
1687 		clean_user_cache = true;
1688 	} else {
1689 		area = find_area(&tee_pager_area_head, ai->va);
1690 		if (!area) {
1691 			area = find_uta_area(ai->va);
1692 			clean_user_cache = true;
1693 		}
1694 	}
1695 	if (!area || !area->pgt) {
1696 		ret = false;
1697 		goto out;
1698 	}
1699 
1700 	if (tee_pager_unhide_page(area, page_va))
1701 		goto out_success;
1702 
1703 	/*
1704 	 * The page wasn't hidden, but some other core may have
1705 	 * updated the table entry before we got here or we need
1706 	 * to make a read-only page read-write (dirty).
1707 	 */
1708 	if (pager_update_permissions(area, ai, &ret)) {
1709 		/*
1710 		 * Nothing more to do with the abort. The problem
1711 		 * could already have been dealt with from another
1712 		 * core or if ret is false the TA will be paniced.
1713 		 */
1714 		goto out;
1715 	}
1716 
1717 	pager_get_page(area, ai, clean_user_cache);
1718 
1719 out_success:
1720 	tee_pager_hide_pages();
1721 	ret = true;
1722 out:
1723 	pager_unlock(exceptions);
1724 	return ret;
1725 }
1726 
1727 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1728 {
1729 	size_t n;
1730 
1731 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1732 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1733 
1734 	/* setup memory */
1735 	for (n = 0; n < npages; n++) {
1736 		struct core_mmu_table_info *ti;
1737 		struct tee_pager_pmem *pmem;
1738 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1739 		unsigned int pgidx;
1740 		paddr_t pa;
1741 		uint32_t attr;
1742 
1743 		ti = find_table_info(va);
1744 		pgidx = core_mmu_va2idx(ti, va);
1745 		/*
1746 		 * Note that we can only support adding pages in the
1747 		 * valid range of this table info, currently not a problem.
1748 		 */
1749 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1750 
1751 		/* Ignore unmapped pages/blocks */
1752 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1753 			continue;
1754 
1755 		pmem = calloc(1, sizeof(struct tee_pager_pmem));
1756 		if (!pmem)
1757 			panic("out of mem");
1758 		pmem_clear(pmem);
1759 
1760 		pmem->va_alias = pager_add_alias_page(pa);
1761 
1762 		if (unmap) {
1763 			core_mmu_set_entry(ti, pgidx, 0, 0);
1764 			pgt_dec_used_entries(find_core_pgt(va));
1765 		} else {
1766 			struct tee_pager_area *area = NULL;
1767 
1768 			/*
1769 			 * The page is still mapped, let's assign the area
1770 			 * and update the protection bits accordingly.
1771 			 */
1772 			area = find_area(&tee_pager_area_head, va);
1773 			assert(area && area->pgt == find_core_pgt(va));
1774 			pmem_assign_fobj_page(pmem, area, va);
1775 			assert(pgidx == pmem_get_area_tblidx(pmem, area));
1776 			assert(pa == get_pmem_pa(pmem));
1777 			area_set_entry(area, pgidx, pa,
1778 				       get_area_mattr(area->flags));
1779 		}
1780 
1781 		if (unmap && !pager_spare_pmem) {
1782 			pager_spare_pmem = pmem;
1783 		} else {
1784 			tee_pager_npages++;
1785 			incr_npages_all();
1786 			set_npages();
1787 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1788 		}
1789 	}
1790 
1791 	/*
1792 	 * As this is done at inits, invalidate all TLBs once instead of
1793 	 * targeting only the modified entries.
1794 	 */
1795 	tlbi_all();
1796 }
1797 
1798 #ifdef CFG_PAGED_USER_TA
1799 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1800 {
1801 	struct pgt *p = pgt;
1802 
1803 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1804 		p = SLIST_NEXT(p, link);
1805 	return p;
1806 }
1807 
1808 void tee_pager_assign_um_tables(struct user_mode_ctx *uctx)
1809 {
1810 	struct tee_pager_area *area = NULL;
1811 	struct pgt *pgt = NULL;
1812 
1813 	if (!uctx->areas)
1814 		return;
1815 
1816 	pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1817 	TAILQ_FOREACH(area, uctx->areas, link) {
1818 		if (!area->pgt)
1819 			area->pgt = find_pgt(pgt, area->base);
1820 		else
1821 			assert(area->pgt == find_pgt(pgt, area->base));
1822 		if (!area->pgt)
1823 			panic();
1824 	}
1825 }
1826 
1827 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1828 {
1829 	struct tee_pager_pmem *pmem = NULL;
1830 	struct tee_pager_area *area = NULL;
1831 	struct tee_pager_area_head *areas = NULL;
1832 	uint32_t exceptions = pager_lock_check_stack(SMALL_PAGE_SIZE);
1833 
1834 	if (!pgt->num_used_entries)
1835 		goto out;
1836 
1837 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1838 		if (pmem->fobj)
1839 			pmem_unmap(pmem, pgt);
1840 	}
1841 	assert(!pgt->num_used_entries);
1842 
1843 out:
1844 	areas = to_user_mode_ctx(pgt->ctx)->areas;
1845 	if (areas) {
1846 		TAILQ_FOREACH(area, areas, link) {
1847 			if (area->pgt == pgt)
1848 				area->pgt = NULL;
1849 		}
1850 	}
1851 
1852 	pager_unlock(exceptions);
1853 }
1854 DECLARE_KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1855 #endif /*CFG_PAGED_USER_TA*/
1856 
1857 void tee_pager_release_phys(void *addr, size_t size)
1858 {
1859 	bool unmaped = false;
1860 	vaddr_t va = (vaddr_t)addr;
1861 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1862 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1863 	struct tee_pager_area *area;
1864 	uint32_t exceptions;
1865 
1866 	if (end <= begin)
1867 		return;
1868 
1869 	exceptions = pager_lock_check_stack(128);
1870 
1871 	for (va = begin; va < end; va += SMALL_PAGE_SIZE) {
1872 		area = find_area(&tee_pager_area_head, va);
1873 		if (!area)
1874 			panic();
1875 		unmaped |= tee_pager_release_one_phys(area, va);
1876 	}
1877 
1878 	if (unmaped)
1879 		tlbi_mva_range(begin, end - begin, SMALL_PAGE_SIZE);
1880 
1881 	pager_unlock(exceptions);
1882 }
1883 DECLARE_KEEP_PAGER(tee_pager_release_phys);
1884 
1885 void *tee_pager_alloc(size_t size)
1886 {
1887 	tee_mm_entry_t *mm = NULL;
1888 	uint8_t *smem = NULL;
1889 	size_t num_pages = 0;
1890 	struct fobj *fobj = NULL;
1891 
1892 	if (!size)
1893 		return NULL;
1894 
1895 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1896 	if (!mm)
1897 		return NULL;
1898 
1899 	smem = (uint8_t *)tee_mm_get_smem(mm);
1900 	num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
1901 	fobj = fobj_locked_paged_alloc(num_pages);
1902 	if (!fobj) {
1903 		tee_mm_free(mm);
1904 		return NULL;
1905 	}
1906 
1907 	tee_pager_add_core_area((vaddr_t)smem, PAGER_AREA_TYPE_LOCK, fobj);
1908 	fobj_put(fobj);
1909 
1910 	asan_tag_access(smem, smem + num_pages * SMALL_PAGE_SIZE);
1911 
1912 	return smem;
1913 }
1914 
1915 vaddr_t tee_pager_init_iv_area(struct fobj *fobj)
1916 {
1917 	tee_mm_entry_t *mm = NULL;
1918 	uint8_t *smem = NULL;
1919 
1920 	assert(!pager_iv_area);
1921 
1922 	mm = tee_mm_alloc(&tee_mm_vcore, fobj->num_pages * SMALL_PAGE_SIZE);
1923 	if (!mm)
1924 		panic();
1925 
1926 	smem = (uint8_t *)tee_mm_get_smem(mm);
1927 	tee_pager_add_core_area((vaddr_t)smem, PAGER_AREA_TYPE_RW, fobj);
1928 	fobj_put(fobj);
1929 
1930 	asan_tag_access(smem, smem + fobj->num_pages * SMALL_PAGE_SIZE);
1931 
1932 	pager_iv_area = find_area(&tee_pager_area_head, (vaddr_t)smem);
1933 	assert(pager_iv_area && pager_iv_area->fobj == fobj);
1934 
1935 	return (vaddr_t)smem;
1936 }
1937