xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision ef4bc451c262f007562867ea4e5f4ca9f26459fd)
1 /*
2  * Copyright (c) 2016, Linaro Limited
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  * this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  * this list of conditions and the following disclaimer in the documentation
14  * and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <arm.h>
30 #include <assert.h>
31 #include <keep.h>
32 #include <sys/queue.h>
33 #include <kernel/abort.h>
34 #include <kernel/panic.h>
35 #include <kernel/spinlock.h>
36 #include <kernel/tee_misc.h>
37 #include <kernel/tee_ta_manager.h>
38 #include <kernel/thread.h>
39 #include <mm/core_memprot.h>
40 #include <mm/tee_mm.h>
41 #include <mm/tee_mmu_defs.h>
42 #include <mm/tee_pager.h>
43 #include <types_ext.h>
44 #include <stdlib.h>
45 #include <tee_api_defines.h>
46 #include <tee/tee_cryp_provider.h>
47 #include <trace.h>
48 #include <utee_defines.h>
49 #include <util.h>
50 
51 #include "pager_private.h"
52 
53 #define PAGER_AE_KEY_BITS	256
54 
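/*
 * struct pager_rw_pstate - per-page state for pages in a read/write area.
 *
 * @iv		64-bit counter used to build the AES-GCM IV; incremented each
 *		time the page is encrypted and written back to the store. An
 *		@iv of 0 means the page has never been saved, so it is loaded
 *		zero-initialized instead of decrypted.
 * @tag	AES-GCM authentication tag of the encrypted page in the store.
 */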
55 struct pager_rw_pstate {
56 	uint64_t iv;
57 	uint8_t tag[PAGER_AES_GCM_TAG_LEN];
58 };
59 
60 enum area_type {
61 	AREA_TYPE_RO,
62 	AREA_TYPE_RW,
63 	AREA_TYPE_LOCK,
64 };
65 
66 struct tee_pager_area {
67 	union {
68 		const uint8_t *hashes;
69 		struct pager_rw_pstate *rwp;
70 	} u;
71 	uint8_t *store;
72 	enum area_type type;
73 	uint32_t flags;
74 	vaddr_t base;
75 	size_t size;
76 	struct pgt *pgt;
77 	TAILQ_ENTRY(tee_pager_area) link;
78 };
79 
80 TAILQ_HEAD(tee_pager_area_head, tee_pager_area);
81 
82 static struct tee_pager_area_head tee_pager_area_head =
83 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
84 
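/* Page index value used for pmem entries that aren't mapped in any area */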
85 #define INVALID_PGIDX	UINT_MAX
86 
87 /*
88  * struct tee_pager_pmem - Represents a physical page used for paging.
89  *
90  * @pgidx	index of the page's entry in the translation table of @area
91  * @va_alias	Virtual address where the physical page is always aliased.
92  *		Used during remapping of the page when the content needs to
93  *		be updated before it's available at the new location.
94  * @area	a pointer to the pager area
95  */
96 struct tee_pager_pmem {
97 	unsigned pgidx;
98 	void *va_alias;
99 	struct tee_pager_area *area;
100 	TAILQ_ENTRY(tee_pager_pmem) link;
101 };
102 
103 /* The list of physical pages. The first page in the list is the oldest */
104 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
105 
106 static struct tee_pager_pmem_head tee_pager_pmem_head =
107 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
108 
109 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
110 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
111 
112 static uint8_t pager_ae_key[PAGER_AE_KEY_BITS / 8];
113 
114 /* Maximum number of pages to hide at a time */
115 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
116 
117 /* Number of registered physical pages, used when hiding pages. */
118 static size_t tee_pager_npages;
119 
120 #ifdef CFG_WITH_STATS
121 static struct tee_pager_stats pager_stats;
122 
123 static inline void incr_ro_hits(void)
124 {
125 	pager_stats.ro_hits++;
126 }
127 
128 static inline void incr_rw_hits(void)
129 {
130 	pager_stats.rw_hits++;
131 }
132 
133 static inline void incr_hidden_hits(void)
134 {
135 	pager_stats.hidden_hits++;
136 }
137 
138 static inline void incr_zi_released(void)
139 {
140 	pager_stats.zi_released++;
141 }
142 
143 static inline void incr_npages_all(void)
144 {
145 	pager_stats.npages_all++;
146 }
147 
148 static inline void set_npages(void)
149 {
150 	pager_stats.npages = tee_pager_npages;
151 }
152 
153 void tee_pager_get_stats(struct tee_pager_stats *stats)
154 {
155 	*stats = pager_stats;
156 
157 	pager_stats.hidden_hits = 0;
158 	pager_stats.ro_hits = 0;
159 	pager_stats.rw_hits = 0;
160 	pager_stats.zi_released = 0;
161 }
162 
163 #else /* CFG_WITH_STATS */
164 static inline void incr_ro_hits(void) { }
165 static inline void incr_rw_hits(void) { }
166 static inline void incr_hidden_hits(void) { }
167 static inline void incr_zi_released(void) { }
168 static inline void incr_npages_all(void) { }
169 static inline void set_npages(void) { }
170 
171 void tee_pager_get_stats(struct tee_pager_stats *stats)
172 {
173 	memset(stats, 0, sizeof(struct tee_pager_stats));
174 }
175 #endif /* CFG_WITH_STATS */
176 
177 static struct pgt pager_core_pgt;
178 struct core_mmu_table_info tee_pager_tbl_info;
179 static struct core_mmu_table_info pager_alias_tbl_info;
180 
181 static unsigned pager_spinlock = SPINLOCK_UNLOCK;
182 
183 /* Defines the range of the alias area */
184 static tee_mm_entry_t *pager_alias_area;
185 /*
186  * Physical pages are added to the alias area in a stack-like fashion;
187  * @pager_alias_next_free gives the address of the next free entry as
188  * long as @pager_alias_next_free != 0.
189  */
190 static uintptr_t pager_alias_next_free;
191 
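/*
 * The pager lock serializes all updates to the pager state (the area and
 * pmem lists plus the translation table entries they reference) and is
 * held with all exceptions masked. Typical usage:
 *
 *	uint32_t exceptions = pager_lock();
 *	... update pager state ...
 *	pager_unlock(exceptions);
 */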
192 static uint32_t pager_lock(void)
193 {
194 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
195 
196 	cpu_spin_lock(&pager_spinlock);
197 	return exceptions;
198 }
199 
200 static void pager_unlock(uint32_t exceptions)
201 {
202 	cpu_spin_unlock(&pager_spinlock);
203 	thread_set_exceptions(exceptions);
204 }
205 
206 static void set_alias_area(tee_mm_entry_t *mm)
207 {
208 	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
209 	size_t tbl_va_size;
210 	unsigned idx;
211 	unsigned last_idx;
212 	vaddr_t smem = tee_mm_get_smem(mm);
213 	size_t nbytes = tee_mm_get_bytes(mm);
214 
215 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
216 
217 	if (pager_alias_area)
218 		panic("pager_alias_area already set");
219 
220 	if (!ti->num_entries && !core_mmu_find_table(smem, UINT_MAX, ti))
221 		panic("Can't find translation table");
222 
223 	if ((1 << ti->shift) != SMALL_PAGE_SIZE)
224 		panic("Unsupported page size in translation table");
225 
226 	tbl_va_size = (1 << ti->shift) * ti->num_entries;
227 	if (!core_is_buffer_inside(smem, nbytes,
228 				   ti->va_base, tbl_va_size)) {
229 		EMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
230 		     smem, nbytes, ti->va_base, tbl_va_size);
231 		panic();
232 	}
233 
234 	if (smem & SMALL_PAGE_MASK || nbytes & SMALL_PAGE_MASK)
235 		panic("invalid area alignment");
236 
237 	pager_alias_area = mm;
238 	pager_alias_next_free = smem;
239 
240 	/* Clear all mappings in the alias area */
241 	idx = core_mmu_va2idx(ti, smem);
242 	last_idx = core_mmu_va2idx(ti, smem + nbytes);
243 	for (; idx < last_idx; idx++)
244 		core_mmu_set_entry(ti, idx, 0, 0);
245 
246 	/* TODO only invalidate entries touched above */
247 	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
248 }
249 
250 static void generate_ae_key(void)
251 {
252 	if (rng_generate(pager_ae_key, sizeof(pager_ae_key)) != TEE_SUCCESS)
253 		panic("failed to generate random");
254 }
255 
256 void tee_pager_init(tee_mm_entry_t *mm_alias)
257 {
258 	set_alias_area(mm_alias);
259 	generate_ae_key();
260 }
261 
262 static void *pager_add_alias_page(paddr_t pa)
263 {
264 	unsigned idx;
265 	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
266 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
267 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
268 			TEE_MATTR_SECURE | TEE_MATTR_PRW;
269 
270 	DMSG("0x%" PRIxPA, pa);
271 
272 	if (!pager_alias_next_free || !ti->num_entries)
273 		panic("invalid alias entry");
274 
275 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
276 	core_mmu_set_entry(ti, idx, pa, attr);
277 	pgt_inc_used_entries(&pager_core_pgt);
278 	pager_alias_next_free += SMALL_PAGE_SIZE;
279 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
280 				      tee_mm_get_bytes(pager_alias_area)))
281 		pager_alias_next_free = 0;
282 	return (void *)core_mmu_idx2va(ti, idx);
283 }
284 
285 static struct tee_pager_area *alloc_area(struct pgt *pgt,
286 					 vaddr_t base, size_t size,
287 					 uint32_t flags, const void *store,
288 					 const void *hashes)
289 {
290 	struct tee_pager_area *area = calloc(1, sizeof(*area));
291 	enum area_type at;
292 	tee_mm_entry_t *mm_store = NULL;
293 
294 	if (!area)
295 		return NULL;
296 
297 	if (flags & (TEE_MATTR_PW | TEE_MATTR_UW)) {
298 		if (flags & TEE_MATTR_LOCKED) {
299 			at = AREA_TYPE_LOCK;
300 			goto out;
301 		}
302 		mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size);
303 		if (!mm_store)
304 			goto bad;
305 		area->store = phys_to_virt(tee_mm_get_smem(mm_store),
306 					   MEM_AREA_TA_RAM);
307 		if (!area->store)
308 			goto bad;
309 		area->u.rwp = calloc(size / SMALL_PAGE_SIZE,
310 				     sizeof(struct pager_rw_pstate));
311 		if (!area->u.rwp)
312 			goto bad;
313 		at = AREA_TYPE_RW;
314 	} else {
315 		area->store = (void *)store;
316 		area->u.hashes = hashes;
317 		at = AREA_TYPE_RO;
318 	}
319 out:
320 	area->pgt = pgt;
321 	area->base = base;
322 	area->size = size;
323 	area->flags = flags;
324 	area->type = at;
325 	return area;
326 bad:
327 	tee_mm_free(mm_store);
328 	free(area->u.rwp);
329 	free(area);
330 	return NULL;
331 }
332 
333 static void area_insert_tail(struct tee_pager_area *area)
334 {
335 	uint32_t exceptions = pager_lock();
336 
337 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
338 
339 	pager_unlock(exceptions);
340 }
341 KEEP_PAGER(area_insert_tail);
342 
343 static size_t tbl_usage_count(struct pgt *pgt)
344 {
345 	size_t n;
346 	paddr_t pa;
347 	size_t usage = 0;
348 
349 	for (n = 0; n < tee_pager_tbl_info.num_entries; n++) {
350 		core_mmu_get_entry_primitive(pgt->tbl, tee_pager_tbl_info.level,
351 					     n, &pa, NULL);
352 		if (pa)
353 			usage++;
354 	}
355 	return usage;
356 }
357 
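/*
 * Registers a page-aligned virtual range of core memory with the pager.
 * Read-only ranges must supply a backing @store and per-page SHA-256
 * @hashes. Writable ranges must pass NULL for both; unless
 * TEE_MATTR_LOCKED is set they get an encrypted backing store allocated
 * from TA RAM in alloc_area().
 */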
358 bool tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
359 			const void *store, const void *hashes)
360 {
361 	struct tee_pager_area *area;
362 	size_t tbl_va_size;
363 	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
364 
365 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
366 		base, base + size, flags, store, hashes);
367 
368 	if (base & SMALL_PAGE_MASK || size & SMALL_PAGE_MASK || !size) {
369 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, size);
370 		panic();
371 	}
372 
373 	if (!(flags & TEE_MATTR_PW) && (!store || !hashes))
374 		panic("non-write pages must provide store and hashes");
375 
376 	if ((flags & TEE_MATTR_PW) && (store || hashes))
377 		panic("write pages cannot provide store or hashes");
378 
379 	if (!pager_core_pgt.tbl) {
380 		pager_core_pgt.tbl = ti->table;
381 		pgt_set_used_entries(&pager_core_pgt,
382 				     tbl_usage_count(&pager_core_pgt));
383 	}
384 
385 	tbl_va_size = (1 << ti->shift) * ti->num_entries;
386 	if (!core_is_buffer_inside(base, size, ti->va_base, tbl_va_size)) {
387 		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit in translation table 0x%" PRIxVA " len 0x%zx",
388 			base, size, ti->va_base, tbl_va_size);
389 		return false;
390 	}
391 
392 	area = alloc_area(&pager_core_pgt, base, size, flags, store, hashes);
393 	if (!area)
394 		return false;
395 
396 	area_insert_tail(area);
397 	return true;
398 }
399 
400 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
401 					vaddr_t va)
402 {
403 	struct tee_pager_area *area;
404 
405 	if (!areas)
406 		return NULL;
407 
408 	TAILQ_FOREACH(area, areas, link) {
409 		if (core_is_buffer_inside(va, 1, area->base, area->size))
410 			return area;
411 	}
412 	return NULL;
413 }
414 
415 #ifdef CFG_PAGED_USER_TA
416 static struct tee_pager_area *find_uta_area(vaddr_t va)
417 {
418 	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
419 
420 	if (!ctx || !is_user_ta_ctx(ctx))
421 		return NULL;
422 	return find_area(to_user_ta_ctx(ctx)->areas, va);
423 }
424 #else
425 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
426 {
427 	return NULL;
428 }
429 #endif /*CFG_PAGED_USER_TA*/
430 
431 
432 static uint32_t get_area_mattr(uint32_t area_flags)
433 {
434 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
435 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
436 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
437 
438 	if (!(area_flags & (TEE_MATTR_UR | TEE_MATTR_UX | TEE_MATTR_UW)))
439 		attr |= TEE_MATTR_GLOBAL;
440 
441 	return attr;
442 }
443 
444 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
445 {
446 	paddr_t pa;
447 	unsigned idx;
448 
449 	idx = core_mmu_va2idx(&pager_alias_tbl_info, (vaddr_t)pmem->va_alias);
450 	core_mmu_get_entry(&pager_alias_tbl_info, idx, &pa, NULL);
451 	return pa;
452 }
453 
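/*
 * Pages written back to the store are protected with AES-GCM using the
 * key generated at boot in generate_ae_key(). The IV is never reused:
 * iv[0] is the address of the per-page pager_rw_pstate while iv[1..2]
 * carry a 64-bit counter that is incremented before every encryption.
 */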
454 static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
455 			void *dst)
456 {
457 	struct pager_aes_gcm_iv iv = {
458 		{ (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }
459 	};
460 
461 	return pager_aes_gcm_decrypt(pager_ae_key, sizeof(pager_ae_key),
462 				     &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE);
463 }
464 
465 static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
466 {
467 	struct pager_aes_gcm_iv iv;
468 
469 	assert((rwp->iv + 1) > rwp->iv);
470 	rwp->iv++;
471 	/*
472 	 * IV is constructed as recommended in section "8.2.1 Deterministic
473 	 * Construction" of "Recommendation for Block Cipher Modes of
474 	 * Operation: Galois/Counter Mode (GCM) and GMAC",
475 	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
476 	 */
477 	iv.iv[0] = (vaddr_t)rwp;
478 	iv.iv[1] = rwp->iv >> 32;
479 	iv.iv[2] = rwp->iv;
480 
481 	if (!pager_aes_gcm_encrypt(pager_ae_key, sizeof(pager_ae_key),
482 				   &iv, rwp->tag,
483 				   src, dst, SMALL_PAGE_SIZE))
484 		panic("gcm failed");
485 }
486 
487 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
488 			void *va_alias)
489 {
490 	size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
491 	const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;
492 
493 	switch (area->type) {
494 	case AREA_TYPE_RO:
495 		{
496 			const void *hash = area->u.hashes +
497 					   idx * TEE_SHA256_HASH_SIZE;
498 
499 			memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
500 			incr_ro_hits();
501 
502 			if (hash_sha256_check(hash, va_alias,
503 					      SMALL_PAGE_SIZE) != TEE_SUCCESS) {
504 				EMSG("PH 0x%" PRIxVA " failed", page_va);
505 				panic();
506 			}
507 		}
508 		break;
509 	case AREA_TYPE_RW:
510 		FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64,
511 			va_alias, page_va, area->u.rwp[idx].iv);
512 		if (!area->u.rwp[idx].iv)
513 			memset(va_alias, 0, SMALL_PAGE_SIZE);
514 		else if (!decrypt_page(&area->u.rwp[idx], stored_page,
515 				       va_alias)) {
516 			EMSG("PH 0x%" PRIxVA " failed", page_va);
517 			panic();
518 		}
519 		incr_rw_hits();
520 		break;
521 	case AREA_TYPE_LOCK:
522 		FMSG("Zero init %p %#" PRIxVA, va_alias, page_va);
523 		memset(va_alias, 0, SMALL_PAGE_SIZE);
524 		break;
525 	default:
526 		panic();
527 	}
528 }
529 
530 static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
531 {
532 	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
533 				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;
534 
535 	if (pmem->area->type == AREA_TYPE_RW && (attr & dirty_bits)) {
536 		size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK;
537 		size_t idx = pmem->pgidx - (offs >> SMALL_PAGE_SHIFT);
538 		void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE;
539 
540 		assert(pmem->area->flags & (TEE_MATTR_PW | TEE_MATTR_UW));
541 		encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias,
542 			     stored_page);
543 		FMSG("Saved %#" PRIxVA " iv %#" PRIx64,
544 			pmem->area->base + idx * SMALL_PAGE_SIZE,
545 			pmem->area->u.rwp[idx].iv);
546 	}
547 }
548 
549 static void area_get_entry(struct tee_pager_area *area, size_t idx,
550 			   paddr_t *pa, uint32_t *attr)
551 {
552 	assert(area->pgt);
553 	assert(idx < tee_pager_tbl_info.num_entries);
554 	core_mmu_get_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,
555 				     idx, pa, attr);
556 }
557 
558 static void area_set_entry(struct tee_pager_area *area, size_t idx,
559 			   paddr_t pa, uint32_t attr)
560 {
561 	assert(area->pgt);
562 	assert(idx < tee_pager_tbl_info.num_entries);
563 	core_mmu_set_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,
564 				     idx, pa, attr);
565 }
566 
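/*
 * Page indexes (pgidx) are relative to the start of the page directory
 * covering the area, not to area->base, so they can be used directly as
 * entry indexes in the area's translation table.
 */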
567 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
568 {
569 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
570 }
571 
572 static vaddr_t __maybe_unused area_idx2va(struct tee_pager_area *area,
573 					  size_t idx)
574 {
575 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
576 }
577 
578 #ifdef CFG_PAGED_USER_TA
579 static void free_area(struct tee_pager_area *area)
580 {
581 	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr,
582 				virt_to_phys(area->store)));
583 	if (area->type == AREA_TYPE_RW)
584 		free(area->u.rwp);
585 	free(area);
586 }
587 
588 static bool pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base,
589 			       size_t size)
590 {
591 	struct tee_pager_area *area;
592 	uint32_t flags;
593 	vaddr_t b = base;
594 	size_t s = size;
595 
596 	if (!utc->areas) {
597 		utc->areas = malloc(sizeof(*utc->areas));
598 		if (!utc->areas)
599 			return false;
600 		TAILQ_INIT(utc->areas);
601 	}
602 
603 	flags = TEE_MATTR_PRW | TEE_MATTR_URWX;
604 
605 	while (s) {
606 		size_t s2;
607 
608 		if (find_area(utc->areas, b))
609 			return false;
610 
611 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
612 
613 		/* Table info will be set when the context is activated. */
614 		area = alloc_area(NULL, b, s2, flags, NULL, NULL);
615 		if (!area)
616 			return false;
617 		TAILQ_INSERT_TAIL(utc->areas, area, link);
618 		b += s2;
619 		s -= s2;
620 	}
621 
622 	return true;
623 }
624 
625 bool tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size)
626 {
627 	return pager_add_uta_area(utc, base, size);
628 }
629 
630 void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
631 {
632 	struct tee_pager_area *area;
633 
634 	if (!utc->areas)
635 		return;
636 
637 	while (true) {
638 		area = TAILQ_FIRST(utc->areas);
639 		if (!area)
640 			break;
641 		TAILQ_REMOVE(utc->areas, area, link);
642 		free_area(area);
643 	}
644 
645 	free(utc->areas);
646 }
647 
648 bool tee_pager_set_uta_area_attr(struct user_ta_ctx *utc, vaddr_t base,
649 				 size_t size, uint32_t flags)
650 {
651 	bool ret;
652 	vaddr_t b = base;
653 	size_t s = size;
654 	size_t s2;
655 	struct tee_pager_area *area = find_area(utc->areas, b);
656 	uint32_t exceptions;
657 	struct tee_pager_pmem *pmem;
658 	paddr_t pa;
659 	uint32_t a;
660 	uint32_t f;
661 
662 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
663 	if (f & TEE_MATTR_UW)
664 		f |= TEE_MATTR_PW;
665 	f = get_area_mattr(f);
666 
667 	exceptions = pager_lock();
668 
669 	while (s) {
670 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
671 		if (!area || area->base != b || area->size != s2) {
672 			ret = false;
673 			goto out;
674 		}
675 		b += s2;
676 		s -= s2;
677 
678 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
679 			if (pmem->area != area)
680 				continue;
681 			area_get_entry(pmem->area, pmem->pgidx, &pa, &a);
682 			if (a & TEE_MATTR_VALID_BLOCK)
683 				assert(pa == get_pmem_pa(pmem));
684 			else
685 				pa = get_pmem_pa(pmem);
686 			if (a == f)
687 				continue;
688 			area_set_entry(pmem->area, pmem->pgidx, 0, 0);
689 			/* TODO only invalidate entries touched above */
690 			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
691 			if (!(flags & TEE_MATTR_UW))
692 				tee_pager_save_page(pmem, a);
693 			area_set_entry(pmem->area, pmem->pgidx, pa, f);
694 		}
695 
696 		area->flags = f;
697 		area = TAILQ_NEXT(area, link);
698 	}
699 
700 	ret = true;
701 out:
702 	pager_unlock(exceptions);
703 	return ret;
704 }
705 KEEP_PAGER(tee_pager_set_uta_area_attr);
706 #endif /*CFG_PAGED_USER_TA*/
707 
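/*
 * If the page at @page_va is resident but hidden (mapped with one of the
 * TEE_MATTR_HIDDEN_* pseudo attributes) it is made accessible again and
 * moved to the tail of the pmem list. Returns true in that case, false
 * if the page isn't resident or isn't hidden.
 */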
708 static bool tee_pager_unhide_page(vaddr_t page_va)
709 {
710 	struct tee_pager_pmem *pmem;
711 
712 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
713 		paddr_t pa;
714 		uint32_t attr;
715 
716 		if (pmem->pgidx == INVALID_PGIDX)
717 			continue;
718 
719 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
720 
721 		if (!(attr &
722 		     (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
723 			continue;
724 
725 		if (area_va2idx(pmem->area, page_va) == pmem->pgidx) {
726 			uint32_t a = get_area_mattr(pmem->area->flags);
727 
728 			/* page is hidden, show and move to back */
729 			if (pa != get_pmem_pa(pmem))
730 				panic("unexpected pa");
731 
732 			/*
733 			 * If it's not a dirty block, then it should be
734 			 * read only.
735 			 */
736 			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
737 				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
738 			else
739 				FMSG("Unhide %#" PRIxVA, page_va);
740 
741 			if (page_va == 0x8000a000)
742 				FMSG("unhide %#" PRIxVA " a %#" PRIX32,
743 					page_va, a);
744 			area_set_entry(pmem->area, pmem->pgidx, pa, a);
745 
746 			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
747 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
748 
749 			/* TODO only invalidate entry touched above */
750 			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
751 
752 			incr_hidden_hits();
753 			return true;
754 		}
755 	}
756 
757 	return false;
758 }
759 
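/*
 * Hides up to TEE_PAGER_NHIDE of the oldest resident pages by replacing
 * their attributes with a HIDDEN pseudo attribute. The next access to a
 * hidden page faults and tee_pager_unhide_page() restores it and moves
 * it to the back of the list, which approximates least-recently-used
 * eviction since pages are reclaimed from the front.
 */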
760 static void tee_pager_hide_pages(void)
761 {
762 	struct tee_pager_pmem *pmem;
763 	size_t n = 0;
764 
765 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
766 		paddr_t pa;
767 		uint32_t attr;
768 		uint32_t a;
769 
770 		if (n >= TEE_PAGER_NHIDE)
771 			break;
772 		n++;
773 
774 		/* we cannot hide pages when pmem->area is not defined. */
775 		if (!pmem->area)
776 			continue;
777 
778 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
779 		if (!(attr & TEE_MATTR_VALID_BLOCK))
780 			continue;
781 
782 		assert(pa == get_pmem_pa(pmem));
783 		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)){
784 			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
785 			FMSG("Hide %#" PRIxVA,
786 			     area_idx2va(pmem->area, pmem->pgidx));
787 		} else
788 			a = TEE_MATTR_HIDDEN_BLOCK;
789 		area_set_entry(pmem->area, pmem->pgidx, pa, a);
790 	}
791 
792 	/* TODO only invalidate entries touched above */
793 	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
794 }
795 
796 /*
797  * Find mapped pmem, unmap it and move it back to the pageable pmem list.
798  * Return false if the page was not mapped, and true if it was.
799  */
800 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
801 				       vaddr_t page_va)
802 {
803 	struct tee_pager_pmem *pmem;
804 	unsigned pgidx;
805 	paddr_t pa;
806 	uint32_t attr;
807 
808 	pgidx = area_va2idx(area, page_va);
809 	area_get_entry(area, pgidx, &pa, &attr);
810 
811 	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
812 
813 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
814 		if (pmem->area != area || pmem->pgidx != pgidx)
815 			continue;
816 
817 		assert(pa == get_pmem_pa(pmem));
818 		area_set_entry(area, pgidx, 0, 0);
819 		pgt_dec_used_entries(area->pgt);
820 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
821 		pmem->area = NULL;
822 		pmem->pgidx = INVALID_PGIDX;
823 		tee_pager_npages++;
824 		set_npages();
825 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
826 		incr_zi_released();
827 		return true;
828 	}
829 
830 	return false;
831 }
832 
833 /* Finds the oldest page and unmaps it from its old virtual address */
834 static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
835 {
836 	struct tee_pager_pmem *pmem;
837 
838 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
839 	if (!pmem) {
840 		EMSG("No pmem entries");
841 		return NULL;
842 	}
843 	if (pmem->pgidx != INVALID_PGIDX) {
844 		uint32_t a;
845 
846 		assert(pmem->area && pmem->area->pgt);
847 		area_get_entry(pmem->area, pmem->pgidx, NULL, &a);
848 		area_set_entry(pmem->area, pmem->pgidx, 0, 0);
849 		pgt_dec_used_entries(pmem->area->pgt);
850 		/* TODO only invalidate entries touched above */
851 		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
852 		tee_pager_save_page(pmem, a);
853 	}
854 
855 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
856 	pmem->pgidx = INVALID_PGIDX;
857 	pmem->area = NULL;
858 	if (area->type == AREA_TYPE_LOCK) {
859 		/* Move page to lock list */
860 		if (tee_pager_npages <= 0)
861 			panic("running out of page");
862 		tee_pager_npages--;
863 		set_npages();
864 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
865 	} else {
866 		/* move page to back */
867 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
868 	}
869 
870 	return pmem;
871 }
872 
873 static bool pager_update_permissions(struct tee_pager_area *area,
874 			struct abort_info *ai, bool *handled)
875 {
876 	unsigned int pgidx = area_va2idx(area, ai->va);
877 	uint32_t attr;
878 	paddr_t pa;
879 
880 	*handled = false;
881 
882 	area_get_entry(area, pgidx, &pa, &attr);
883 
884 	/* Not mapped */
885 	if (!(attr & TEE_MATTR_VALID_BLOCK))
886 		return false;
887 
888 	/* Not readable, should not happen */
889 	if (abort_is_user_exception(ai)) {
890 		if (!(attr & TEE_MATTR_UR))
891 			return true;
892 	} else {
893 		if (!(attr & TEE_MATTR_PR)) {
894 			abort_print_error(ai);
895 			panic();
896 		}
897 	}
898 
899 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
900 	case CORE_MMU_FAULT_TRANSLATION:
901 	case CORE_MMU_FAULT_READ_PERMISSION:
902 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
903 			/* Check if attempting to execute from a non-executable page */
904 			if (abort_is_user_exception(ai)) {
905 				if (!(attr & TEE_MATTR_UX))
906 					return true;
907 			} else {
908 				if (!(attr & TEE_MATTR_PX)) {
909 					abort_print_error(ai);
910 					panic();
911 				}
912 			}
913 		}
914 		/* Since the page is mapped now it's OK */
915 		break;
916 	case CORE_MMU_FAULT_WRITE_PERMISSION:
917 		/* Check if attempting to write to a read-only page */
918 		if (abort_is_user_exception(ai)) {
919 			if (!(area->flags & TEE_MATTR_UW))
920 				return true;
921 			if (!(attr & TEE_MATTR_UW)) {
922 				FMSG("Dirty %p",
923 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
924 				area_set_entry(area, pgidx, pa,
925 					       get_area_mattr(area->flags));
926 				/* TODO only invalidate entry above */
927 				core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
928 			}
929 
930 		} else {
931 			if (!(area->flags & TEE_MATTR_PW)) {
932 				abort_print_error(ai);
933 				panic();
934 			}
935 			if (!(attr & TEE_MATTR_PW)) {
936 				FMSG("Dirty %p",
937 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
938 				area_set_entry(area, pgidx, pa,
939 					       get_area_mattr(area->flags));
940 				/* TODO only invalidate entry above */
941 				core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
942 			}
943 		}
944 		/* Since the permissions have been updated it's now OK */
945 		break;
946 	default:
947 		/* Some fault we can't deal with */
948 		if (abort_is_user_exception(ai))
949 			return true;
950 		abort_print_error(ai);
951 		panic();
952 	}
953 	*handled = true;
954 	return true;
955 }
956 
957 #ifdef CFG_TEE_CORE_DEBUG
958 static void stat_handle_fault(void)
959 {
960 	static size_t num_faults;
961 	static size_t min_npages = SIZE_MAX;
962 	static size_t total_min_npages = SIZE_MAX;
963 
964 	num_faults++;
965 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
966 		DMSG("nfaults %zu npages %zu (min %zu)",
967 		     num_faults, tee_pager_npages, min_npages);
968 		min_npages = tee_pager_npages; /* reset */
969 	}
970 	if (tee_pager_npages < min_npages)
971 		min_npages = tee_pager_npages;
972 	if (tee_pager_npages < total_min_npages)
973 		total_min_npages = tee_pager_npages;
974 }
975 #else
976 static void stat_handle_fault(void)
977 {
978 }
979 #endif
980 
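/*
 * Entered from the abort handler. Returns true when the pager has
 * resolved the fault; false when it could not (the address isn't pager
 * memory, or a user TA violated the access rights), in which case the
 * caller handles the abort.
 */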
981 bool tee_pager_handle_fault(struct abort_info *ai)
982 {
983 	struct tee_pager_area *area;
984 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
985 	uint32_t exceptions;
986 	bool ret;
987 
988 #ifdef TEE_PAGER_DEBUG_PRINT
989 	abort_print(ai);
990 #endif
991 
992 	/*
993 	 * We're updating pages that can affect several active CPUs at a
994 	 * time below. We end up here because a thread tries to access some
995 	 * memory that isn't available. We have to be careful when making
996 	 * that memory available as other threads may succeed in accessing
997 	 * that address the moment after we've made it available.
998 	 *
999 	 * That means that we can't just map the memory and populate the
1000 	 * page, instead we use the aliased mapping to populate the page
1001 	 * and once everything is ready we map it.
1002 	 */
1003 	exceptions = pager_lock();
1004 
1005 	stat_handle_fault();
1006 
1007 	/* check if the access is valid */
1008 	if (abort_is_user_exception(ai)) {
1009 		area = find_uta_area(ai->va);
1010 
1011 	} else {
1012 		area = find_area(&tee_pager_area_head, ai->va);
1013 		if (!area)
1014 			area = find_uta_area(ai->va);
1015 	}
1016 	if (!area) {
1017 		ret = false;
1018 		goto out;
1019 	}
1020 
1021 	if (!tee_pager_unhide_page(page_va)) {
1022 		struct tee_pager_pmem *pmem = NULL;
1023 		uint32_t attr;
1024 
1025 		/*
1026 		 * The page wasn't hidden, but some other core may have
1027 		 * updated the table entry before we got here or we need
1028 		 * to make a read-only page read-write (dirty).
1029 		 */
1030 		if (pager_update_permissions(area, ai, &ret)) {
1031 			/*
1032 			 * Nothing more to do with the abort. The problem
1033 			 * could already have been dealt with from another
1034 			 * core, or if ret is false the TA will be panicked.
1035 			 */
1036 			goto out;
1037 		}
1038 
1039 		pmem = tee_pager_get_page(area);
1040 		if (!pmem) {
1041 			abort_print(ai);
1042 			panic();
1043 		}
1044 
1045 		/* load page code & data */
1046 		tee_pager_load_page(area, page_va, pmem->va_alias);
1047 
1048 		/*
1049 		 * We've updated the page using the aliased mapping and
1050 		 * some cache maintenance is now needed if it's an
1051 		 * executable page.
1052 		 *
1053 		 * Since the d-cache is a Physically-indexed,
1054 		 * physically-tagged (PIPT) cache we can clean the aliased
1055 		 * address instead of the real virtual address.
1056 		 *
1057 		 * The i-cache can also be PIPT, but may be something else
1058 		 * too; to keep it simple we invalidate the entire i-cache.
1059 		 * As a future optimization we may invalidate only the
1060 		 * aliased area if it is a PIPT cache, else the entire cache.
1061 		 */
1062 		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1063 			/*
1064 			 * Doing these operations to LoUIS (Level of
1065 			 * unification, Inner Shareable) would be enough
1066 			 */
1067 			cache_maintenance_l1(DCACHE_AREA_CLEAN,
1068 				pmem->va_alias, SMALL_PAGE_SIZE);
1069 
1070 			cache_maintenance_l1(ICACHE_INVALIDATE, NULL, 0);
1071 		}
1072 
1073 		pmem->area = area;
1074 		pmem->pgidx = area_va2idx(area, ai->va);
1075 		attr = get_area_mattr(area->flags) &
1076 			~(TEE_MATTR_PW | TEE_MATTR_UW);
1077 		area_set_entry(area, pmem->pgidx, get_pmem_pa(pmem), attr);
1078 		pgt_inc_used_entries(area->pgt);
1079 
1080 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
1081 		     area_idx2va(area, pmem->pgidx), get_pmem_pa(pmem));
1082 
1083 	}
1084 
1085 	tee_pager_hide_pages();
1086 	ret = true;
1087 out:
1088 	pager_unlock(exceptions);
1089 	return ret;
1090 }
1091 
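/*
 * Hands over @npages physical pages starting at @vaddr to the pager.
 * With @unmap == true the pages are unmapped right away and only serve
 * as pageable backing from now on; with @unmap == false they stay mapped
 * and are assigned to the core area covering them.
 */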
1092 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1093 {
1094 	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
1095 	size_t n;
1096 
1097 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1098 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1099 
1100 	/* setup memory */
1101 	for (n = 0; n < npages; n++) {
1102 		struct tee_pager_pmem *pmem;
1103 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1104 		unsigned pgidx = core_mmu_va2idx(ti, va);
1105 		paddr_t pa;
1106 		uint32_t attr;
1107 
1108 		/*
1109 		 * Note that we can only support adding pages in the
1110 		 * valid range of this table info, currently not a problem.
1111 		 */
1112 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1113 
1114 		/* Ignore unmapped pages/blocks */
1115 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1116 			continue;
1117 
1118 		pmem = malloc(sizeof(struct tee_pager_pmem));
1119 		if (!pmem)
1120 			panic("out of mem");
1121 
1122 		pmem->va_alias = pager_add_alias_page(pa);
1123 
1124 		if (unmap) {
1125 			pmem->area = NULL;
1126 			pmem->pgidx = INVALID_PGIDX;
1127 			core_mmu_set_entry(ti, pgidx, 0, 0);
1128 			pgt_dec_used_entries(&pager_core_pgt);
1129 		} else {
1130 			/*
1131 			 * The page is still mapped, let's assign the area
1132 			 * and update the protection bits accordingly.
1133 			 */
1134 			pmem->area = find_area(&tee_pager_area_head, va);
1135 			assert(pmem->area->pgt == &pager_core_pgt);
1136 			pmem->pgidx = pgidx;
1137 			assert(pa == get_pmem_pa(pmem));
1138 			area_set_entry(pmem->area, pgidx, pa,
1139 				       get_area_mattr(pmem->area->flags));
1140 		}
1141 
1142 		tee_pager_npages++;
1143 		incr_npages_all();
1144 		set_npages();
1145 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1146 	}
1147 
1148 	/* Invalidate secure TLB */
1149 	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
1150 }
1151 
1152 #ifdef CFG_PAGED_USER_TA
1153 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1154 {
1155 	struct pgt *p = pgt;
1156 
1157 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1158 		p = SLIST_NEXT(p, link);
1159 	return p;
1160 }
1161 
1162 void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
1163 {
1164 	struct tee_pager_area *area;
1165 	struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1166 
1167 	TAILQ_FOREACH(area, utc->areas, link) {
1168 		if (!area->pgt)
1169 			area->pgt = find_pgt(pgt, area->base);
1170 		else
1171 			assert(area->pgt == find_pgt(pgt, area->base));
1172 		if (!area->pgt)
1173 			panic();
1174 	}
1175 }
1176 
1177 static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)
1178 {
1179 	uint32_t attr;
1180 
1181 	assert(pmem->area && pmem->area->pgt);
1182 
1183 	area_get_entry(pmem->area, pmem->pgidx, NULL, &attr);
1184 	area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1185 	tee_pager_save_page(pmem, attr);
1186 	assert(pmem->area->pgt->num_used_entries);
1187 	pmem->area->pgt->num_used_entries--;
1188 	pmem->pgidx = INVALID_PGIDX;
1189 	pmem->area = NULL;
1190 }
1191 
1192 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1193 {
1194 	struct tee_pager_pmem *pmem;
1195 	struct tee_pager_area *area;
1196 	uint32_t exceptions = pager_lock();
1197 
1198 	if (!pgt->num_used_entries)
1199 		goto out;
1200 
1201 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1202 		if (!pmem->area || pmem->pgidx == INVALID_PGIDX)
1203 			continue;
1204 		if (pmem->area->pgt == pgt)
1205 			pager_save_and_release_entry(pmem);
1206 	}
1207 	assert(!pgt->num_used_entries);
1208 
1209 out:
1210 	if (is_user_ta_ctx(pgt->ctx)) {
1211 		TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
1212 			if (area->pgt == pgt)
1213 				area->pgt = NULL;
1214 		}
1215 	}
1216 
1217 	pager_unlock(exceptions);
1218 }
1219 KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1220 #endif /*CFG_PAGED_USER_TA*/
1221 
1222 void tee_pager_release_phys(void *addr, size_t size)
1223 {
1224 	bool unmapped = false;
1225 	vaddr_t va = (vaddr_t)addr;
1226 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1227 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1228 	struct tee_pager_area *area;
1229 	uint32_t exceptions;
1230 
1231 	if (!size)
1232 		return;
1233 
1234 	area = find_area(&tee_pager_area_head, begin);
1235 	if (!area ||
1236 	    area != find_area(&tee_pager_area_head, end - SMALL_PAGE_SIZE))
1237 		panic();
1238 
1239 	exceptions = pager_lock();
1240 
1241 	for (va = begin; va < end; va += SMALL_PAGE_SIZE)
1242 		unmapped |= tee_pager_release_one_phys(area, va);
1243 
1244 	/* Invalidate secure TLB */
1245 	if (unmapped)
1246 		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
1247 
1248 	pager_unlock(exceptions);
1249 }
1250 KEEP_PAGER(tee_pager_release_phys);
1251 
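/*
 * Allocates pager-backed virtual memory from the vcore pool. Passing
 * TEE_MATTR_LOCKED in @flags gives memory that stays resident once
 * faulted in; a @flags of 0 gives normal read/write paged memory backed
 * by the encrypted store. Illustrative example:
 *
 *	void *p = tee_pager_alloc(2 * SMALL_PAGE_SIZE, TEE_MATTR_LOCKED);
 */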
1252 void *tee_pager_alloc(size_t size, uint32_t flags)
1253 {
1254 	tee_mm_entry_t *mm;
1255 	uint32_t f = TEE_MATTR_PW | TEE_MATTR_PR | (flags & TEE_MATTR_LOCKED);
1256 
1257 	if (!size)
1258 		return NULL;
1259 
1260 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1261 	if (!mm)
1262 		return NULL;
1263 
1264 	tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
1265 				f, NULL, NULL);
1266 
1267 	return (void *)tee_mm_get_smem(mm);
1268 }
1269