xref: /optee_os/core/mm/vm.c (revision c04a96a45ffe0e665a4d86e542ec921fae932aa8)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2021, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <initcall.h>
10 #include <kernel/panic.h>
11 #include <kernel/spinlock.h>
12 #include <kernel/tee_common.h>
13 #include <kernel/tee_misc.h>
14 #include <kernel/tlb_helpers.h>
15 #include <kernel/user_mode_ctx.h>
16 #include <kernel/virtualization.h>
17 #include <mm/core_memprot.h>
18 #include <mm/core_mmu.h>
19 #include <mm/mobj.h>
20 #include <mm/pgt_cache.h>
21 #include <mm/tee_mm.h>
22 #include <mm/tee_mmu_types.h>
23 #include <mm/tee_pager.h>
24 #include <mm/vm.h>
25 #include <sm/optee_smc.h>
26 #include <stdlib.h>
27 #include <tee_api_defines_extensions.h>
28 #include <tee_api_types.h>
29 #include <trace.h>
30 #include <types_ext.h>
31 #include <user_ta_header.h>
32 #include <util.h>
33 
34 #ifdef CFG_PL310
35 #include <kernel/tee_l2cc_mutex.h>
36 #endif
37 
38 #define TEE_MMU_UDATA_ATTR		(TEE_MATTR_VALID_BLOCK | \
39 					 TEE_MATTR_PRW | TEE_MATTR_URW | \
40 					 TEE_MATTR_SECURE)
41 #define TEE_MMU_UCODE_ATTR		(TEE_MATTR_VALID_BLOCK | \
42 					 TEE_MATTR_PRW | TEE_MATTR_URWX | \
43 					 TEE_MATTR_SECURE)
44 
45 #define TEE_MMU_UCACHE_DEFAULT_ATTR	(TEE_MATTR_CACHE_CACHED << \
46 					 TEE_MATTR_CACHE_SHIFT)
47 
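/*
 * select_va_in_range() - pick a virtual address for @reg in the free gap
 * between @prev_reg and @next_reg, requiring @pad_begin/@pad_end bytes of
 * free space around the region and @granul alignment. Returns the selected
 * VA, or 0 if @reg (plus padding) does not fit in the gap.
 */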
48 static vaddr_t select_va_in_range(const struct vm_region *prev_reg,
49 				  const struct vm_region *next_reg,
50 				  const struct vm_region *reg,
51 				  size_t pad_begin, size_t pad_end,
52 				  size_t granul)
53 {
54 	const uint32_t f = VM_FLAG_EPHEMERAL | VM_FLAG_PERMANENT |
55 			    VM_FLAG_SHAREABLE;
56 	vaddr_t begin_va = 0;
57 	vaddr_t end_va = 0;
58 	size_t pad = 0;
59 
60 	/*
61 	 * Insert an unmapped entry to separate regions with differing
62 	 * VM_FLAG_EPHEMERAL, VM_FLAG_PERMANENT or VM_FLAG_SHAREABLE
63 	 * bits, as such regions must never be contiguous with each other.
64 	 */
65 	if (prev_reg->flags && (prev_reg->flags & f) != (reg->flags & f))
66 		pad = SMALL_PAGE_SIZE;
67 	else
68 		pad = 0;
69 
70 #ifndef CFG_WITH_LPAE
71 	if ((prev_reg->attr & TEE_MATTR_SECURE) !=
72 	    (reg->attr & TEE_MATTR_SECURE))
73 		granul = CORE_MMU_PGDIR_SIZE;
74 #endif
75 
76 	if (ADD_OVERFLOW(prev_reg->va, prev_reg->size, &begin_va) ||
77 	    ADD_OVERFLOW(begin_va, pad_begin, &begin_va) ||
78 	    ADD_OVERFLOW(begin_va, pad, &begin_va) ||
79 	    ROUNDUP_OVERFLOW(begin_va, granul, &begin_va))
80 		return 0;
81 
82 	if (reg->va) {
83 		if (reg->va < begin_va)
84 			return 0;
85 		begin_va = reg->va;
86 	}
87 
88 	if (next_reg->flags && (next_reg->flags & f) != (reg->flags & f))
89 		pad = SMALL_PAGE_SIZE;
90 	else
91 		pad = 0;
92 
93 #ifndef CFG_WITH_LPAE
94 	if ((next_reg->attr & TEE_MATTR_SECURE) !=
95 	    (reg->attr & TEE_MATTR_SECURE))
96 		granul = CORE_MMU_PGDIR_SIZE;
97 #endif
98 	if (ADD_OVERFLOW(begin_va, reg->size, &end_va) ||
99 	    ADD_OVERFLOW(end_va, pad_end, &end_va) ||
100 	    ADD_OVERFLOW(end_va, pad, &end_va) ||
101 	    ROUNDUP_OVERFLOW(end_va, granul, &end_va))
102 		return 0;
103 
104 	if (end_va <= next_reg->va) {
105 		assert(!reg->va || reg->va == begin_va);
106 		return begin_va;
107 	}
108 
109 	return 0;
110 }
111 
112 static size_t get_num_req_pgts(struct user_mode_ctx *uctx, vaddr_t *begin,
113 			       vaddr_t *end)
114 {
115 	vaddr_t b;
116 	vaddr_t e;
117 
118 	if (TAILQ_EMPTY(&uctx->vm_info.regions)) {
119 		core_mmu_get_user_va_range(&b, NULL);
120 		e = b;
121 	} else {
122 		struct vm_region *r;
123 
124 		b = TAILQ_FIRST(&uctx->vm_info.regions)->va;
125 		r = TAILQ_LAST(&uctx->vm_info.regions, vm_region_head);
126 		e = r->va + r->size;
127 		b = ROUNDDOWN(b, CORE_MMU_PGDIR_SIZE);
128 		e = ROUNDUP(e, CORE_MMU_PGDIR_SIZE);
129 	}
130 
131 	if (begin)
132 		*begin = b;
133 	if (end)
134 		*end = e;
135 	return (e - b) >> CORE_MMU_PGDIR_SHIFT;
136 }
137 
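/*
 * alloc_pgt() - check that enough translation tables are available to
 * cover the current set of regions. With CFG_PAGED_USER_TA the tables are
 * also allocated up front when the supplied context is the one currently
 * active, since the pager will need them shortly.
 */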
138 static TEE_Result alloc_pgt(struct user_mode_ctx *uctx)
139 {
140 	struct thread_specific_data *tsd __maybe_unused;
141 	vaddr_t b;
142 	vaddr_t e;
143 	size_t ntbl;
144 
145 	ntbl = get_num_req_pgts(uctx, &b, &e);
146 	if (!pgt_check_avail(ntbl)) {
147 		EMSG("%zu page tables not available", ntbl);
148 		return TEE_ERROR_OUT_OF_MEMORY;
149 	}
150 
151 #ifdef CFG_PAGED_USER_TA
152 	tsd = thread_get_tsd();
153 	if (uctx->ts_ctx == tsd->ctx) {
154 		/*
155 		 * The supplied uctx is the currently active context, so allocate
156 		 * the page tables too as the pager needs to use them soon.
157 		 */
158 		pgt_alloc(&tsd->pgt_cache, uctx->ts_ctx, b, e - 1);
159 	}
160 #endif
161 
162 	return TEE_SUCCESS;
163 }
164 
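/*
 * rem_um_region() - tear down the mapping of @r: remove it from the pager
 * or clear its page table and TLB entries, then flush cached translation
 * tables for the range not shared with neighbouring regions.
 */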
165 static void rem_um_region(struct user_mode_ctx *uctx, struct vm_region *r)
166 {
167 	struct thread_specific_data *tsd = thread_get_tsd();
168 	struct pgt_cache *pgt_cache = NULL;
169 	vaddr_t begin = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE);
170 	vaddr_t last = ROUNDUP(r->va + r->size, CORE_MMU_PGDIR_SIZE);
171 	struct vm_region *r2 = NULL;
172 
173 	if (uctx->ts_ctx == tsd->ctx)
174 		pgt_cache = &tsd->pgt_cache;
175 
176 	if (mobj_is_paged(r->mobj)) {
177 		tee_pager_rem_um_region(uctx, r->va, r->size);
178 	} else {
179 		pgt_clear_ctx_range(pgt_cache, uctx->ts_ctx, r->va,
180 				    r->va + r->size);
181 		tlbi_mva_range_asid(r->va, r->size, SMALL_PAGE_SIZE,
182 				    uctx->vm_info.asid);
183 	}
184 
185 	r2 = TAILQ_NEXT(r, link);
186 	if (r2)
187 		last = MIN(last, ROUNDDOWN(r2->va, CORE_MMU_PGDIR_SIZE));
188 
189 	r2 = TAILQ_PREV(r, vm_region_head, link);
190 	if (r2)
191 		begin = MAX(begin,
192 			    ROUNDUP(r2->va + r2->size, CORE_MMU_PGDIR_SIZE));
193 
194 	/* If there are no unused page tables, there's nothing left to do */
195 	if (begin >= last)
196 		return;
197 
198 	pgt_flush_ctx_range(pgt_cache, uctx->ts_ctx, r->va, r->va + r->size);
199 }
200 
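/*
 * umap_add_region() - insert @reg into the sorted region list of @vmi.
 * If @reg->va is 0 a suitable virtual address is selected, otherwise the
 * requested address is used only if it is available. @pad_begin/@pad_end
 * and @align are honored when selecting the address. Returns
 * TEE_ERROR_ACCESS_CONFLICT if no suitable spot is found.
 */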
201 static TEE_Result umap_add_region(struct vm_info *vmi, struct vm_region *reg,
202 				  size_t pad_begin, size_t pad_end,
203 				  size_t align)
204 {
205 	struct vm_region dummy_first_reg = { };
206 	struct vm_region dummy_last_reg = { };
207 	struct vm_region *r = NULL;
208 	struct vm_region *prev_r = NULL;
209 	vaddr_t va_range_base = 0;
210 	size_t va_range_size = 0;
211 	size_t granul;
212 	vaddr_t va = 0;
213 	size_t offs_plus_size = 0;
214 
215 	core_mmu_get_user_va_range(&va_range_base, &va_range_size);
216 	dummy_first_reg.va = va_range_base;
217 	dummy_last_reg.va = va_range_base + va_range_size;
218 
219 	/* Check alignment: all values must be at least SMALL_PAGE_SIZE aligned */
220 	if ((reg->va | reg->size | pad_begin | pad_end) & SMALL_PAGE_MASK)
221 		return TEE_ERROR_ACCESS_CONFLICT;
222 
223 	/* Check that the mobj is defined for the entire range */
224 	if (ADD_OVERFLOW(reg->offset, reg->size, &offs_plus_size))
225 		return TEE_ERROR_BAD_PARAMETERS;
226 	if (offs_plus_size > ROUNDUP(reg->mobj->size, SMALL_PAGE_SIZE))
227 		return TEE_ERROR_BAD_PARAMETERS;
228 
229 	granul = MAX(align, SMALL_PAGE_SIZE);
230 	if (!IS_POWER_OF_TWO(granul))
231 		return TEE_ERROR_BAD_PARAMETERS;
232 
233 	prev_r = &dummy_first_reg;
234 	TAILQ_FOREACH(r, &vmi->regions, link) {
235 		va = select_va_in_range(prev_r, r, reg, pad_begin, pad_end,
236 					granul);
237 		if (va) {
238 			reg->va = va;
239 			TAILQ_INSERT_BEFORE(r, reg, link);
240 			return TEE_SUCCESS;
241 		}
242 		prev_r = r;
243 	}
244 
245 	r = TAILQ_LAST(&vmi->regions, vm_region_head);
246 	if (!r)
247 		r = &dummy_first_reg;
248 	va = select_va_in_range(r, &dummy_last_reg, reg, pad_begin, pad_end,
249 				granul);
250 	if (va) {
251 		reg->va = va;
252 		TAILQ_INSERT_TAIL(&vmi->regions, reg, link);
253 		return TEE_SUCCESS;
254 	}
255 
256 	return TEE_ERROR_ACCESS_CONFLICT;
257 }
258 
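/*
 * vm_map_pad() - map @mobj at offset @offs into the user mode context with
 * the protection bits in @prot, requiring at least @pad_begin/@pad_end
 * bytes of free space around the new region and @align alignment. On
 * success *@va holds the selected virtual address. vm_map() is the
 * convenience variant without padding or explicit alignment, e.g. (sketch
 * mirroring the calls later in this file):
 *
 *	vaddr_t va = 0;
 *	res = vm_map(uctx, &va, size, TEE_MATTR_PRW | TEE_MATTR_URW, 0,
 *		     mobj, 0);
 */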
259 TEE_Result vm_map_pad(struct user_mode_ctx *uctx, vaddr_t *va, size_t len,
260 		      uint32_t prot, uint32_t flags, struct mobj *mobj,
261 		      size_t offs, size_t pad_begin, size_t pad_end,
262 		      size_t align)
263 {
264 	TEE_Result res = TEE_SUCCESS;
265 	struct vm_region *reg = NULL;
266 	uint32_t attr = 0;
267 
268 	if (prot & ~TEE_MATTR_PROT_MASK)
269 		return TEE_ERROR_BAD_PARAMETERS;
270 
271 	reg = calloc(1, sizeof(*reg));
272 	if (!reg)
273 		return TEE_ERROR_OUT_OF_MEMORY;
274 
275 	if (!mobj_is_paged(mobj)) {
276 		uint32_t cattr;
277 
278 		res = mobj_get_cattr(mobj, &cattr);
279 		if (res)
280 			goto err_free_reg;
281 		attr |= cattr << TEE_MATTR_CACHE_SHIFT;
282 	}
283 	attr |= TEE_MATTR_VALID_BLOCK;
284 	if (mobj_is_secure(mobj))
285 		attr |= TEE_MATTR_SECURE;
286 
287 	reg->mobj = mobj_get(mobj);
288 	reg->offset = offs;
289 	reg->va = *va;
290 	reg->size = ROUNDUP(len, SMALL_PAGE_SIZE);
291 	reg->attr = attr | prot;
292 	reg->flags = flags;
293 
294 	res = umap_add_region(&uctx->vm_info, reg, pad_begin, pad_end, align);
295 	if (res)
296 		goto err_free_reg;
297 
298 	res = alloc_pgt(uctx);
299 	if (res)
300 		goto err_rem_reg;
301 
302 	if (mobj_is_paged(mobj)) {
303 		struct fobj *fobj = mobj_get_fobj(mobj);
304 
305 		if (!fobj) {
306 			res = TEE_ERROR_GENERIC;
307 			goto err_rem_reg;
308 		}
309 
310 		res = tee_pager_add_um_region(uctx, reg->va, fobj, prot);
311 		fobj_put(fobj);
312 		if (res)
313 			goto err_rem_reg;
314 	}
315 
316 	/*
317 	 * If the context is currently active, set it again to update
318 	 * the mapping.
319 	 */
320 	if (thread_get_tsd()->ctx == uctx->ts_ctx)
321 		vm_set_ctx(uctx->ts_ctx);
322 
323 	*va = reg->va;
324 
325 	return TEE_SUCCESS;
326 
327 err_rem_reg:
328 	TAILQ_REMOVE(&uctx->vm_info.regions, reg, link);
329 err_free_reg:
330 	mobj_put(reg->mobj);
331 	free(reg);
332 	return res;
333 }
334 
335 static struct vm_region *find_vm_region(struct vm_info *vm_info, vaddr_t va)
336 {
337 	struct vm_region *r = NULL;
338 
339 	TAILQ_FOREACH(r, &vm_info->regions, link)
340 		if (va >= r->va && va < r->va + r->size)
341 			return r;
342 
343 	return NULL;
344 }
345 
346 static bool va_range_is_contiguous(struct vm_region *r0, vaddr_t va,
347 				   size_t len,
348 				   bool (*cmp_regs)(const struct vm_region *r0,
349 						    const struct vm_region *r,
350 						    const struct vm_region *rn))
351 {
352 	struct vm_region *r = r0;
353 	vaddr_t end_va = 0;
354 
355 	if (ADD_OVERFLOW(va, len, &end_va))
356 		return false;
357 
358 	while (true) {
359 		struct vm_region *r_next = TAILQ_NEXT(r, link);
360 		vaddr_t r_end_va = r->va + r->size;
361 
362 		if (r_end_va >= end_va)
363 			return true;
364 		if (!r_next)
365 			return false;
366 		if (r_end_va != r_next->va)
367 			return false;
368 		if (cmp_regs && !cmp_regs(r0, r, r_next))
369 			return false;
370 		r = r_next;
371 	}
372 }
373 
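/*
 * split_vm_region() - split @r at @va into two regions backed by the same
 * mobj, keeping attributes and flags, so that @va becomes the start of the
 * second region.
 */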
374 static TEE_Result split_vm_region(struct user_mode_ctx *uctx,
375 				  struct vm_region *r, vaddr_t va)
376 {
377 	struct vm_region *r2 = NULL;
378 	size_t diff = va - r->va;
379 
380 	assert(diff && diff < r->size);
381 
382 	r2 = calloc(1, sizeof(*r2));
383 	if (!r2)
384 		return TEE_ERROR_OUT_OF_MEMORY;
385 
386 	if (mobj_is_paged(r->mobj)) {
387 		TEE_Result res = tee_pager_split_um_region(uctx, va);
388 
389 		if (res) {
390 			free(r2);
391 			return res;
392 		}
393 	}
394 
395 	r2->mobj = mobj_get(r->mobj);
396 	r2->offset = r->offset + diff;
397 	r2->va = va;
398 	r2->size = r->size - diff;
399 	r2->attr = r->attr;
400 	r2->flags = r->flags;
401 
402 	r->size = diff;
403 
404 	TAILQ_INSERT_AFTER(&uctx->vm_info.regions, r, r2, link);
405 
406 	return TEE_SUCCESS;
407 }
408 
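/*
 * split_vm_range() - make sure the range @va..@va + @len starts and ends
 * on region boundaries, splitting regions where needed. @cmp_regs, if
 * non-NULL, must accept each pair of adjacent regions for the range to be
 * considered contiguous. On success *@r0_ret points to the first region of
 * the range.
 */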
409 static TEE_Result split_vm_range(struct user_mode_ctx *uctx, vaddr_t va,
410 				 size_t len,
411 				 bool (*cmp_regs)(const struct vm_region *r0,
412 						  const struct vm_region *r,
413 						  const struct vm_region *rn),
414 				 struct vm_region **r0_ret)
415 {
416 	TEE_Result res = TEE_SUCCESS;
417 	struct vm_region *r = NULL;
418 	vaddr_t end_va = 0;
419 
420 	if ((va | len) & SMALL_PAGE_MASK)
421 		return TEE_ERROR_BAD_PARAMETERS;
422 
423 	if (ADD_OVERFLOW(va, len, &end_va))
424 		return TEE_ERROR_BAD_PARAMETERS;
425 
426 	/*
427 	 * Find first vm_region in range and check that the entire range is
428 	 * contiguous.
429 	 */
430 	r = find_vm_region(&uctx->vm_info, va);
431 	if (!r || !va_range_is_contiguous(r, va, len, cmp_regs))
432 		return TEE_ERROR_BAD_PARAMETERS;
433 
434 	/*
435 	 * If needed, split regions so that va and len cover only complete
436 	 * regions.
437 	 */
438 	if (va != r->va) {
439 		res = split_vm_region(uctx, r, va);
440 		if (res)
441 			return res;
442 		r = TAILQ_NEXT(r, link);
443 	}
444 
445 	*r0_ret = r;
446 	r = find_vm_region(&uctx->vm_info, va + len - 1);
447 	if (!r)
448 		return TEE_ERROR_BAD_PARAMETERS;
449 	if (end_va != r->va + r->size) {
450 		res = split_vm_region(uctx, r, end_va);
451 		if (res)
452 			return res;
453 	}
454 
455 	return TEE_SUCCESS;
456 }
457 
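/*
 * merge_vm_range() - merge adjacent regions in and around @va..@va + @len
 * that are backed by the same mobj with contiguous offsets and identical
 * attributes and flags.
 */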
458 static void merge_vm_range(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
459 {
460 	struct vm_region *r_next = NULL;
461 	struct vm_region *r = NULL;
462 	vaddr_t end_va = 0;
463 
464 	if (ADD_OVERFLOW(va, len, &end_va))
465 		return;
466 
467 	tee_pager_merge_um_region(uctx, va, len);
468 
469 	for (r = TAILQ_FIRST(&uctx->vm_info.regions);; r = r_next) {
470 		r_next = TAILQ_NEXT(r, link);
471 		if (!r_next)
472 			return;
473 
474 		/* Try merging with the region just before va */
475 		if (r->va + r->size < va)
476 			continue;
477 
478 		/*
479 		 * If r->va is well past our range we're done.
480 		 * Note that if it's just the page after our range we'll
481 		 * try to merge.
482 		 */
483 		if (r->va > end_va)
484 			return;
485 
486 		if (r->va + r->size != r_next->va)
487 			continue;
488 		if (r->mobj != r_next->mobj ||
489 		    r->flags != r_next->flags ||
490 		    r->attr != r_next->attr)
491 			continue;
492 		if (r->offset + r->size != r_next->offset)
493 			continue;
494 
495 		TAILQ_REMOVE(&uctx->vm_info.regions, r_next, link);
496 		r->size += r_next->size;
497 		mobj_put(r_next->mobj);
498 		free(r_next);
499 		r_next = r;
500 	}
501 }
502 
503 static bool cmp_region_for_remap(const struct vm_region *r0,
504 				 const struct vm_region *r,
505 				 const struct vm_region *rn)
506 {
507 	/*
508 	 * All the essentials have to match for a remap to make sense: the
509 	 * mobj/fobj, attr and flags must be equal and the offsets must be
510 	 * contiguous.
511 	 *
512 	 * Note that vm_remap() depends on the mobj/fobj being the same.
513 	 */
514 	return r0->flags == r->flags && r0->attr == r->attr &&
515 	       r0->mobj == r->mobj && rn->offset == r->offset + r->size;
516 }
517 
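/*
 * vm_remap() - move the mapping @old_va..@old_va + @len to a new virtual
 * address. The affected regions are unmapped and reinserted at *@new_va
 * (or at a selected address if *@new_va is 0); on failure they are
 * restored at their original location. On success *@new_va holds the new
 * address of the first region.
 */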
518 TEE_Result vm_remap(struct user_mode_ctx *uctx, vaddr_t *new_va, vaddr_t old_va,
519 		    size_t len, size_t pad_begin, size_t pad_end)
520 {
521 	struct vm_region_head regs = TAILQ_HEAD_INITIALIZER(regs);
522 	TEE_Result res = TEE_SUCCESS;
523 	struct vm_region *r0 = NULL;
524 	struct vm_region *r = NULL;
525 	struct vm_region *r_next = NULL;
526 	struct vm_region *r_last = NULL;
527 	struct vm_region *r_first = NULL;
528 	struct fobj *fobj = NULL;
529 	vaddr_t next_va = 0;
530 
531 	assert(thread_get_tsd()->ctx == uctx->ts_ctx);
532 
533 	if (!len || ((len | old_va) & SMALL_PAGE_MASK))
534 		return TEE_ERROR_BAD_PARAMETERS;
535 
536 	res = split_vm_range(uctx, old_va, len, cmp_region_for_remap, &r0);
537 	if (res)
538 		return res;
539 
540 	if (mobj_is_paged(r0->mobj)) {
541 		fobj = mobj_get_fobj(r0->mobj);
542 		if (!fobj)
543 			panic();
544 	}
545 
546 	for (r = r0; r; r = r_next) {
547 		if (r->va + r->size > old_va + len)
548 			break;
549 		r_next = TAILQ_NEXT(r, link);
550 		rem_um_region(uctx, r);
551 		TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
552 		TAILQ_INSERT_TAIL(&regs, r, link);
553 	}
554 
555 	/*
556 	 * Synchronize changes to translation tables. Even though the pager
557 	 * case unmaps immediately we may still free a translation table.
558 	 */
559 	vm_set_ctx(uctx->ts_ctx);
560 
561 	r_first = TAILQ_FIRST(&regs);
562 	while (!TAILQ_EMPTY(&regs)) {
563 		r = TAILQ_FIRST(&regs);
564 		TAILQ_REMOVE(&regs, r, link);
565 		if (r_last) {
566 			r->va = r_last->va + r_last->size;
567 			res = umap_add_region(&uctx->vm_info, r, 0, 0, 0);
568 		} else {
569 			r->va = *new_va;
570 			res = umap_add_region(&uctx->vm_info, r, pad_begin,
571 					      pad_end + len - r->size, 0);
572 		}
573 		if (!res)
574 			r_last = r;
575 		if (!res)
576 			res = alloc_pgt(uctx);
577 		if (fobj && !res)
578 			res = tee_pager_add_um_region(uctx, r->va, fobj,
579 						      r->attr);
580 
581 		if (res) {
582 			/*
583 			 * Something went wrong; move all the recently added
584 			 * regions back to regs for later reinsertion at
585 			 * the original spot.
586 			 */
587 			struct vm_region *r_tmp = NULL;
588 
589 			if (r != r_last) {
590 				/*
591 				 * umap_add_region() failed, move r back to
592 				 * regs before all the rest are moved back.
593 				 */
594 				TAILQ_INSERT_HEAD(&regs, r, link);
595 			}
596 			for (r = r_first; r_last && r != r_last; r = r_next) {
597 				r_next = TAILQ_NEXT(r, link);
598 				TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
599 				if (r_tmp)
600 					TAILQ_INSERT_AFTER(&regs, r_tmp, r,
601 							   link);
602 				else
603 					TAILQ_INSERT_HEAD(&regs, r, link);
604 				r_tmp = r;
605 			}
606 
607 			goto err_restore_map;
608 		}
609 	}
610 
611 	fobj_put(fobj);
612 
613 	vm_set_ctx(uctx->ts_ctx);
614 	*new_va = r_first->va;
615 
616 	return TEE_SUCCESS;
617 
618 err_restore_map:
619 	next_va = old_va;
620 	while (!TAILQ_EMPTY(&regs)) {
621 		r = TAILQ_FIRST(&regs);
622 		TAILQ_REMOVE(&regs, r, link);
623 		r->va = next_va;
624 		next_va += r->size;
625 		if (umap_add_region(&uctx->vm_info, r, 0, 0, 0))
626 			panic("Cannot restore mapping");
627 		if (alloc_pgt(uctx))
628 			panic("Cannot restore mapping");
629 		if (fobj && tee_pager_add_um_region(uctx, r->va, fobj, r->attr))
630 			panic("Cannot restore mapping");
631 	}
632 	fobj_put(fobj);
633 	vm_set_ctx(uctx->ts_ctx);
634 
635 	return res;
636 }
637 
638 static bool cmp_region_for_get_flags(const struct vm_region *r0,
639 				     const struct vm_region *r,
640 				     const struct vm_region *rn __unused)
641 {
642 	return r0->flags == r->flags;
643 }
644 
645 TEE_Result vm_get_flags(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
646 			uint32_t *flags)
647 {
648 	struct vm_region *r = NULL;
649 
650 	if (!len || ((len | va) & SMALL_PAGE_MASK))
651 		return TEE_ERROR_BAD_PARAMETERS;
652 
653 	r = find_vm_region(&uctx->vm_info, va);
654 	if (!r)
655 		return TEE_ERROR_BAD_PARAMETERS;
656 
657 	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_flags))
658 		return TEE_ERROR_BAD_PARAMETERS;
659 
660 	*flags = r->flags;
661 
662 	return TEE_SUCCESS;
663 }
664 
665 static bool cmp_region_for_get_prot(const struct vm_region *r0,
666 				    const struct vm_region *r,
667 				    const struct vm_region *rn __unused)
668 {
669 	return (r0->attr & TEE_MATTR_PROT_MASK) ==
670 	       (r->attr & TEE_MATTR_PROT_MASK);
671 }
672 
673 TEE_Result vm_get_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
674 		       uint16_t *prot)
675 {
676 	struct vm_region *r = NULL;
677 
678 	if (!len || ((len | va) & SMALL_PAGE_MASK))
679 		return TEE_ERROR_BAD_PARAMETERS;
680 
681 	r = find_vm_region(&uctx->vm_info, va);
682 	if (!r)
683 		return TEE_ERROR_BAD_PARAMETERS;
684 
685 	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_prot))
686 		return TEE_ERROR_BAD_PARAMETERS;
687 
688 	*prot = r->attr & TEE_MATTR_PROT_MASK;
689 
690 	return TEE_SUCCESS;
691 }
692 
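/*
 * vm_set_prot() - change the protection bits of the range @va..@va + @len
 * to @prot, splitting regions as needed, synchronizing translation tables
 * and caches, and finally merging regions that have become compatible.
 */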
693 TEE_Result vm_set_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
694 		       uint32_t prot)
695 {
696 	TEE_Result res = TEE_SUCCESS;
697 	struct vm_region *r0 = NULL;
698 	struct vm_region *r = NULL;
699 	bool was_writeable = false;
700 	bool need_sync = false;
701 
702 	assert(thread_get_tsd()->ctx == uctx->ts_ctx);
703 
704 	if (prot & ~TEE_MATTR_PROT_MASK || !len)
705 		return TEE_ERROR_BAD_PARAMETERS;
706 
707 	res = split_vm_range(uctx, va, len, NULL, &r0);
708 	if (res)
709 		return res;
710 
711 	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
712 		if (r->va + r->size > va + len)
713 			break;
714 		if (r->attr & (TEE_MATTR_UW | TEE_MATTR_PW))
715 			was_writeable = true;
716 
717 		if (!mobj_is_paged(r->mobj))
718 			need_sync = true;
719 
720 		r->attr &= ~TEE_MATTR_PROT_MASK;
721 		r->attr |= prot;
722 	}
723 
724 	if (need_sync) {
725 		/* Synchronize changes to translation tables */
726 		vm_set_ctx(uctx->ts_ctx);
727 	}
728 
729 	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
730 		if (r->va + r->size > va + len)
731 			break;
732 		if (mobj_is_paged(r->mobj)) {
733 			if (!tee_pager_set_um_region_attr(uctx, r->va, r->size,
734 							  prot))
735 				panic();
736 		} else if (was_writeable) {
737 			cache_op_inner(DCACHE_AREA_CLEAN, (void *)r->va,
738 				       r->size);
739 		}
740 
741 	}
742 	if (need_sync && was_writeable)
743 		cache_op_inner(ICACHE_INVALIDATE, NULL, 0);
744 
745 	merge_vm_range(uctx, va, len);
746 
747 	return TEE_SUCCESS;
748 }
749 
750 static void umap_remove_region(struct vm_info *vmi, struct vm_region *reg)
751 {
752 	TAILQ_REMOVE(&vmi->regions, reg, link);
753 	mobj_put(reg->mobj);
754 	free(reg);
755 }
756 
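/*
 * vm_unmap() - unmap the range @va..@va + @len (@len is rounded up to a
 * small page boundary), splitting regions as needed so that only complete
 * regions are removed.
 */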
757 TEE_Result vm_unmap(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
758 {
759 	TEE_Result res = TEE_SUCCESS;
760 	struct vm_region *r = NULL;
761 	struct vm_region *r_next = NULL;
762 	size_t end_va = 0;
763 	size_t unmap_end_va = 0;
764 	size_t l = 0;
765 
766 	assert(thread_get_tsd()->ctx == uctx->ts_ctx);
767 
768 	if (ROUNDUP_OVERFLOW(len, SMALL_PAGE_SIZE, &l))
769 		return TEE_ERROR_BAD_PARAMETERS;
770 
771 	if (!l || (va & SMALL_PAGE_MASK))
772 		return TEE_ERROR_BAD_PARAMETERS;
773 
774 	if (ADD_OVERFLOW(va, l, &end_va))
775 		return TEE_ERROR_BAD_PARAMETERS;
776 
777 	res = split_vm_range(uctx, va, l, NULL, &r);
778 	if (res)
779 		return res;
780 
781 	while (true) {
782 		r_next = TAILQ_NEXT(r, link);
783 		unmap_end_va = r->va + r->size;
784 		rem_um_region(uctx, r);
785 		umap_remove_region(&uctx->vm_info, r);
786 		if (!r_next || unmap_end_va == end_va)
787 			break;
788 		r = r_next;
789 	}
790 
791 	return TEE_SUCCESS;
792 }
793 
794 static TEE_Result map_kinit(struct user_mode_ctx *uctx)
795 {
796 	TEE_Result res;
797 	struct mobj *mobj;
798 	size_t offs;
799 	vaddr_t va;
800 	size_t sz;
801 
802 	thread_get_user_kcode(&mobj, &offs, &va, &sz);
803 	if (sz) {
804 		res = vm_map(uctx, &va, sz, TEE_MATTR_PRX, VM_FLAG_PERMANENT,
805 			     mobj, offs);
806 		if (res)
807 			return res;
808 	}
809 
810 	thread_get_user_kdata(&mobj, &offs, &va, &sz);
811 	if (sz)
812 		return vm_map(uctx, &va, sz, TEE_MATTR_PRW, VM_FLAG_PERMANENT,
813 			      mobj, offs);
814 
815 	return TEE_SUCCESS;
816 }
817 
818 TEE_Result vm_info_init(struct user_mode_ctx *uctx)
819 {
820 	TEE_Result res;
821 	uint32_t asid = asid_alloc();
822 
823 	if (!asid) {
824 		DMSG("Failed to allocate ASID");
825 		return TEE_ERROR_GENERIC;
826 	}
827 
828 	memset(&uctx->vm_info, 0, sizeof(uctx->vm_info));
829 	TAILQ_INIT(&uctx->vm_info.regions);
830 	uctx->vm_info.asid = asid;
831 
832 	res = map_kinit(uctx);
833 	if (res)
834 		vm_info_final(uctx);
835 	return res;
836 }
837 
838 void vm_clean_param(struct user_mode_ctx *uctx)
839 {
840 	struct vm_region *next_r;
841 	struct vm_region *r;
842 
843 	TAILQ_FOREACH_SAFE(r, &uctx->vm_info.regions, link, next_r) {
844 		if (r->flags & VM_FLAG_EPHEMERAL) {
845 			rem_um_region(uctx, r);
846 			umap_remove_region(&uctx->vm_info, r);
847 		}
848 	}
849 }
850 
851 static void check_param_map_empty(struct user_mode_ctx *uctx __maybe_unused)
852 {
853 	struct vm_region *r = NULL;
854 
855 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
856 		assert(!(r->flags & VM_FLAG_EPHEMERAL));
857 }
858 
859 static TEE_Result param_mem_to_user_va(struct user_mode_ctx *uctx,
860 				       struct param_mem *mem, void **user_va)
861 {
862 	struct vm_region *region = NULL;
863 
864 	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
865 		vaddr_t va = 0;
866 		size_t phys_offs = 0;
867 
868 		if (!(region->flags & VM_FLAG_EPHEMERAL))
869 			continue;
870 		if (mem->mobj != region->mobj)
871 			continue;
872 
873 		phys_offs = mobj_get_phys_offs(mem->mobj,
874 					       CORE_MMU_USER_PARAM_SIZE);
875 		phys_offs += mem->offs;
876 		if (phys_offs < region->offset)
877 			continue;
878 		if (phys_offs >= (region->offset + region->size))
879 			continue;
880 		va = region->va + phys_offs - region->offset;
881 		*user_va = (void *)va;
882 		return TEE_SUCCESS;
883 	}
884 	return TEE_ERROR_GENERIC;
885 }
886 
887 static int cmp_param_mem(const void *a0, const void *a1)
888 {
889 	const struct param_mem *m1 = a1;
890 	const struct param_mem *m0 = a0;
891 	int ret;
892 
893 	/* Make sure that invalid param_mem entries are placed last in the array */
894 	if (!m0->mobj && !m1->mobj)
895 		return 0;
896 	if (!m0->mobj)
897 		return 1;
898 	if (!m1->mobj)
899 		return -1;
900 
901 	ret = CMP_TRILEAN(mobj_is_secure(m0->mobj), mobj_is_secure(m1->mobj));
902 	if (ret)
903 		return ret;
904 
905 	ret = CMP_TRILEAN((vaddr_t)m0->mobj, (vaddr_t)m1->mobj);
906 	if (ret)
907 		return ret;
908 
909 	ret = CMP_TRILEAN(m0->offs, m1->offs);
910 	if (ret)
911 		return ret;
912 
913 	return CMP_TRILEAN(m0->size, m1->size);
914 }
915 
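/*
 * vm_map_param() - map the memref parameters in @param as ephemeral,
 * shareable regions and return the resulting user virtual addresses in
 * @param_va[]. Overlapping or adjacent parameters backed by the same mobj
 * are coalesced into a single mapping.
 */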
916 TEE_Result vm_map_param(struct user_mode_ctx *uctx, struct tee_ta_param *param,
917 			void *param_va[TEE_NUM_PARAMS])
918 {
919 	TEE_Result res = TEE_SUCCESS;
920 	size_t n;
921 	size_t m;
922 	struct param_mem mem[TEE_NUM_PARAMS];
923 
924 	memset(mem, 0, sizeof(mem));
925 	for (n = 0; n < TEE_NUM_PARAMS; n++) {
926 		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
927 		size_t phys_offs;
928 
929 		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
930 		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
931 		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
932 			continue;
933 		phys_offs = mobj_get_phys_offs(param->u[n].mem.mobj,
934 					       CORE_MMU_USER_PARAM_SIZE);
935 		mem[n].mobj = param->u[n].mem.mobj;
936 		mem[n].offs = ROUNDDOWN(phys_offs + param->u[n].mem.offs,
937 					CORE_MMU_USER_PARAM_SIZE);
938 		mem[n].size = ROUNDUP(phys_offs + param->u[n].mem.offs -
939 				      mem[n].offs + param->u[n].mem.size,
940 				      CORE_MMU_USER_PARAM_SIZE);
941 		/*
942 		 * For size 0 (raw pointer parameter), add a minimum size
943 		 * value to allow the address to be mapped.
944 		 */
945 		if (!mem[n].size)
946 			mem[n].size = CORE_MMU_USER_PARAM_SIZE;
947 	}
948 
949 	/*
950 	 * Sort arguments so NULL mobj is last, secure mobjs first, then by
951 	 * mobj pointer value since those entries can't be merged either,
952 	 * finally by offset.
953 	 *
954 	 * This should result in a list where all mergeable entries are
955 	 * next to each other and unused/invalid entries are at the end.
956 	 */
957 	qsort(mem, TEE_NUM_PARAMS, sizeof(struct param_mem), cmp_param_mem);
958 
959 	for (n = 1, m = 0; n < TEE_NUM_PARAMS && mem[n].mobj; n++) {
960 		if (mem[n].mobj == mem[m].mobj &&
961 		    (mem[n].offs == (mem[m].offs + mem[m].size) ||
962 		     core_is_buffer_intersect(mem[m].offs, mem[m].size,
963 					      mem[n].offs, mem[n].size))) {
964 			mem[m].size = mem[n].offs + mem[n].size - mem[m].offs;
965 			continue;
966 		}
967 		m++;
968 		if (n != m)
969 			mem[m] = mem[n];
970 	}
971 	/*
972 	 * We'd like 'm' to be the number of valid entries. Here 'm' is the
973 	 * index of the last valid entry if the first entry is valid, else
974 	 * 0.
975 	 */
976 	if (mem[0].mobj)
977 		m++;
978 
979 	check_param_map_empty(uctx);
980 
981 	for (n = 0; n < m; n++) {
982 		vaddr_t va = 0;
983 
984 		res = vm_map(uctx, &va, mem[n].size,
985 			     TEE_MATTR_PRW | TEE_MATTR_URW,
986 			     VM_FLAG_EPHEMERAL | VM_FLAG_SHAREABLE,
987 			     mem[n].mobj, mem[n].offs);
988 		if (res)
989 			goto out;
990 	}
991 
992 	for (n = 0; n < TEE_NUM_PARAMS; n++) {
993 		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
994 
995 		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
996 		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
997 		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
998 			continue;
999 		if (!param->u[n].mem.mobj)
1000 			continue;
1001 
1002 		res = param_mem_to_user_va(uctx, &param->u[n].mem,
1003 					   param_va + n);
1004 		if (res != TEE_SUCCESS)
1005 			goto out;
1006 	}
1007 
1008 	res = alloc_pgt(uctx);
1009 out:
1010 	if (res)
1011 		vm_clean_param(uctx);
1012 
1013 	return res;
1014 }
1015 
1016 TEE_Result vm_add_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj,
1017 			vaddr_t *va)
1018 {
1019 	TEE_Result res;
1020 	struct vm_region *reg = calloc(1, sizeof(*reg));
1021 
1022 	if (!reg)
1023 		return TEE_ERROR_OUT_OF_MEMORY;
1024 
1025 	reg->mobj = mobj;
1026 	reg->offset = 0;
1027 	reg->va = 0;
1028 	reg->size = ROUNDUP(mobj->size, SMALL_PAGE_SIZE);
1029 	if (mobj_is_secure(mobj))
1030 		reg->attr = TEE_MATTR_SECURE;
1031 	else
1032 		reg->attr = 0;
1033 
1034 	res = umap_add_region(&uctx->vm_info, reg, 0, 0, 0);
1035 	if (res) {
1036 		free(reg);
1037 		return res;
1038 	}
1039 
1040 	res = alloc_pgt(uctx);
1041 	if (res)
1042 		umap_remove_region(&uctx->vm_info, reg);
1043 	else
1044 		*va = reg->va;
1045 
1046 	return res;
1047 }
1048 
1049 void vm_rem_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj, vaddr_t va)
1050 {
1051 	struct vm_region *r = NULL;
1052 
1053 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1054 		if (r->mobj == mobj && r->va == va) {
1055 			rem_um_region(uctx, r);
1056 			umap_remove_region(&uctx->vm_info, r);
1057 			return;
1058 		}
1059 	}
1060 }
1061 
1062 void vm_info_final(struct user_mode_ctx *uctx)
1063 {
1064 	if (!uctx->vm_info.asid)
1065 		return;
1066 
1067 	/* Clear MMU entries to avoid a clash when the ASID is reused */
1068 	tlbi_asid(uctx->vm_info.asid);
1069 
1070 	asid_free(uctx->vm_info.asid);
1071 	while (!TAILQ_EMPTY(&uctx->vm_info.regions))
1072 		umap_remove_region(&uctx->vm_info,
1073 				   TAILQ_FIRST(&uctx->vm_info.regions));
1074 	memset(&uctx->vm_info, 0, sizeof(uctx->vm_info));
1075 }
1076 
1077 /* return true only if buffer fits inside TA private memory */
1078 bool vm_buf_is_inside_um_private(const struct user_mode_ctx *uctx,
1079 				 const void *va, size_t size)
1080 {
1081 	struct vm_region *r = NULL;
1082 
1083 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1084 		if (r->flags & VM_FLAGS_NONPRIV)
1085 			continue;
1086 		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size))
1087 			return true;
1088 	}
1089 
1090 	return false;
1091 }
1092 
1093 /* return true only if buffer intersects TA private memory */
1094 bool vm_buf_intersects_um_private(const struct user_mode_ctx *uctx,
1095 				  const void *va, size_t size)
1096 {
1097 	struct vm_region *r = NULL;
1098 
1099 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1100 		if (r->flags & VM_FLAGS_NONPRIV)
1101 			continue;
1102 		if (core_is_buffer_intersect((vaddr_t)va, size, r->va, r->size))
1103 			return true;
1104 	}
1105 
1106 	return false;
1107 }
1108 
1109 TEE_Result vm_buf_to_mboj_offs(const struct user_mode_ctx *uctx,
1110 			       const void *va, size_t size,
1111 			       struct mobj **mobj, size_t *offs)
1112 {
1113 	struct vm_region *r = NULL;
1114 
1115 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1116 		if (!r->mobj)
1117 			continue;
1118 		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size)) {
1119 			size_t poffs;
1120 
1121 			poffs = mobj_get_phys_offs(r->mobj,
1122 						   CORE_MMU_USER_PARAM_SIZE);
1123 			*mobj = r->mobj;
1124 			*offs = (vaddr_t)va - r->va + r->offset - poffs;
1125 			return TEE_SUCCESS;
1126 		}
1127 	}
1128 
1129 	return TEE_ERROR_BAD_PARAMETERS;
1130 }
1131 
1132 static TEE_Result tee_mmu_user_va2pa_attr(const struct user_mode_ctx *uctx,
1133 					  void *ua, paddr_t *pa, uint32_t *attr)
1134 {
1135 	struct vm_region *region = NULL;
1136 
1137 	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
1138 		if (!core_is_buffer_inside((vaddr_t)ua, 1, region->va,
1139 					   region->size))
1140 			continue;
1141 
1142 		if (pa) {
1143 			TEE_Result res;
1144 			paddr_t p;
1145 			size_t offset;
1146 			size_t granule;
1147 
1148 			/*
1149 			 * The mobj and the input user address may each
1150 			 * include a specific offset-in-granule position.
1151 			 * Drop both to get the target physical page base
1152 			 * address, then apply only the user address
1153 			 * offset-in-granule.
1154 			 * The smallest mapping granule is the small page.
1155 			 */
1156 			granule = MAX(region->mobj->phys_granule,
1157 				      (size_t)SMALL_PAGE_SIZE);
1158 			assert(!granule || IS_POWER_OF_TWO(granule));
1159 
1160 			offset = region->offset +
1161 				 ROUNDDOWN((vaddr_t)ua - region->va, granule);
1162 
1163 			res = mobj_get_pa(region->mobj, offset, granule, &p);
1164 			if (res != TEE_SUCCESS)
1165 				return res;
1166 
1167 			*pa = p | ((vaddr_t)ua & (granule - 1));
1168 		}
1169 		if (attr)
1170 			*attr = region->attr;
1171 
1172 		return TEE_SUCCESS;
1173 	}
1174 
1175 	return TEE_ERROR_ACCESS_DENIED;
1176 }
1177 
1178 TEE_Result vm_va2pa(const struct user_mode_ctx *uctx, void *ua, paddr_t *pa)
1179 {
1180 	return tee_mmu_user_va2pa_attr(uctx, ua, pa, NULL);
1181 }
1182 
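/*
 * vm_pa2va() - translate the physical address @pa back to a user virtual
 * address by scanning the regions (and their physical granules) of @uctx.
 * Returns NULL if @pa is not mapped.
 */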
1183 void *vm_pa2va(const struct user_mode_ctx *uctx, paddr_t pa)
1184 {
1185 	paddr_t p = 0;
1186 	struct vm_region *region = NULL;
1187 
1188 	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
1189 		size_t granule = 0;
1190 		size_t size = 0;
1191 		size_t ofs = 0;
1192 
1193 		/* pa2va is expected only for memory tracked through mobj */
1194 		if (!region->mobj)
1195 			continue;
1196 
1197 		/* Physically granulated memory object must be scanned */
1198 		granule = region->mobj->phys_granule;
1199 		assert(!granule || IS_POWER_OF_TWO(granule));
1200 
1201 		for (ofs = region->offset; ofs < region->size; ofs += size) {
1202 
1203 			if (granule) {
1204 				/* From current offset to buffer/granule end */
1205 				size = granule - (ofs & (granule - 1));
1206 
1207 				if (size > (region->size - ofs))
1208 					size = region->size - ofs;
1209 			} else {
1210 				size = region->size;
1211 			}
1212 
1213 			if (mobj_get_pa(region->mobj, ofs, granule, &p))
1214 				continue;
1215 
1216 			if (core_is_buffer_inside(pa, 1, p, size)) {
1217 				/* Remove region offset (mobj phys offset) */
1218 				ofs -= region->offset;
1219 				/* Get offset-in-granule */
1220 				p = pa - p;
1221 
1222 				return (void *)(region->va + ofs + (vaddr_t)p);
1223 			}
1224 		}
1225 	}
1226 
1227 	return NULL;
1228 }
1229 
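/*
 * vm_check_access_rights() - check that the user address range
 * @uaddr..@uaddr + @len is mapped with the access rights requested in
 * @flags (TEE_MEMORY_ACCESS_* bits), walking the range at the smallest
 * mapping granularity, e.g. (sketch):
 *
 *	res = vm_check_access_rights(uctx,
 *				     TEE_MEMORY_ACCESS_READ |
 *				     TEE_MEMORY_ACCESS_ANY_OWNER,
 *				     (uaddr_t)buf, size);
 */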
1230 TEE_Result vm_check_access_rights(const struct user_mode_ctx *uctx,
1231 				  uint32_t flags, uaddr_t uaddr, size_t len)
1232 {
1233 	uaddr_t a = 0;
1234 	uaddr_t end_addr = 0;
1235 	size_t addr_incr = MIN(CORE_MMU_USER_CODE_SIZE,
1236 			       CORE_MMU_USER_PARAM_SIZE);
1237 
1238 	if (ADD_OVERFLOW(uaddr, len, &end_addr))
1239 		return TEE_ERROR_ACCESS_DENIED;
1240 
1241 	if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
1242 	    (flags & TEE_MEMORY_ACCESS_SECURE))
1243 		return TEE_ERROR_ACCESS_DENIED;
1244 
1245 	/*
1246 	 * Rely on the TA private memory test to check whether the address
1247 	 * range is private to the TA or not.
1248 	 */
1249 	if (!(flags & TEE_MEMORY_ACCESS_ANY_OWNER) &&
1250 	   !vm_buf_is_inside_um_private(uctx, (void *)uaddr, len))
1251 		return TEE_ERROR_ACCESS_DENIED;
1252 
1253 	for (a = ROUNDDOWN(uaddr, addr_incr); a < end_addr; a += addr_incr) {
1254 		uint32_t attr;
1255 		TEE_Result res;
1256 
1257 		res = tee_mmu_user_va2pa_attr(uctx, (void *)a, NULL, &attr);
1258 		if (res != TEE_SUCCESS)
1259 			return res;
1260 
1261 		if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
1262 		    (attr & TEE_MATTR_SECURE))
1263 			return TEE_ERROR_ACCESS_DENIED;
1264 
1265 		if ((flags & TEE_MEMORY_ACCESS_SECURE) &&
1266 		    !(attr & TEE_MATTR_SECURE))
1267 			return TEE_ERROR_ACCESS_DENIED;
1268 
1269 		if ((flags & TEE_MEMORY_ACCESS_WRITE) && !(attr & TEE_MATTR_UW))
1270 			return TEE_ERROR_ACCESS_DENIED;
1271 		if ((flags & TEE_MEMORY_ACCESS_READ) && !(attr & TEE_MATTR_UR))
1272 			return TEE_ERROR_ACCESS_DENIED;
1273 	}
1274 
1275 	return TEE_SUCCESS;
1276 }
1277 
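/*
 * vm_set_ctx() - activate the memory mapping of @ctx on the current
 * thread: the previous user mapping is dropped and its cached translation
 * tables are freed or saved, then, if @ctx is a user mode context, its
 * user map is created and installed.
 */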
1278 void vm_set_ctx(struct ts_ctx *ctx)
1279 {
1280 	struct thread_specific_data *tsd = thread_get_tsd();
1281 
1282 	core_mmu_set_user_map(NULL);
1283 	/*
1284 	 * No matter what happens below, the current user TA will not be
1285 	 * current any longer. Make sure the pager is in sync with that.
1286 	 * This function has to be called before there's a chance that
1287 	 * pgt_free_unlocked() is called.
1288 	 *
1289 	 * Save translation tables in a cache if it's a user TA.
1290 	 */
1291 	pgt_free(&tsd->pgt_cache, is_user_ta_ctx(tsd->ctx));
1292 
1293 	if (is_user_mode_ctx(ctx)) {
1294 		struct core_mmu_user_map map = { };
1295 		struct user_mode_ctx *uctx = to_user_mode_ctx(ctx);
1296 
1297 		core_mmu_create_user_map(uctx, &map);
1298 		core_mmu_set_user_map(&map);
1299 		tee_pager_assign_um_tables(uctx);
1300 	}
1301 	tsd->ctx = ctx;
1302 }
1303 
1304