xref: /optee_os/core/mm/vm.c (revision 5118efbe82358fd69fda6e0158a30e59f59ba09d)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <initcall.h>
10 #include <kernel/panic.h>
11 #include <kernel/spinlock.h>
12 #include <kernel/tee_common.h>
13 #include <kernel/tee_misc.h>
14 #include <kernel/tlb_helpers.h>
15 #include <kernel/user_mode_ctx.h>
16 #include <kernel/virtualization.h>
17 #include <mm/core_memprot.h>
18 #include <mm/core_mmu.h>
19 #include <mm/mobj.h>
20 #include <mm/pgt_cache.h>
21 #include <mm/tee_mm.h>
22 #include <mm/tee_mmu_types.h>
23 #include <mm/tee_pager.h>
24 #include <mm/vm.h>
25 #include <sm/optee_smc.h>
26 #include <stdlib.h>
27 #include <tee_api_defines_extensions.h>
28 #include <tee_api_types.h>
29 #include <trace.h>
30 #include <types_ext.h>
31 #include <user_ta_header.h>
32 #include <util.h>
33 
34 #ifdef CFG_PL310
35 #include <kernel/tee_l2cc_mutex.h>
36 #endif
37 
38 #define TEE_MMU_UDATA_ATTR		(TEE_MATTR_VALID_BLOCK | \
39 					 TEE_MATTR_PRW | TEE_MATTR_URW | \
40 					 TEE_MATTR_SECURE)
41 #define TEE_MMU_UCODE_ATTR		(TEE_MATTR_VALID_BLOCK | \
42 					 TEE_MATTR_PRW | TEE_MATTR_URWX | \
43 					 TEE_MATTR_SECURE)
44 
45 #define TEE_MMU_UCACHE_DEFAULT_ATTR	(TEE_MATTR_CACHE_CACHED << \
46 					 TEE_MATTR_CACHE_SHIFT)
47 
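/*
 * Pick a virtual address for @reg in the gap between @prev_reg and
 * @next_reg, honoring the requested padding and the alignment granule.
 * An extra unmapped page is reserved next to a neighbor whose
 * EPHEMERAL/PERMANENT/SHAREABLE flags differ, and without LPAE a
 * pgdir-sized granule is enforced across a secure/non-secure boundary.
 * Returns 0 if no suitable address can be found.
 */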
48 static vaddr_t select_va_in_range(const struct vm_region *prev_reg,
49 				  const struct vm_region *next_reg,
50 				  const struct vm_region *reg,
51 				  size_t pad_begin, size_t pad_end,
52 				  size_t granul)
53 {
54 	const uint32_t f = VM_FLAG_EPHEMERAL | VM_FLAG_PERMANENT |
55 			    VM_FLAG_SHAREABLE;
56 	vaddr_t begin_va = 0;
57 	vaddr_t end_va = 0;
58 	size_t pad = 0;
59 
60 	/*
61 	 * Insert an unmapped entry to separate regions with differing
62 	 * VM_FLAG_EPHEMERAL, VM_FLAG_PERMANENT or VM_FLAG_SHAREABLE
63 	 * bits as they must never be contiguous with another region.
64 	 */
65 	if (prev_reg->flags && (prev_reg->flags & f) != (reg->flags & f))
66 		pad = SMALL_PAGE_SIZE;
67 	else
68 		pad = 0;
69 
70 #ifndef CFG_WITH_LPAE
71 	if ((prev_reg->attr & TEE_MATTR_SECURE) !=
72 	    (reg->attr & TEE_MATTR_SECURE))
73 		granul = CORE_MMU_PGDIR_SIZE;
74 #endif
75 
76 	if (ADD_OVERFLOW(prev_reg->va, prev_reg->size, &begin_va) ||
77 	    ADD_OVERFLOW(begin_va, pad_begin, &begin_va) ||
78 	    ADD_OVERFLOW(begin_va, pad, &begin_va) ||
79 	    ROUNDUP_OVERFLOW(begin_va, granul, &begin_va))
80 		return 0;
81 
82 	if (reg->va) {
83 		if (reg->va < begin_va)
84 			return 0;
85 		begin_va = reg->va;
86 	}
87 
88 	if (next_reg->flags && (next_reg->flags & f) != (reg->flags & f))
89 		pad = SMALL_PAGE_SIZE;
90 	else
91 		pad = 0;
92 
93 #ifndef CFG_WITH_LPAE
94 	if ((next_reg->attr & TEE_MATTR_SECURE) !=
95 	    (reg->attr & TEE_MATTR_SECURE))
96 		granul = CORE_MMU_PGDIR_SIZE;
97 #endif
98 	if (ADD_OVERFLOW(begin_va, reg->size, &end_va) ||
99 	    ADD_OVERFLOW(end_va, pad_end, &end_va) ||
100 	    ADD_OVERFLOW(end_va, pad, &end_va) ||
101 	    ROUNDUP_OVERFLOW(end_va, granul, &end_va))
102 		return 0;
103 
104 	if (end_va <= next_reg->va) {
105 		assert(!reg->va || reg->va == begin_va);
106 		return begin_va;
107 	}
108 
109 	return 0;
110 }
111 
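/*
 * Return the number of CORE_MMU_PGDIR_SIZE translation tables needed to
 * cover all regions currently mapped in @uctx, optionally reporting the
 * pgdir-aligned begin and end of that span.
 */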
112 static size_t get_num_req_pgts(struct user_mode_ctx *uctx, vaddr_t *begin,
113 			       vaddr_t *end)
114 {
115 	vaddr_t b;
116 	vaddr_t e;
117 
118 	if (TAILQ_EMPTY(&uctx->vm_info.regions)) {
119 		core_mmu_get_user_va_range(&b, NULL);
120 		e = b;
121 	} else {
122 		struct vm_region *r;
123 
124 		b = TAILQ_FIRST(&uctx->vm_info.regions)->va;
125 		r = TAILQ_LAST(&uctx->vm_info.regions, vm_region_head);
126 		e = r->va + r->size;
127 		b = ROUNDDOWN(b, CORE_MMU_PGDIR_SIZE);
128 		e = ROUNDUP(e, CORE_MMU_PGDIR_SIZE);
129 	}
130 
131 	if (begin)
132 		*begin = b;
133 	if (end)
134 		*end = e;
135 	return (e - b) >> CORE_MMU_PGDIR_SHIFT;
136 }
137 
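/*
 * Check that enough page tables are available for the VA span of @uctx.
 * With CFG_PAGED_USER_TA the tables are also allocated up front when
 * @uctx belongs to the currently active context, since the pager will
 * need them soon.
 */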
138 static TEE_Result alloc_pgt(struct user_mode_ctx *uctx)
139 {
140 	struct thread_specific_data *tsd __maybe_unused;
141 	vaddr_t b;
142 	vaddr_t e;
143 	size_t ntbl;
144 
145 	ntbl = get_num_req_pgts(uctx, &b, &e);
146 	if (!pgt_check_avail(ntbl)) {
147 		EMSG("%zu page tables not available", ntbl);
148 		return TEE_ERROR_OUT_OF_MEMORY;
149 	}
150 
151 #ifdef CFG_PAGED_USER_TA
152 	tsd = thread_get_tsd();
153 	if (uctx->ts_ctx == tsd->ctx) {
154 		/*
155 		 * The supplied uctx is the currently active uctx; allocate the
156 		 * page tables too as the pager will need to use them soon.
157 		 */
158 		pgt_alloc(&tsd->pgt_cache, uctx->ts_ctx, b, e - 1);
159 	}
160 #endif
161 
162 	return TEE_SUCCESS;
163 }
164 
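/*
 * Flush translation tables used exclusively by region @r. The range is
 * clamped against the neighboring regions so tables shared with them
 * are left untouched.
 */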
165 static void maybe_free_pgt(struct user_mode_ctx *uctx, struct vm_region *r)
166 {
167 	struct thread_specific_data *tsd = NULL;
168 	struct pgt_cache *pgt_cache = NULL;
169 	vaddr_t begin = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE);
170 	vaddr_t last = ROUNDUP(r->va + r->size, CORE_MMU_PGDIR_SIZE);
171 	struct vm_region *r2 = NULL;
172 
173 	r2 = TAILQ_NEXT(r, link);
174 	if (r2)
175 		last = MIN(last, ROUNDDOWN(r2->va, CORE_MMU_PGDIR_SIZE));
176 
177 	r2 = TAILQ_PREV(r, vm_region_head, link);
178 	if (r2)
179 		begin = MAX(begin,
180 			    ROUNDUP(r2->va + r2->size, CORE_MMU_PGDIR_SIZE));
181 
182 	/* If there are no unused page tables, there's nothing left to do */
183 	if (begin >= last)
184 		return;
185 
186 	tsd = thread_get_tsd();
187 	if (uctx->ts_ctx == tsd->ctx)
188 		pgt_cache = &tsd->pgt_cache;
189 
190 	pgt_flush_ctx_range(pgt_cache, uctx->ts_ctx, r->va, r->va + r->size);
191 }
192 
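/*
 * Insert @reg into the address-ordered region list of @vmi. If @reg->va
 * is zero a free virtual address is selected, otherwise the requested
 * address is used provided it fits. The dummy first and last regions
 * stand in for the boundaries of the user VA range.
 */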
193 static TEE_Result umap_add_region(struct vm_info *vmi, struct vm_region *reg,
194 				  size_t pad_begin, size_t pad_end,
195 				  size_t align)
196 {
197 	struct vm_region dummy_first_reg = { };
198 	struct vm_region dummy_last_reg = { };
199 	struct vm_region *r = NULL;
200 	struct vm_region *prev_r = NULL;
201 	vaddr_t va_range_base = 0;
202 	size_t va_range_size = 0;
203 	size_t granul;
204 	vaddr_t va = 0;
205 	size_t offs_plus_size = 0;
206 
207 	core_mmu_get_user_va_range(&va_range_base, &va_range_size);
208 	dummy_first_reg.va = va_range_base;
209 	dummy_last_reg.va = va_range_base + va_range_size;
210 
211 	/* Check alignment; it has to be at least SMALL_PAGE-based */
212 	if ((reg->va | reg->size | pad_begin | pad_end) & SMALL_PAGE_MASK)
213 		return TEE_ERROR_ACCESS_CONFLICT;
214 
215 	/* Check that the mobj is defined for the entire range */
216 	if (ADD_OVERFLOW(reg->offset, reg->size, &offs_plus_size))
217 		return TEE_ERROR_BAD_PARAMETERS;
218 	if (offs_plus_size > ROUNDUP(reg->mobj->size, SMALL_PAGE_SIZE))
219 		return TEE_ERROR_BAD_PARAMETERS;
220 
221 	granul = MAX(align, SMALL_PAGE_SIZE);
222 	if (!IS_POWER_OF_TWO(granul))
223 		return TEE_ERROR_BAD_PARAMETERS;
224 
225 	prev_r = &dummy_first_reg;
226 	TAILQ_FOREACH(r, &vmi->regions, link) {
227 		va = select_va_in_range(prev_r, r, reg, pad_begin, pad_end,
228 					granul);
229 		if (va) {
230 			reg->va = va;
231 			TAILQ_INSERT_BEFORE(r, reg, link);
232 			return TEE_SUCCESS;
233 		}
234 		prev_r = r;
235 	}
236 
237 	r = TAILQ_LAST(&vmi->regions, vm_region_head);
238 	if (!r)
239 		r = &dummy_first_reg;
240 	va = select_va_in_range(r, &dummy_last_reg, reg, pad_begin, pad_end,
241 				granul);
242 	if (va) {
243 		reg->va = va;
244 		TAILQ_INSERT_TAIL(&vmi->regions, reg, link);
245 		return TEE_SUCCESS;
246 	}
247 
248 	return TEE_ERROR_ACCESS_CONFLICT;
249 }
250 
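/*
 * Map @mobj at @offs into @uctx with protection @prot and flags @flags,
 * keeping @pad_begin/@pad_end bytes of unmapped padding around the
 * region and aligning the start to @align (at least SMALL_PAGE_SIZE).
 * A non-zero *@va requests that specific address; on success *@va holds
 * the address actually mapped.
 *
 * A minimal sketch of a caller (hypothetical, not taken from this file):
 *
 *	vaddr_t va = 0;
 *
 *	res = vm_map_pad(uctx, &va, sz, TEE_MATTR_PRW | TEE_MATTR_URW,
 *			 0, mobj, 0, 0, 0, 0);
 */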
251 TEE_Result vm_map_pad(struct user_mode_ctx *uctx, vaddr_t *va, size_t len,
252 		      uint32_t prot, uint32_t flags, struct mobj *mobj,
253 		      size_t offs, size_t pad_begin, size_t pad_end,
254 		      size_t align)
255 {
256 	TEE_Result res = TEE_SUCCESS;
257 	struct vm_region *reg = NULL;
258 	uint32_t attr = 0;
259 
260 	if (prot & ~TEE_MATTR_PROT_MASK)
261 		return TEE_ERROR_BAD_PARAMETERS;
262 
263 	reg = calloc(1, sizeof(*reg));
264 	if (!reg)
265 		return TEE_ERROR_OUT_OF_MEMORY;
266 
267 	if (!mobj_is_paged(mobj)) {
268 		uint32_t cattr;
269 
270 		res = mobj_get_cattr(mobj, &cattr);
271 		if (res)
272 			goto err_free_reg;
273 		attr |= cattr << TEE_MATTR_CACHE_SHIFT;
274 	}
275 	attr |= TEE_MATTR_VALID_BLOCK;
276 	if (mobj_is_secure(mobj))
277 		attr |= TEE_MATTR_SECURE;
278 
279 	reg->mobj = mobj_get(mobj);
280 	reg->offset = offs;
281 	reg->va = *va;
282 	reg->size = ROUNDUP(len, SMALL_PAGE_SIZE);
283 	reg->attr = attr | prot;
284 	reg->flags = flags;
285 
286 	res = umap_add_region(&uctx->vm_info, reg, pad_begin, pad_end, align);
287 	if (res)
288 		goto err_free_reg;
289 
290 	res = alloc_pgt(uctx);
291 	if (res)
292 		goto err_rem_reg;
293 
294 	if (mobj_is_paged(mobj)) {
295 		struct fobj *fobj = mobj_get_fobj(mobj);
296 
297 		if (!fobj) {
298 			res = TEE_ERROR_GENERIC;
299 			goto err_rem_reg;
300 		}
301 
302 		res = tee_pager_add_um_area(uctx, reg->va, fobj, prot);
303 		fobj_put(fobj);
304 		if (res)
305 			goto err_rem_reg;
306 	}
307 
308 	/*
309 	 * If the context is currently active, set it again to update
310 	 * the mapping.
311 	 */
312 	if (thread_get_tsd()->ctx == uctx->ts_ctx)
313 		vm_set_ctx(uctx->ts_ctx);
314 
315 	*va = reg->va;
316 
317 	return TEE_SUCCESS;
318 
319 err_rem_reg:
320 	TAILQ_REMOVE(&uctx->vm_info.regions, reg, link);
321 err_free_reg:
322 	mobj_put(reg->mobj);
323 	free(reg);
324 	return res;
325 }
326 
327 static struct vm_region *find_vm_region(struct vm_info *vm_info, vaddr_t va)
328 {
329 	struct vm_region *r = NULL;
330 
331 	TAILQ_FOREACH(r, &vm_info->regions, link)
332 		if (va >= r->va && va < r->va + r->size)
333 			return r;
334 
335 	return NULL;
336 }
337 
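/*
 * Check that [va, va + len) is covered by regions following each other
 * without holes, starting at @r0. The optional @cmp_regs callback can
 * veto a pair of adjacent regions that aren't compatible for the
 * caller's purpose.
 */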
338 static bool va_range_is_contiguous(struct vm_region *r0, vaddr_t va,
339 				   size_t len,
340 				   bool (*cmp_regs)(const struct vm_region *r0,
341 						    const struct vm_region *r,
342 						    const struct vm_region *rn))
343 {
344 	struct vm_region *r = r0;
345 	vaddr_t end_va = 0;
346 
347 	if (ADD_OVERFLOW(va, len, &end_va))
348 		return false;
349 
350 	while (true) {
351 		struct vm_region *r_next = TAILQ_NEXT(r, link);
352 		vaddr_t r_end_va = r->va + r->size;
353 
354 		if (r_end_va >= end_va)
355 			return true;
356 		if (!r_next)
357 			return false;
358 		if (r_end_va != r_next->va)
359 			return false;
360 		if (cmp_regs && !cmp_regs(r0, r, r_next))
361 			return false;
362 		r = r_next;
363 	}
364 }
365 
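/*
 * Split region @r at @va into two regions backed by the same mobj,
 * keeping attributes and flags and adjusting the offset of the upper
 * half. Paged regions are split in the pager too.
 */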
366 static TEE_Result split_vm_region(struct user_mode_ctx *uctx,
367 				  struct vm_region *r, vaddr_t va)
368 {
369 	struct vm_region *r2 = NULL;
370 	size_t diff = va - r->va;
371 
372 	assert(diff && diff < r->size);
373 
374 	r2 = calloc(1, sizeof(*r2));
375 	if (!r2)
376 		return TEE_ERROR_OUT_OF_MEMORY;
377 
378 	if (mobj_is_paged(r->mobj)) {
379 		TEE_Result res = tee_pager_split_um_region(uctx, va);
380 
381 		if (res) {
382 			free(r2);
383 			return res;
384 		}
385 	}
386 
387 	r2->mobj = mobj_get(r->mobj);
388 	r2->offset = r->offset + diff;
389 	r2->va = va;
390 	r2->size = r->size - diff;
391 	r2->attr = r->attr;
392 	r2->flags = r->flags;
393 
394 	r->size = diff;
395 
396 	TAILQ_INSERT_AFTER(&uctx->vm_info.regions, r, r2, link);
397 
398 	return TEE_SUCCESS;
399 }
400 
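/*
 * Make sure [va, va + len) starts and ends exactly on region boundaries
 * by splitting the first and last region as needed. On success *@r0_ret
 * points at the first region of the range.
 */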
401 static TEE_Result split_vm_range(struct user_mode_ctx *uctx, vaddr_t va,
402 				 size_t len,
403 				 bool (*cmp_regs)(const struct vm_region *r0,
404 						  const struct vm_region *r,
405 						  const struct vm_region *rn),
406 				 struct vm_region **r0_ret)
407 {
408 	TEE_Result res = TEE_SUCCESS;
409 	struct vm_region *r = NULL;
410 	vaddr_t end_va = 0;
411 
412 	if ((va | len) & SMALL_PAGE_MASK)
413 		return TEE_ERROR_BAD_PARAMETERS;
414 
415 	if (ADD_OVERFLOW(va, len, &end_va))
416 		return TEE_ERROR_BAD_PARAMETERS;
417 
418 	/*
419 	 * Find the first vm_region in the range and check that the entire
420 	 * range is contiguous.
421 	 */
422 	r = find_vm_region(&uctx->vm_info, va);
423 	if (!r || !va_range_is_contiguous(r, va, len, cmp_regs))
424 		return TEE_ERROR_BAD_PARAMETERS;
425 
426 	/*
427 	 * If needed, split regions so that va and len cover only complete
428 	 * regions.
429 	 */
430 	if (va != r->va) {
431 		res = split_vm_region(uctx, r, va);
432 		if (res)
433 			return res;
434 		r = TAILQ_NEXT(r, link);
435 	}
436 
437 	*r0_ret = r;
438 	r = find_vm_region(&uctx->vm_info, va + len - 1);
439 	if (!r)
440 		return TEE_ERROR_BAD_PARAMETERS;
441 	if (end_va != r->va + r->size) {
442 		res = split_vm_region(uctx, r, end_va);
443 		if (res)
444 			return res;
445 	}
446 
447 	return TEE_SUCCESS;
448 }
449 
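/*
 * Merge adjacent regions in and around [va, va + len) that share mobj,
 * flags and attributes and have contiguous offsets, undoing earlier
 * splits once the regions are identical again.
 */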
450 static void merge_vm_range(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
451 {
452 	struct vm_region *r_next = NULL;
453 	struct vm_region *r = NULL;
454 	vaddr_t end_va = 0;
455 
456 	if (ADD_OVERFLOW(va, len, &end_va))
457 		return;
458 
459 	tee_pager_merge_um_region(uctx, va, len);
460 
461 	for (r = TAILQ_FIRST(&uctx->vm_info.regions);; r = r_next) {
462 		r_next = TAILQ_NEXT(r, link);
463 		if (!r_next)
464 			return;
465 
466 		/* Try merging with the region just before va */
467 		if (r->va + r->size < va)
468 			continue;
469 
470 		/*
471 		 * If r->va is well past our range we're done.
472 		 * Note that if it's just the page after our range we'll
473 		 * try to merge.
474 		 */
475 		if (r->va > end_va)
476 			return;
477 
478 		if (r->va + r->size != r_next->va)
479 			continue;
480 		if (r->mobj != r_next->mobj ||
481 		    r->flags != r_next->flags ||
482 		    r->attr != r_next->attr)
483 			continue;
484 		if (r->offset + r->size != r_next->offset)
485 			continue;
486 
487 		TAILQ_REMOVE(&uctx->vm_info.regions, r_next, link);
488 		r->size += r_next->size;
489 		mobj_put(r_next->mobj);
490 		free(r_next);
491 		r_next = r;
492 	}
493 }
494 
495 static bool cmp_region_for_remap(const struct vm_region *r0,
496 				 const struct vm_region *r,
497 				 const struct vm_region *rn)
498 {
499 	/*
500 	 * All the essentials have to match for remap to make sense. The
501 	 * essentials are mobj/fobj, attr and flags; in addition, the
502 	 * offsets must be contiguous.
503 	 *
504 	 * Note that vm_remap() depends on mobj/fobj being the same.
505 	 */
506 	return r0->flags == r->flags && r0->attr == r->attr &&
507 	       r0->mobj == r->mobj && rn->offset == r->offset + r->size;
508 }
509 
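/*
 * Move the mapping of [old_va, old_va + len) to a new virtual address,
 * preferably *@new_va (or a free address if *@new_va is zero), with
 * @pad_begin/@pad_end of padding. The regions are unlinked, reinserted
 * back to back at the new location and, for paged mobjs, re-registered
 * with the pager. If anything fails the original mapping is restored.
 */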
510 TEE_Result vm_remap(struct user_mode_ctx *uctx, vaddr_t *new_va, vaddr_t old_va,
511 		    size_t len, size_t pad_begin, size_t pad_end)
512 {
513 	struct vm_region_head regs = TAILQ_HEAD_INITIALIZER(regs);
514 	TEE_Result res = TEE_SUCCESS;
515 	struct vm_region *r0 = NULL;
516 	struct vm_region *r = NULL;
517 	struct vm_region *r_next = NULL;
518 	struct vm_region *r_last = NULL;
519 	struct vm_region *r_first = NULL;
520 	struct fobj *fobj = NULL;
521 	vaddr_t next_va = 0;
522 
523 	assert(thread_get_tsd()->ctx == uctx->ts_ctx);
524 
525 	if (!len || ((len | old_va) & SMALL_PAGE_MASK))
526 		return TEE_ERROR_BAD_PARAMETERS;
527 
528 	res = split_vm_range(uctx, old_va, len, cmp_region_for_remap, &r0);
529 	if (res)
530 		return res;
531 
532 	if (mobj_is_paged(r0->mobj)) {
533 		fobj = mobj_get_fobj(r0->mobj);
534 		if (!fobj)
535 			panic();
536 	}
537 
538 	for (r = r0; r; r = r_next) {
539 		if (r->va + r->size > old_va + len)
540 			break;
541 		r_next = TAILQ_NEXT(r, link);
542 		if (fobj)
543 			tee_pager_rem_um_region(uctx, r->va, r->size);
544 		maybe_free_pgt(uctx, r);
545 		TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
546 		TAILQ_INSERT_TAIL(&regs, r, link);
547 	}
548 
549 	/*
550 	 * Synchronize the change to the translation tables. Even though the
551 	 * pager case unmaps immediately, we may still free a translation table.
552 	 */
553 	vm_set_ctx(uctx->ts_ctx);
554 
555 	r_first = TAILQ_FIRST(&regs);
556 	while (!TAILQ_EMPTY(&regs)) {
557 		r = TAILQ_FIRST(&regs);
558 		TAILQ_REMOVE(&regs, r, link);
559 		if (r_last) {
560 			r->va = r_last->va + r_last->size;
561 			res = umap_add_region(&uctx->vm_info, r, 0, 0, 0);
562 		} else {
563 			r->va = *new_va;
564 			res = umap_add_region(&uctx->vm_info, r, pad_begin,
565 					      pad_end + len - r->size, 0);
566 		}
567 		if (!res)
568 			r_last = r;
569 		if (!res)
570 			res = alloc_pgt(uctx);
571 		if (fobj && !res)
572 			res = tee_pager_add_um_area(uctx, r->va, fobj, r->attr);
573 
574 		if (res) {
575 			/*
576 			 * Something went wrong; move all the recently added
577 			 * regions back to regs for later reinsertion at
578 			 * their original spot.
579 			 */
580 			struct vm_region *r_tmp = NULL;
581 
582 			if (r != r_last) {
583 				/*
584 				 * umap_add_region() failed, move r back to
585 				 * regs before all the rest are moved back.
586 				 */
587 				TAILQ_INSERT_HEAD(&regs, r, link);
588 			}
589 			for (r = r_first; r_last && r != r_last; r = r_next) {
590 				r_next = TAILQ_NEXT(r, link);
591 				TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
592 				if (r_tmp)
593 					TAILQ_INSERT_AFTER(&regs, r_tmp, r,
594 							   link);
595 				else
596 					TAILQ_INSERT_HEAD(&regs, r, link);
597 				r_tmp = r;
598 			}
599 
600 			goto err_restore_map;
601 		}
602 	}
603 
604 	fobj_put(fobj);
605 
606 	vm_set_ctx(uctx->ts_ctx);
607 	*new_va = r_first->va;
608 
609 	return TEE_SUCCESS;
610 
611 err_restore_map:
612 	next_va = old_va;
613 	while (!TAILQ_EMPTY(&regs)) {
614 		r = TAILQ_FIRST(&regs);
615 		TAILQ_REMOVE(&regs, r, link);
616 		r->va = next_va;
617 		next_va += r->size;
618 		if (umap_add_region(&uctx->vm_info, r, 0, 0, 0))
619 			panic("Cannot restore mapping");
620 		if (alloc_pgt(uctx))
621 			panic("Cannot restore mapping");
622 		if (fobj && tee_pager_add_um_area(uctx, r->va, fobj, r->attr))
623 			panic("Cannot restore mapping");
624 	}
625 	fobj_put(fobj);
626 	vm_set_ctx(uctx->ts_ctx);
627 
628 	return res;
629 }
630 
631 static bool cmp_region_for_get_flags(const struct vm_region *r0,
632 				     const struct vm_region *r,
633 				     const struct vm_region *rn __unused)
634 {
635 	return r0->flags == r->flags;
636 }
637 
638 TEE_Result vm_get_flags(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
639 			uint32_t *flags)
640 {
641 	struct vm_region *r = NULL;
642 
643 	if (!len || ((len | va) & SMALL_PAGE_MASK))
644 		return TEE_ERROR_BAD_PARAMETERS;
645 
646 	r = find_vm_region(&uctx->vm_info, va);
647 	if (!r)
648 		return TEE_ERROR_BAD_PARAMETERS;
649 
650 	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_flags))
651 		return TEE_ERROR_BAD_PARAMETERS;
652 
653 	*flags = r->flags;
654 
655 	return TEE_SUCCESS;
656 }
657 
658 static bool cmp_region_for_get_prot(const struct vm_region *r0,
659 				    const struct vm_region *r,
660 				    const struct vm_region *rn __unused)
661 {
662 	return (r0->attr & TEE_MATTR_PROT_MASK) ==
663 	       (r->attr & TEE_MATTR_PROT_MASK);
664 }
665 
666 TEE_Result vm_get_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
667 		       uint16_t *prot)
668 {
669 	struct vm_region *r = NULL;
670 
671 	if (!len || ((len | va) & SMALL_PAGE_MASK))
672 		return TEE_ERROR_BAD_PARAMETERS;
673 
674 	r = find_vm_region(&uctx->vm_info, va);
675 	if (!r)
676 		return TEE_ERROR_BAD_PARAMETERS;
677 
678 	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_prot))
679 		return TEE_ERROR_BAD_PARAMETERS;
680 
681 	*prot = r->attr & TEE_MATTR_PROT_MASK;
682 
683 	return TEE_SUCCESS;
684 }
685 
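/*
 * Change the protection of [va, va + len) to @prot. Non-paged regions
 * require the translation tables to be synchronized; if any region in
 * the range was writable, the data cache of non-paged regions is
 * cleaned and the instruction cache invalidated so newly executable
 * code is fetched coherently.
 */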
686 TEE_Result vm_set_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
687 		       uint32_t prot)
688 {
689 	TEE_Result res = TEE_SUCCESS;
690 	struct vm_region *r0 = NULL;
691 	struct vm_region *r = NULL;
692 	bool was_writeable = false;
693 	bool need_sync = false;
694 
695 	assert(thread_get_tsd()->ctx == uctx->ts_ctx);
696 
697 	if (prot & ~TEE_MATTR_PROT_MASK || !len)
698 		return TEE_ERROR_BAD_PARAMETERS;
699 
700 	res = split_vm_range(uctx, va, len, NULL, &r0);
701 	if (res)
702 		return res;
703 
704 	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
705 		if (r->va + r->size > va + len)
706 			break;
707 		if (r->attr & (TEE_MATTR_UW | TEE_MATTR_PW))
708 			was_writeable = true;
709 
710 		if (!mobj_is_paged(r->mobj))
711 			need_sync = true;
712 
713 		r->attr &= ~TEE_MATTR_PROT_MASK;
714 		r->attr |= prot;
715 	}
716 
717 	if (need_sync) {
718 		/* Synchronize changes to translation tables */
719 		vm_set_ctx(uctx->ts_ctx);
720 	}
721 
722 	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
723 		if (r->va + r->size > va + len)
724 			break;
725 		if (mobj_is_paged(r->mobj)) {
726 			if (!tee_pager_set_um_area_attr(uctx, r->va, r->size,
727 							prot))
728 				panic();
729 		} else if (was_writeable) {
730 			cache_op_inner(DCACHE_AREA_CLEAN, (void *)r->va,
731 				       r->size);
732 		}
733 
734 	}
735 	if (need_sync && was_writeable)
736 		cache_op_inner(ICACHE_INVALIDATE, NULL, 0);
737 
738 	merge_vm_range(uctx, va, len);
739 
740 	return TEE_SUCCESS;
741 }
742 
743 static void umap_remove_region(struct vm_info *vmi, struct vm_region *reg)
744 {
745 	TAILQ_REMOVE(&vmi->regions, reg, link);
746 	mobj_put(reg->mobj);
747 	free(reg);
748 }
749 
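/*
 * Unmap [va, va + len), with len rounded up to a whole number of small
 * pages. Covered regions are split as needed, removed from the pager
 * for paged mobjs, unlinked and freed, and page tables used exclusively
 * by them are flushed.
 */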
750 TEE_Result vm_unmap(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
751 {
752 	TEE_Result res = TEE_SUCCESS;
753 	struct vm_region *r = NULL;
754 	struct vm_region *r_next = NULL;
755 	size_t end_va = 0;
756 	size_t unmap_end_va = 0;
757 	size_t l = 0;
758 
759 	assert(thread_get_tsd()->ctx == uctx->ts_ctx);
760 
761 	if (ROUNDUP_OVERFLOW(len, SMALL_PAGE_SIZE, &l))
762 		return TEE_ERROR_BAD_PARAMETERS;
763 
764 	if (!l || (va & SMALL_PAGE_MASK))
765 		return TEE_ERROR_BAD_PARAMETERS;
766 
767 	if (ADD_OVERFLOW(va, l, &end_va))
768 		return TEE_ERROR_BAD_PARAMETERS;
769 
770 	res = split_vm_range(uctx, va, l, NULL, &r);
771 	if (res)
772 		return res;
773 
774 	while (true) {
775 		r_next = TAILQ_NEXT(r, link);
776 		unmap_end_va = r->va + r->size;
777 		if (mobj_is_paged(r->mobj))
778 			tee_pager_rem_um_region(uctx, r->va, r->size);
779 		maybe_free_pgt(uctx, r);
780 		umap_remove_region(&uctx->vm_info, r);
781 		if (!r_next || unmap_end_va == end_va)
782 			break;
783 		r = r_next;
784 	}
785 
786 	/*
787 	 * Synchronize the change to the translation tables. Even though the
788 	 * pager case unmaps immediately, we may still free a translation table.
789 	 */
790 	vm_set_ctx(uctx->ts_ctx);
791 
792 	return TEE_SUCCESS;
793 }
794 
795 static TEE_Result map_kinit(struct user_mode_ctx *uctx)
796 {
797 	TEE_Result res;
798 	struct mobj *mobj;
799 	size_t offs;
800 	vaddr_t va;
801 	size_t sz;
802 
803 	thread_get_user_kcode(&mobj, &offs, &va, &sz);
804 	if (sz) {
805 		res = vm_map(uctx, &va, sz, TEE_MATTR_PRX, VM_FLAG_PERMANENT,
806 			     mobj, offs);
807 		if (res)
808 			return res;
809 	}
810 
811 	thread_get_user_kdata(&mobj, &offs, &va, &sz);
812 	if (sz)
813 		return vm_map(uctx, &va, sz, TEE_MATTR_PRW, VM_FLAG_PERMANENT,
814 			      mobj, offs);
815 
816 	return TEE_SUCCESS;
817 }
818 
819 TEE_Result vm_info_init(struct user_mode_ctx *uctx)
820 {
821 	TEE_Result res;
822 	uint32_t asid = asid_alloc();
823 
824 	if (!asid) {
825 		DMSG("Failed to allocate ASID");
826 		return TEE_ERROR_GENERIC;
827 	}
828 
829 	memset(&uctx->vm_info, 0, sizeof(uctx->vm_info));
830 	TAILQ_INIT(&uctx->vm_info.regions);
831 	uctx->vm_info.asid = asid;
832 
833 	res = map_kinit(uctx);
834 	if (res)
835 		vm_info_final(uctx);
836 	return res;
837 }
838 
839 void vm_clean_param(struct user_mode_ctx *uctx)
840 {
841 	struct vm_region *next_r;
842 	struct vm_region *r;
843 
844 	TAILQ_FOREACH_SAFE(r, &uctx->vm_info.regions, link, next_r) {
845 		if (r->flags & VM_FLAG_EPHEMERAL) {
846 			if (mobj_is_paged(r->mobj))
847 				tee_pager_rem_um_region(uctx, r->va, r->size);
848 			maybe_free_pgt(uctx, r);
849 			umap_remove_region(&uctx->vm_info, r);
850 		}
851 	}
852 }
853 
854 static void check_param_map_empty(struct user_mode_ctx *uctx __maybe_unused)
855 {
856 	struct vm_region *r = NULL;
857 
858 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
859 		assert(!(r->flags & VM_FLAG_EPHEMERAL));
860 }
861 
862 static TEE_Result param_mem_to_user_va(struct user_mode_ctx *uctx,
863 				       struct param_mem *mem, void **user_va)
864 {
865 	struct vm_region *region = NULL;
866 
867 	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
868 		vaddr_t va = 0;
869 		size_t phys_offs = 0;
870 
871 		if (!(region->flags & VM_FLAG_EPHEMERAL))
872 			continue;
873 		if (mem->mobj != region->mobj)
874 			continue;
875 
876 		phys_offs = mobj_get_phys_offs(mem->mobj,
877 					       CORE_MMU_USER_PARAM_SIZE);
878 		phys_offs += mem->offs;
879 		if (phys_offs < region->offset)
880 			continue;
881 		if (phys_offs >= (region->offset + region->size))
882 			continue;
883 		va = region->va + phys_offs - region->offset;
884 		*user_va = (void *)va;
885 		return TEE_SUCCESS;
886 	}
887 	return TEE_ERROR_GENERIC;
888 }
889 
890 static int cmp_param_mem(const void *a0, const void *a1)
891 {
892 	const struct param_mem *m1 = a1;
893 	const struct param_mem *m0 = a0;
894 	int ret;
895 
896 	/* Make sure that invalid param_mem entries are placed last in the array */
897 	if (!m0->mobj && !m1->mobj)
898 		return 0;
899 	if (!m0->mobj)
900 		return 1;
901 	if (!m1->mobj)
902 		return -1;
903 
904 	ret = CMP_TRILEAN(mobj_is_secure(m0->mobj), mobj_is_secure(m1->mobj));
905 	if (ret)
906 		return ret;
907 
908 	ret = CMP_TRILEAN((vaddr_t)m0->mobj, (vaddr_t)m1->mobj);
909 	if (ret)
910 		return ret;
911 
912 	ret = CMP_TRILEAN(m0->offs, m1->offs);
913 	if (ret)
914 		return ret;
915 
916 	return CMP_TRILEAN(m0->size, m1->size);
917 }
918 
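/*
 * Map the memref parameters in @param into @uctx as ephemeral, shareable
 * regions and return the resulting user addresses in @param_va. Entries
 * referring to the same mobj are sorted and merged first so adjacent or
 * overlapping buffers share a single mapping.
 */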
919 TEE_Result vm_map_param(struct user_mode_ctx *uctx, struct tee_ta_param *param,
920 			void *param_va[TEE_NUM_PARAMS])
921 {
922 	TEE_Result res = TEE_SUCCESS;
923 	size_t n;
924 	size_t m;
925 	struct param_mem mem[TEE_NUM_PARAMS];
926 
927 	memset(mem, 0, sizeof(mem));
928 	for (n = 0; n < TEE_NUM_PARAMS; n++) {
929 		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
930 		size_t phys_offs;
931 
932 		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
933 		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
934 		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
935 			continue;
936 		phys_offs = mobj_get_phys_offs(param->u[n].mem.mobj,
937 					       CORE_MMU_USER_PARAM_SIZE);
938 		mem[n].mobj = param->u[n].mem.mobj;
939 		mem[n].offs = ROUNDDOWN(phys_offs + param->u[n].mem.offs,
940 					CORE_MMU_USER_PARAM_SIZE);
941 		mem[n].size = ROUNDUP(phys_offs + param->u[n].mem.offs -
942 				      mem[n].offs + param->u[n].mem.size,
943 				      CORE_MMU_USER_PARAM_SIZE);
944 		/*
945 		 * For size 0 (raw pointer parameter), add a minimum size
946 		 * value to allow the address to be mapped.
947 		 */
948 		if (!mem[n].size)
949 			mem[n].size = CORE_MMU_USER_PARAM_SIZE;
950 	}
951 
952 	/*
953 	 * Sort arguments so a NULL mobj is last, secure mobjs come first,
954 	 * then sort by mobj pointer value since entries with different
955 	 * mobjs can't be merged either, and finally by offset.
956 	 *
957 	 * This should result in a list where all mergeable entries are
958 	 * next to each other and unused/invalid entries are at the end.
959 	 */
960 	qsort(mem, TEE_NUM_PARAMS, sizeof(struct param_mem), cmp_param_mem);
961 
962 	for (n = 1, m = 0; n < TEE_NUM_PARAMS && mem[n].mobj; n++) {
963 		if (mem[n].mobj == mem[m].mobj &&
964 		    (mem[n].offs == (mem[m].offs + mem[m].size) ||
965 		     core_is_buffer_intersect(mem[m].offs, mem[m].size,
966 					      mem[n].offs, mem[n].size))) {
967 			mem[m].size = mem[n].offs + mem[n].size - mem[m].offs;
968 			continue;
969 		}
970 		m++;
971 		if (n != m)
972 			mem[m] = mem[n];
973 	}
974 	/*
975 	 * We'd like 'm' to be the number of valid entries. Here 'm' is the
976 	 * index of the last valid entry if the first entry is valid, else
977 	 * 0.
978 	 */
979 	if (mem[0].mobj)
980 		m++;
981 
982 	check_param_map_empty(uctx);
983 
984 	for (n = 0; n < m; n++) {
985 		vaddr_t va = 0;
986 
987 		res = vm_map(uctx, &va, mem[n].size,
988 			     TEE_MATTR_PRW | TEE_MATTR_URW,
989 			     VM_FLAG_EPHEMERAL | VM_FLAG_SHAREABLE,
990 			     mem[n].mobj, mem[n].offs);
991 		if (res)
992 			goto out;
993 	}
994 
995 	for (n = 0; n < TEE_NUM_PARAMS; n++) {
996 		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
997 
998 		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
999 		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
1000 		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
1001 			continue;
1002 		if (!param->u[n].mem.mobj)
1003 			continue;
1004 
1005 		res = param_mem_to_user_va(uctx, &param->u[n].mem,
1006 					   param_va + n);
1007 		if (res != TEE_SUCCESS)
1008 			goto out;
1009 	}
1010 
1011 	res = alloc_pgt(uctx);
1012 out:
1013 	if (res)
1014 		vm_clean_param(uctx);
1015 
1016 	return res;
1017 }
1018 
1019 TEE_Result vm_add_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj,
1020 			vaddr_t *va)
1021 {
1022 	TEE_Result res;
1023 	struct vm_region *reg = calloc(1, sizeof(*reg));
1024 
1025 	if (!reg)
1026 		return TEE_ERROR_OUT_OF_MEMORY;
1027 
1028 	reg->mobj = mobj;
1029 	reg->offset = 0;
1030 	reg->va = 0;
1031 	reg->size = ROUNDUP(mobj->size, SMALL_PAGE_SIZE);
1032 	if (mobj_is_secure(mobj))
1033 		reg->attr = TEE_MATTR_SECURE;
1034 	else
1035 		reg->attr = 0;
1036 
1037 	res = umap_add_region(&uctx->vm_info, reg, 0, 0, 0);
1038 	if (res) {
1039 		free(reg);
1040 		return res;
1041 	}
1042 
1043 	res = alloc_pgt(uctx);
1044 	if (res)
1045 		umap_remove_region(&uctx->vm_info, reg);
1046 	else
1047 		*va = reg->va;
1048 
1049 	return res;
1050 }
1051 
1052 void vm_rem_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj, vaddr_t va)
1053 {
1054 	struct vm_region *r = NULL;
1055 
1056 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1057 		if (r->mobj == mobj && r->va == va) {
1058 			if (mobj_is_paged(r->mobj))
1059 				tee_pager_rem_um_region(uctx, r->va, r->size);
1060 			maybe_free_pgt(uctx, r);
1061 			umap_remove_region(&uctx->vm_info, r);
1062 			return;
1063 		}
1064 	}
1065 }
1066 
1067 void vm_info_final(struct user_mode_ctx *uctx)
1068 {
1069 	if (!uctx->vm_info.asid)
1070 		return;
1071 
1072 	/* Clear MMU entries to avoid a clash when the ASID is reused */
1073 	tlbi_asid(uctx->vm_info.asid);
1074 
1075 	asid_free(uctx->vm_info.asid);
1076 	while (!TAILQ_EMPTY(&uctx->vm_info.regions))
1077 		umap_remove_region(&uctx->vm_info,
1078 				   TAILQ_FIRST(&uctx->vm_info.regions));
1079 	memset(&uctx->vm_info, 0, sizeof(uctx->vm_info));
1080 }
1081 
1082 /* return true only if buffer fits inside TA private memory */
1083 bool vm_buf_is_inside_um_private(const struct user_mode_ctx *uctx,
1084 				 const void *va, size_t size)
1085 {
1086 	struct vm_region *r = NULL;
1087 
1088 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1089 		if (r->flags & VM_FLAGS_NONPRIV)
1090 			continue;
1091 		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size))
1092 			return true;
1093 	}
1094 
1095 	return false;
1096 }
1097 
1098 /* return true only if buffer intersects TA private memory */
1099 bool vm_buf_intersects_um_private(const struct user_mode_ctx *uctx,
1100 				  const void *va, size_t size)
1101 {
1102 	struct vm_region *r = NULL;
1103 
1104 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1105 		if (r->flags & VM_FLAGS_NONPRIV)
1106 			continue;
1107 		if (core_is_buffer_intersect((vaddr_t)va, size, r->va, r->size))
1108 			return true;
1109 	}
1110 
1111 	return false;
1112 }
1113 
1114 TEE_Result vm_buf_to_mboj_offs(const struct user_mode_ctx *uctx,
1115 			       const void *va, size_t size,
1116 			       struct mobj **mobj, size_t *offs)
1117 {
1118 	struct vm_region *r = NULL;
1119 
1120 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1121 		if (!r->mobj)
1122 			continue;
1123 		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size)) {
1124 			size_t poffs;
1125 
1126 			poffs = mobj_get_phys_offs(r->mobj,
1127 						   CORE_MMU_USER_PARAM_SIZE);
1128 			*mobj = r->mobj;
1129 			*offs = (vaddr_t)va - r->va + r->offset - poffs;
1130 			return TEE_SUCCESS;
1131 		}
1132 	}
1133 
1134 	return TEE_ERROR_BAD_PARAMETERS;
1135 }
1136 
1137 static TEE_Result tee_mmu_user_va2pa_attr(const struct user_mode_ctx *uctx,
1138 					  void *ua, paddr_t *pa, uint32_t *attr)
1139 {
1140 	struct vm_region *region = NULL;
1141 
1142 	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
1143 		if (!core_is_buffer_inside((vaddr_t)ua, 1, region->va,
1144 					   region->size))
1145 			continue;
1146 
1147 		if (pa) {
1148 			TEE_Result res;
1149 			paddr_t p;
1150 			size_t offset;
1151 			size_t granule;
1152 
1153 			/*
1154 			 * The mobj and the input user address may each
1155 			 * include a specific offset-in-granule position.
1156 			 * Drop both to get the target physical page base
1157 			 * address, then apply only the user address
1158 			 * offset-in-granule.
1159 			 * The smallest mapping granule is the small page.
1160 			 */
1161 			granule = MAX(region->mobj->phys_granule,
1162 				      (size_t)SMALL_PAGE_SIZE);
1163 			assert(!granule || IS_POWER_OF_TWO(granule));
1164 
1165 			offset = region->offset +
1166 				 ROUNDDOWN((vaddr_t)ua - region->va, granule);
1167 
1168 			res = mobj_get_pa(region->mobj, offset, granule, &p);
1169 			if (res != TEE_SUCCESS)
1170 				return res;
1171 
1172 			*pa = p | ((vaddr_t)ua & (granule - 1));
1173 		}
1174 		if (attr)
1175 			*attr = region->attr;
1176 
1177 		return TEE_SUCCESS;
1178 	}
1179 
1180 	return TEE_ERROR_ACCESS_DENIED;
1181 }
1182 
1183 TEE_Result vm_va2pa(const struct user_mode_ctx *uctx, void *ua, paddr_t *pa)
1184 {
1185 	return tee_mmu_user_va2pa_attr(uctx, ua, pa, NULL);
1186 }
1187 
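/*
 * Translate physical address @pa to the user virtual address it is
 * mapped at in @uctx, scanning each region granule by granule for
 * physically granulated mobjs.
 */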
1188 TEE_Result vm_pa2va(const struct user_mode_ctx *uctx, paddr_t pa, void **va)
1189 {
1190 	TEE_Result res = TEE_SUCCESS;
1191 	paddr_t p = 0;
1192 	struct vm_region *region = NULL;
1193 
1194 	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
1195 		size_t granule = 0;
1196 		size_t size = 0;
1197 		size_t ofs = 0;
1198 
1199 		/* pa2va is expected only for memory tracked through mobj */
1200 		if (!region->mobj)
1201 			continue;
1202 
1203 		/* Physically granulated memory object must be scanned */
1204 		granule = region->mobj->phys_granule;
1205 		assert(!granule || IS_POWER_OF_TWO(granule));
1206 
1207 		for (ofs = region->offset; ofs < region->size; ofs += size) {
1208 
1209 			if (granule) {
1210 				/* From current offset to buffer/granule end */
1211 				size = granule - (ofs & (granule - 1));
1212 
1213 				if (size > (region->size - ofs))
1214 					size = region->size - ofs;
1215 			} else
1216 				size = region->size;
1217 
1218 			res = mobj_get_pa(region->mobj, ofs, granule, &p);
1219 			if (res != TEE_SUCCESS)
1220 				return res;
1221 
1222 			if (core_is_buffer_inside(pa, 1, p, size)) {
1223 				/* Remove region offset (mobj phys offset) */
1224 				ofs -= region->offset;
1225 				/* Get offset-in-granule */
1226 				p = pa - p;
1227 
1228 				*va = (void *)(region->va + ofs + (vaddr_t)p);
1229 				return TEE_SUCCESS;
1230 			}
1231 		}
1232 	}
1233 
1234 	return TEE_ERROR_ACCESS_DENIED;
1235 }
1236 
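/*
 * Check that the user buffer [uaddr, uaddr + len) is mapped with the
 * access rights requested in @flags, stepping through the range at the
 * smaller of the user code and parameter granularities. Unless
 * TEE_MEMORY_ACCESS_ANY_OWNER is set the buffer must also lie entirely
 * within TA private memory.
 */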
1237 TEE_Result vm_check_access_rights(const struct user_mode_ctx *uctx,
1238 				  uint32_t flags, uaddr_t uaddr, size_t len)
1239 {
1240 	uaddr_t a = 0;
1241 	uaddr_t end_addr = 0;
1242 	size_t addr_incr = MIN(CORE_MMU_USER_CODE_SIZE,
1243 			       CORE_MMU_USER_PARAM_SIZE);
1244 
1245 	if (ADD_OVERFLOW(uaddr, len, &end_addr))
1246 		return TEE_ERROR_ACCESS_DENIED;
1247 
1248 	if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
1249 	    (flags & TEE_MEMORY_ACCESS_SECURE))
1250 		return TEE_ERROR_ACCESS_DENIED;
1251 
1252 	/*
1253 	 * Rely on the TA private memory test to check whether the address
1254 	 * range is private to the TA or not.
1255 	 */
1256 	if (!(flags & TEE_MEMORY_ACCESS_ANY_OWNER) &&
1257 	   !vm_buf_is_inside_um_private(uctx, (void *)uaddr, len))
1258 		return TEE_ERROR_ACCESS_DENIED;
1259 
1260 	for (a = ROUNDDOWN(uaddr, addr_incr); a < end_addr; a += addr_incr) {
1261 		uint32_t attr;
1262 		TEE_Result res;
1263 
1264 		res = tee_mmu_user_va2pa_attr(uctx, (void *)a, NULL, &attr);
1265 		if (res != TEE_SUCCESS)
1266 			return res;
1267 
1268 		if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
1269 		    (attr & TEE_MATTR_SECURE))
1270 			return TEE_ERROR_ACCESS_DENIED;
1271 
1272 		if ((flags & TEE_MEMORY_ACCESS_SECURE) &&
1273 		    !(attr & TEE_MATTR_SECURE))
1274 			return TEE_ERROR_ACCESS_DENIED;
1275 
1276 		if ((flags & TEE_MEMORY_ACCESS_WRITE) && !(attr & TEE_MATTR_UW))
1277 			return TEE_ERROR_ACCESS_DENIED;
1278 		if ((flags & TEE_MEMORY_ACCESS_READ) && !(attr & TEE_MATTR_UR))
1279 			return TEE_ERROR_ACCESS_DENIED;
1280 	}
1281 
1282 	return TEE_SUCCESS;
1283 }
1284 
1285 void vm_set_ctx(struct ts_ctx *ctx)
1286 {
1287 	struct thread_specific_data *tsd = thread_get_tsd();
1288 
1289 	core_mmu_set_user_map(NULL);
1290 	/*
1291 	 * No matter what happens below, the current user TA will not be
1292 	 * current any longer. Make sure the pager is in sync with that.
1293 	 * This function has to be called before there's a chance that
1294 	 * pgt_free_unlocked() is called.
1295 	 *
1296 	 * Save translation tables in a cache if it's a user TA.
1297 	 */
1298 	pgt_free(&tsd->pgt_cache, is_user_ta_ctx(tsd->ctx));
1299 
1300 	if (is_user_mode_ctx(ctx)) {
1301 		struct core_mmu_user_map map = { };
1302 		struct user_mode_ctx *uctx = to_user_mode_ctx(ctx);
1303 
1304 		core_mmu_create_user_map(uctx, &map);
1305 		core_mmu_set_user_map(&map);
1306 		tee_pager_assign_um_tables(uctx);
1307 	}
1308 	tsd->ctx = ctx;
1309 }
1310 
1311