xref: /optee_os/core/mm/vm.c (revision 8411e6ad673d20c4742ed30c785e3f5cdea54dfa)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2021, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  * Copyright (c) 2021, Arm Limited
6  */
7 
8 #include <assert.h>
9 #include <config.h>
10 #include <initcall.h>
11 #include <kernel/panic.h>
12 #include <kernel/spinlock.h>
13 #include <kernel/tee_common.h>
14 #include <kernel/tee_misc.h>
15 #include <kernel/tlb_helpers.h>
16 #include <kernel/user_mode_ctx.h>
17 #include <kernel/virtualization.h>
18 #include <mm/core_memprot.h>
19 #include <mm/core_mmu.h>
20 #include <mm/mobj.h>
21 #include <mm/pgt_cache.h>
22 #include <mm/tee_mm.h>
23 #include <mm/tee_mmu_types.h>
24 #include <mm/tee_pager.h>
25 #include <mm/vm.h>
26 #include <stdlib.h>
27 #include <tee_api_defines_extensions.h>
28 #include <tee_api_types.h>
29 #include <trace.h>
30 #include <types_ext.h>
31 #include <user_ta_header.h>
32 #include <util.h>
33 
34 #ifdef CFG_PL310
35 #include <kernel/tee_l2cc_mutex.h>
36 #endif
37 
38 #define TEE_MMU_UDATA_ATTR		(TEE_MATTR_VALID_BLOCK | \
39 					 TEE_MATTR_PRW | TEE_MATTR_URW | \
40 					 TEE_MATTR_SECURE)
41 #define TEE_MMU_UCODE_ATTR		(TEE_MATTR_VALID_BLOCK | \
42 					 TEE_MATTR_PRW | TEE_MATTR_URWX | \
43 					 TEE_MATTR_SECURE)
44 
45 #define TEE_MMU_UCACHE_DEFAULT_ATTR	(TEE_MATTR_MEM_TYPE_CACHED << \
46 					 TEE_MATTR_MEM_TYPE_SHIFT)
47 
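/*
 * Find a suitable virtual address for @reg in the gap between @prev_reg
 * and @next_reg. @pad_begin and @pad_end add extra space around the
 * region, @granul is the required alignment and a fixed @reg->va is
 * honoured if already set. An extra unmapped page separates regions with
 * differing EPHEMERAL/PERMANENT/SHAREABLE flags, and without LPAE the
 * alignment is raised to CORE_MMU_PGDIR_SIZE when the secure attribute
 * differs. Returns the selected address, or 0 if the gap can't hold the
 * region.
 */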
48 static vaddr_t select_va_in_range(const struct vm_region *prev_reg,
49 				  const struct vm_region *next_reg,
50 				  const struct vm_region *reg,
51 				  size_t pad_begin, size_t pad_end,
52 				  size_t granul)
53 {
54 	const uint32_t f = VM_FLAG_EPHEMERAL | VM_FLAG_PERMANENT |
55 			    VM_FLAG_SHAREABLE;
56 	vaddr_t begin_va = 0;
57 	vaddr_t end_va = 0;
58 	size_t pad = 0;
59 
60 	/*
61 	 * Insert an unmapped entry to separate regions with differing
62 	 * VM_FLAG_EPHEMERAL, VM_FLAG_PERMANENT or VM_FLAG_SHAREABLE
63 	 * bits, as such regions must never be contiguous with one another.
64 	 */
65 	if (prev_reg->flags && (prev_reg->flags & f) != (reg->flags & f))
66 		pad = SMALL_PAGE_SIZE;
67 	else
68 		pad = 0;
69 
70 #ifndef CFG_WITH_LPAE
71 	if ((prev_reg->attr & TEE_MATTR_SECURE) !=
72 	    (reg->attr & TEE_MATTR_SECURE))
73 		granul = CORE_MMU_PGDIR_SIZE;
74 #endif
75 
76 	if (ADD_OVERFLOW(prev_reg->va, prev_reg->size, &begin_va) ||
77 	    ADD_OVERFLOW(begin_va, pad_begin, &begin_va) ||
78 	    ADD_OVERFLOW(begin_va, pad, &begin_va) ||
79 	    ROUNDUP_OVERFLOW(begin_va, granul, &begin_va))
80 		return 0;
81 
82 	if (reg->va) {
83 		if (reg->va < begin_va)
84 			return 0;
85 		begin_va = reg->va;
86 	}
87 
88 	if (next_reg->flags && (next_reg->flags & f) != (reg->flags & f))
89 		pad = SMALL_PAGE_SIZE;
90 	else
91 		pad = 0;
92 
93 #ifndef CFG_WITH_LPAE
94 	if ((next_reg->attr & TEE_MATTR_SECURE) !=
95 	    (reg->attr & TEE_MATTR_SECURE))
96 		granul = CORE_MMU_PGDIR_SIZE;
97 #endif
98 	if (ADD_OVERFLOW(begin_va, reg->size, &end_va) ||
99 	    ADD_OVERFLOW(end_va, pad_end, &end_va) ||
100 	    ADD_OVERFLOW(end_va, pad, &end_va) ||
101 	    ROUNDUP_OVERFLOW(end_va, granul, &end_va))
102 		return 0;
103 
104 	if (end_va <= next_reg->va) {
105 		assert(!reg->va || reg->va == begin_va);
106 		return begin_va;
107 	}
108 
109 	return 0;
110 }
111 
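/*
 * Check that enough page tables are available to cover the current
 * vm_info layout. With CFG_PAGED_USER_TA the tables are also allocated
 * right away when the supplied context is the currently active one, as
 * the pager will need them shortly.
 */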
112 static TEE_Result alloc_pgt(struct user_mode_ctx *uctx)
113 {
114 	struct thread_specific_data *tsd __maybe_unused;
115 
116 	if (!pgt_check_avail(&uctx->vm_info)) {
117 		EMSG("Page tables are not available");
118 		return TEE_ERROR_OUT_OF_MEMORY;
119 	}
120 
121 #ifdef CFG_PAGED_USER_TA
122 	tsd = thread_get_tsd();
123 	if (uctx->ts_ctx == tsd->ctx) {
124 		/*
125 		 * The supplied uctx is the currently active context; allocate
126 		 * the page tables now as the pager will need them soon.
127 		 */
128 		pgt_alloc(&tsd->pgt_cache, uctx->ts_ctx, &uctx->vm_info);
129 	}
130 #endif
131 
132 	return TEE_SUCCESS;
133 }
134 
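/*
 * Tear down the mapping of region @r: paged regions are handed to the
 * pager, other regions have their page table entries cleared and their
 * TLB entries invalidated by ASID. Finally, translation tables covering
 * page directory ranges used only by @r (not shared with the
 * neighbouring regions) are flushed.
 */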
135 static void rem_um_region(struct user_mode_ctx *uctx, struct vm_region *r)
136 {
137 	struct thread_specific_data *tsd = thread_get_tsd();
138 	struct pgt_cache *pgt_cache = NULL;
139 	vaddr_t begin = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE);
140 	vaddr_t last = ROUNDUP(r->va + r->size, CORE_MMU_PGDIR_SIZE);
141 	struct vm_region *r2 = NULL;
142 
143 	if (uctx->ts_ctx == tsd->ctx)
144 		pgt_cache = &tsd->pgt_cache;
145 
146 	if (mobj_is_paged(r->mobj)) {
147 		tee_pager_rem_um_region(uctx, r->va, r->size);
148 	} else {
149 		pgt_clear_ctx_range(pgt_cache, uctx->ts_ctx, r->va,
150 				    r->va + r->size);
151 		tlbi_mva_range_asid(r->va, r->size, SMALL_PAGE_SIZE,
152 				    uctx->vm_info.asid);
153 	}
154 
155 	r2 = TAILQ_NEXT(r, link);
156 	if (r2)
157 		last = MIN(last, ROUNDDOWN(r2->va, CORE_MMU_PGDIR_SIZE));
158 
159 	r2 = TAILQ_PREV(r, vm_region_head, link);
160 	if (r2)
161 		begin = MAX(begin,
162 			    ROUNDUP(r2->va + r2->size, CORE_MMU_PGDIR_SIZE));
163 
164 	/* If there are no unused page tables, there's nothing left to do */
165 	if (begin >= last)
166 		return;
167 
168 	pgt_flush_ctx_range(pgt_cache, uctx->ts_ctx, r->va, r->va + r->size);
169 }
170 
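/*
 * Insert @reg into the address ordered region list of @vmi. If @reg->va
 * is zero a free spot large enough for the region plus the requested
 * padding is picked, otherwise the fixed address is used if it is
 * available. Returns TEE_ERROR_ACCESS_CONFLICT if no suitable spot is
 * found.
 */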
171 static TEE_Result umap_add_region(struct vm_info *vmi, struct vm_region *reg,
172 				  size_t pad_begin, size_t pad_end,
173 				  size_t align)
174 {
175 	struct vm_region dummy_first_reg = { };
176 	struct vm_region dummy_last_reg = { };
177 	struct vm_region *r = NULL;
178 	struct vm_region *prev_r = NULL;
179 	vaddr_t va_range_base = 0;
180 	size_t va_range_size = 0;
181 	size_t granul;
182 	vaddr_t va = 0;
183 	size_t offs_plus_size = 0;
184 
185 	core_mmu_get_user_va_range(&va_range_base, &va_range_size);
186 	dummy_first_reg.va = va_range_base;
187 	dummy_last_reg.va = va_range_base + va_range_size;
188 
189 	/* Check alignment; it has to be at least SMALL_PAGE aligned */
190 	if ((reg->va | reg->size | pad_begin | pad_end) & SMALL_PAGE_MASK)
191 		return TEE_ERROR_ACCESS_CONFLICT;
192 
193 	/* Check that the mobj is defined for the entire range */
194 	if (ADD_OVERFLOW(reg->offset, reg->size, &offs_plus_size))
195 		return TEE_ERROR_BAD_PARAMETERS;
196 	if (offs_plus_size > ROUNDUP(reg->mobj->size, SMALL_PAGE_SIZE))
197 		return TEE_ERROR_BAD_PARAMETERS;
198 
199 	granul = MAX(align, SMALL_PAGE_SIZE);
200 	if (!IS_POWER_OF_TWO(granul))
201 		return TEE_ERROR_BAD_PARAMETERS;
202 
203 	prev_r = &dummy_first_reg;
204 	TAILQ_FOREACH(r, &vmi->regions, link) {
205 		va = select_va_in_range(prev_r, r, reg, pad_begin, pad_end,
206 					granul);
207 		if (va) {
208 			reg->va = va;
209 			TAILQ_INSERT_BEFORE(r, reg, link);
210 			return TEE_SUCCESS;
211 		}
212 		prev_r = r;
213 	}
214 
215 	r = TAILQ_LAST(&vmi->regions, vm_region_head);
216 	if (!r)
217 		r = &dummy_first_reg;
218 	va = select_va_in_range(r, &dummy_last_reg, reg, pad_begin, pad_end,
219 				granul);
220 	if (va) {
221 		reg->va = va;
222 		TAILQ_INSERT_TAIL(&vmi->regions, reg, link);
223 		return TEE_SUCCESS;
224 	}
225 
226 	return TEE_ERROR_ACCESS_CONFLICT;
227 }
228 
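/*
 * Map @len bytes of @mobj starting at @offs into the user mode context.
 * A non-zero *@va requests a fixed address, otherwise a free address is
 * picked and returned through *@va. A minimal illustration (hypothetical
 * caller), letting the allocator pick the address with no extra padding
 * or alignment:
 *
 *	vaddr_t va = 0;
 *
 *	res = vm_map_pad(uctx, &va, size, TEE_MATTR_URW | TEE_MATTR_PRW,
 *			 0, mobj, 0, 0, 0, 0);
 */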
229 TEE_Result vm_map_pad(struct user_mode_ctx *uctx, vaddr_t *va, size_t len,
230 		      uint32_t prot, uint32_t flags, struct mobj *mobj,
231 		      size_t offs, size_t pad_begin, size_t pad_end,
232 		      size_t align)
233 {
234 	TEE_Result res = TEE_SUCCESS;
235 	struct vm_region *reg = NULL;
236 	uint32_t attr = 0;
237 
238 	if (prot & ~TEE_MATTR_PROT_MASK)
239 		return TEE_ERROR_BAD_PARAMETERS;
240 
241 	reg = calloc(1, sizeof(*reg));
242 	if (!reg)
243 		return TEE_ERROR_OUT_OF_MEMORY;
244 
245 	if (!mobj_is_paged(mobj)) {
246 		uint32_t mem_type = 0;
247 
248 		res = mobj_get_mem_type(mobj, &mem_type);
249 		if (res)
250 			goto err_free_reg;
251 		attr |= mem_type << TEE_MATTR_MEM_TYPE_SHIFT;
252 	}
253 	attr |= TEE_MATTR_VALID_BLOCK;
254 	if (mobj_is_secure(mobj))
255 		attr |= TEE_MATTR_SECURE;
256 
257 	reg->mobj = mobj_get(mobj);
258 	reg->offset = offs;
259 	reg->va = *va;
260 	reg->size = ROUNDUP(len, SMALL_PAGE_SIZE);
261 	reg->attr = attr | prot;
262 	reg->flags = flags;
263 
264 	res = umap_add_region(&uctx->vm_info, reg, pad_begin, pad_end, align);
265 	if (res)
266 		goto err_put_mobj;
267 
268 	res = alloc_pgt(uctx);
269 	if (res)
270 		goto err_rem_reg;
271 
272 	if (mobj_is_paged(mobj)) {
273 		struct fobj *fobj = mobj_get_fobj(mobj);
274 
275 		if (!fobj) {
276 			res = TEE_ERROR_GENERIC;
277 			goto err_rem_reg;
278 		}
279 
280 		res = tee_pager_add_um_region(uctx, reg->va, fobj, prot);
281 		fobj_put(fobj);
282 		if (res)
283 			goto err_rem_reg;
284 	}
285 
286 	/*
287 	 * If the context is currently active, set it again to update
288 	 * the mapping.
289 	 */
290 	if (thread_get_tsd()->ctx == uctx->ts_ctx)
291 		vm_set_ctx(uctx->ts_ctx);
292 
293 	*va = reg->va;
294 
295 	return TEE_SUCCESS;
296 
297 err_rem_reg:
298 	TAILQ_REMOVE(&uctx->vm_info.regions, reg, link);
299 err_put_mobj:
300 	mobj_put(reg->mobj);
301 err_free_reg:
302 	free(reg);
303 	return res;
304 }
305 
306 static struct vm_region *find_vm_region(struct vm_info *vm_info, vaddr_t va)
307 {
308 	struct vm_region *r = NULL;
309 
310 	TAILQ_FOREACH(r, &vm_info->regions, link)
311 		if (va >= r->va && va < r->va + r->size)
312 			return r;
313 
314 	return NULL;
315 }
316 
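/*
 * Check that [@va, @va + @len) is fully covered by regions that follow
 * each other without gaps, starting at @r0. When @cmp_regs is supplied
 * it must also accept each pair of neighbouring regions for the range to
 * count as contiguous.
 */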
317 static bool va_range_is_contiguous(struct vm_region *r0, vaddr_t va,
318 				   size_t len,
319 				   bool (*cmp_regs)(const struct vm_region *r0,
320 						    const struct vm_region *r,
321 						    const struct vm_region *rn))
322 {
323 	struct vm_region *r = r0;
324 	vaddr_t end_va = 0;
325 
326 	if (ADD_OVERFLOW(va, len, &end_va))
327 		return false;
328 
329 	while (true) {
330 		struct vm_region *r_next = TAILQ_NEXT(r, link);
331 		vaddr_t r_end_va = r->va + r->size;
332 
333 		if (r_end_va >= end_va)
334 			return true;
335 		if (!r_next)
336 			return false;
337 		if (r_end_va != r_next->va)
338 			return false;
339 		if (cmp_regs && !cmp_regs(r0, r, r_next))
340 			return false;
341 		r = r_next;
342 	}
343 }
344 
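/*
 * Split region @r at @va into two regions referencing the same mobj,
 * with offset and size adjusted so the mappings are unchanged. Paged
 * regions are split in the pager first.
 */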
345 static TEE_Result split_vm_region(struct user_mode_ctx *uctx,
346 				  struct vm_region *r, vaddr_t va)
347 {
348 	struct vm_region *r2 = NULL;
349 	size_t diff = va - r->va;
350 
351 	assert(diff && diff < r->size);
352 
353 	r2 = calloc(1, sizeof(*r2));
354 	if (!r2)
355 		return TEE_ERROR_OUT_OF_MEMORY;
356 
357 	if (mobj_is_paged(r->mobj)) {
358 		TEE_Result res = tee_pager_split_um_region(uctx, va);
359 
360 		if (res) {
361 			free(r2);
362 			return res;
363 		}
364 	}
365 
366 	r2->mobj = mobj_get(r->mobj);
367 	r2->offset = r->offset + diff;
368 	r2->va = va;
369 	r2->size = r->size - diff;
370 	r2->attr = r->attr;
371 	r2->flags = r->flags;
372 
373 	r->size = diff;
374 
375 	TAILQ_INSERT_AFTER(&uctx->vm_info.regions, r, r2, link);
376 
377 	return TEE_SUCCESS;
378 }
379 
380 static TEE_Result split_vm_range(struct user_mode_ctx *uctx, vaddr_t va,
381 				 size_t len,
382 				 bool (*cmp_regs)(const struct vm_region *r0,
383 						  const struct vm_region *r,
384 						  const struct vm_region *rn),
385 				 struct vm_region **r0_ret)
386 {
387 	TEE_Result res = TEE_SUCCESS;
388 	struct vm_region *r = NULL;
389 	vaddr_t end_va = 0;
390 
391 	if ((va | len) & SMALL_PAGE_MASK)
392 		return TEE_ERROR_BAD_PARAMETERS;
393 
394 	if (ADD_OVERFLOW(va, len, &end_va))
395 		return TEE_ERROR_BAD_PARAMETERS;
396 
397 	/*
398 	 * Find first vm_region in range and check that the entire range is
399 	 * contiguous.
400 	 */
401 	r = find_vm_region(&uctx->vm_info, va);
402 	if (!r || !va_range_is_contiguous(r, va, len, cmp_regs))
403 		return TEE_ERROR_BAD_PARAMETERS;
404 
405 	/*
406 	 * If needed, split regions so that va and len cover only complete
407 	 * regions.
408 	 */
409 	if (va != r->va) {
410 		res = split_vm_region(uctx, r, va);
411 		if (res)
412 			return res;
413 		r = TAILQ_NEXT(r, link);
414 	}
415 
416 	*r0_ret = r;
417 	r = find_vm_region(&uctx->vm_info, va + len - 1);
418 	if (!r)
419 		return TEE_ERROR_BAD_PARAMETERS;
420 	if (end_va != r->va + r->size) {
421 		res = split_vm_region(uctx, r, end_va);
422 		if (res)
423 			return res;
424 	}
425 
426 	return TEE_SUCCESS;
427 }
428 
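/*
 * Merge neighbouring regions within [@va, @va + @len] that share mobj,
 * attributes and flags and have contiguous offsets, undoing earlier
 * splits where possible.
 */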
429 static void merge_vm_range(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
430 {
431 	struct vm_region *r_next = NULL;
432 	struct vm_region *r = NULL;
433 	vaddr_t end_va = 0;
434 
435 	if (ADD_OVERFLOW(va, len, &end_va))
436 		return;
437 
438 	tee_pager_merge_um_region(uctx, va, len);
439 
440 	for (r = TAILQ_FIRST(&uctx->vm_info.regions);; r = r_next) {
441 		r_next = TAILQ_NEXT(r, link);
442 		if (!r_next)
443 			return;
444 
445 		/* Try merging with the region just before va */
446 		if (r->va + r->size < va)
447 			continue;
448 
449 		/*
450 		 * If r->va is well past our range we're done.
451 		 * Note that if it's just the page after our range we'll
452 		 * try to merge.
453 		 */
454 		if (r->va > end_va)
455 			return;
456 
457 		if (r->va + r->size != r_next->va)
458 			continue;
459 		if (r->mobj != r_next->mobj ||
460 		    r->flags != r_next->flags ||
461 		    r->attr != r_next->attr)
462 			continue;
463 		if (r->offset + r->size != r_next->offset)
464 			continue;
465 
466 		TAILQ_REMOVE(&uctx->vm_info.regions, r_next, link);
467 		r->size += r_next->size;
468 		mobj_put(r_next->mobj);
469 		free(r_next);
470 		r_next = r;
471 	}
472 }
473 
474 static bool cmp_region_for_remap(const struct vm_region *r0,
475 				 const struct vm_region *r,
476 				 const struct vm_region *rn)
477 {
478 	/*
479 	 * All the essentials have to match for remap to make sense. The
480 	 * essentials are mobj/fobj, attr and flags, and the offset must be
481 	 * contiguous.
482 	 *
483 	 * Note that vm_remap() depends on mobj/fobj being the same.
484 	 */
485 	return r0->flags == r->flags && r0->attr == r->attr &&
486 	       r0->mobj == r->mobj && rn->offset == r->offset + r->size;
487 }
488 
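/*
 * Move the mapping of [@old_va, @old_va + @len) to a new address, fixed
 * by *@new_va when non-zero or picked freely otherwise, keeping mobj,
 * offsets, attributes and flags. The resulting address is returned in
 * *@new_va. On failure the original mapping is restored, or the core
 * panics if that isn't possible.
 */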
489 TEE_Result vm_remap(struct user_mode_ctx *uctx, vaddr_t *new_va, vaddr_t old_va,
490 		    size_t len, size_t pad_begin, size_t pad_end)
491 {
492 	struct vm_region_head regs = TAILQ_HEAD_INITIALIZER(regs);
493 	TEE_Result res = TEE_SUCCESS;
494 	struct vm_region *r0 = NULL;
495 	struct vm_region *r = NULL;
496 	struct vm_region *r_next = NULL;
497 	struct vm_region *r_last = NULL;
498 	struct vm_region *r_first = NULL;
499 	struct fobj *fobj = NULL;
500 	vaddr_t next_va = 0;
501 
502 	assert(thread_get_tsd()->ctx == uctx->ts_ctx);
503 
504 	if (!len || ((len | old_va) & SMALL_PAGE_MASK))
505 		return TEE_ERROR_BAD_PARAMETERS;
506 
507 	res = split_vm_range(uctx, old_va, len, cmp_region_for_remap, &r0);
508 	if (res)
509 		return res;
510 
511 	if (mobj_is_paged(r0->mobj)) {
512 		fobj = mobj_get_fobj(r0->mobj);
513 		if (!fobj)
514 			panic();
515 	}
516 
517 	for (r = r0; r; r = r_next) {
518 		if (r->va + r->size > old_va + len)
519 			break;
520 		r_next = TAILQ_NEXT(r, link);
521 		rem_um_region(uctx, r);
522 		TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
523 		TAILQ_INSERT_TAIL(&regs, r, link);
524 	}
525 
526 	/*
527 	 * Synchronize changes to translation tables. Even though the pager
528 	 * case unmaps immediately, we may still free a translation table.
529 	 */
530 	vm_set_ctx(uctx->ts_ctx);
531 
532 	r_first = TAILQ_FIRST(&regs);
533 	while (!TAILQ_EMPTY(&regs)) {
534 		r = TAILQ_FIRST(&regs);
535 		TAILQ_REMOVE(&regs, r, link);
536 		if (r_last) {
537 			r->va = r_last->va + r_last->size;
538 			res = umap_add_region(&uctx->vm_info, r, 0, 0, 0);
539 		} else {
540 			r->va = *new_va;
541 			res = umap_add_region(&uctx->vm_info, r, pad_begin,
542 					      pad_end + len - r->size, 0);
543 		}
544 		if (!res)
545 			r_last = r;
546 		if (!res)
547 			res = alloc_pgt(uctx);
548 		if (fobj && !res)
549 			res = tee_pager_add_um_region(uctx, r->va, fobj,
550 						      r->attr);
551 
552 		if (res) {
553 			/*
554 			 * Something went wrong; move all the recently added
555 			 * regions back to regs for later reinsertion at
556 			 * the original spot.
557 			 */
558 			struct vm_region *r_tmp = NULL;
559 			struct vm_region *r_stop = NULL;
560 
561 			if (r != r_last) {
562 				/*
563 				 * umap_add_region() failed, move r back to
564 				 * regs before all the rest are moved back.
565 				 */
566 				TAILQ_INSERT_HEAD(&regs, r, link);
567 			}
568 			if (r_last)
569 				r_stop = TAILQ_NEXT(r_last, link);
570 			for (r = r_first; r != r_stop; r = r_next) {
571 				r_next = TAILQ_NEXT(r, link);
572 				TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
573 				if (r_tmp)
574 					TAILQ_INSERT_AFTER(&regs, r_tmp, r,
575 							   link);
576 				else
577 					TAILQ_INSERT_HEAD(&regs, r, link);
578 				r_tmp = r;
579 			}
580 
581 			goto err_restore_map;
582 		}
583 	}
584 
585 	fobj_put(fobj);
586 
587 	vm_set_ctx(uctx->ts_ctx);
588 	*new_va = r_first->va;
589 
590 	return TEE_SUCCESS;
591 
592 err_restore_map:
593 	next_va = old_va;
594 	while (!TAILQ_EMPTY(&regs)) {
595 		r = TAILQ_FIRST(&regs);
596 		TAILQ_REMOVE(&regs, r, link);
597 		r->va = next_va;
598 		next_va += r->size;
599 		if (umap_add_region(&uctx->vm_info, r, 0, 0, 0))
600 			panic("Cannot restore mapping");
601 		if (alloc_pgt(uctx))
602 			panic("Cannot restore mapping");
603 		if (fobj && tee_pager_add_um_region(uctx, r->va, fobj, r->attr))
604 			panic("Cannot restore mapping");
605 	}
606 	fobj_put(fobj);
607 	vm_set_ctx(uctx->ts_ctx);
608 
609 	return res;
610 }
611 
612 static bool cmp_region_for_get_flags(const struct vm_region *r0,
613 				     const struct vm_region *r,
614 				     const struct vm_region *rn __unused)
615 {
616 	return r0->flags == r->flags;
617 }
618 
619 TEE_Result vm_get_flags(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
620 			uint32_t *flags)
621 {
622 	struct vm_region *r = NULL;
623 
624 	if (!len || ((len | va) & SMALL_PAGE_MASK))
625 		return TEE_ERROR_BAD_PARAMETERS;
626 
627 	r = find_vm_region(&uctx->vm_info, va);
628 	if (!r)
629 		return TEE_ERROR_BAD_PARAMETERS;
630 
631 	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_flags))
632 		return TEE_ERROR_BAD_PARAMETERS;
633 
634 	*flags = r->flags;
635 
636 	return TEE_SUCCESS;
637 }
638 
639 static bool cmp_region_for_get_prot(const struct vm_region *r0,
640 				    const struct vm_region *r,
641 				    const struct vm_region *rn __unused)
642 {
643 	return (r0->attr & TEE_MATTR_PROT_MASK) ==
644 	       (r->attr & TEE_MATTR_PROT_MASK);
645 }
646 
647 TEE_Result vm_get_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
648 		       uint16_t *prot)
649 {
650 	struct vm_region *r = NULL;
651 
652 	if (!len || ((len | va) & SMALL_PAGE_MASK))
653 		return TEE_ERROR_BAD_PARAMETERS;
654 
655 	r = find_vm_region(&uctx->vm_info, va);
656 	if (!r)
657 		return TEE_ERROR_BAD_PARAMETERS;
658 
659 	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_prot))
660 		return TEE_ERROR_BAD_PARAMETERS;
661 
662 	*prot = r->attr & TEE_MATTR_PROT_MASK;
663 
664 	return TEE_SUCCESS;
665 }
666 
667 TEE_Result vm_set_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
668 		       uint32_t prot)
669 {
670 	TEE_Result res = TEE_SUCCESS;
671 	struct vm_region *r0 = NULL;
672 	struct vm_region *r = NULL;
673 	bool was_writeable = false;
674 	bool need_sync = false;
675 
676 	assert(thread_get_tsd()->ctx == uctx->ts_ctx);
677 
678 	if (prot & ~TEE_MATTR_PROT_MASK || !len)
679 		return TEE_ERROR_BAD_PARAMETERS;
680 
681 	res = split_vm_range(uctx, va, len, NULL, &r0);
682 	if (res)
683 		return res;
684 
685 	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
686 		if (r->va + r->size > va + len)
687 			break;
688 		if (r->attr & (TEE_MATTR_UW | TEE_MATTR_PW))
689 			was_writeable = true;
690 
691 		if (!mobj_is_paged(r->mobj))
692 			need_sync = true;
693 
694 		r->attr &= ~TEE_MATTR_PROT_MASK;
695 		r->attr |= prot;
696 	}
697 
698 	if (need_sync) {
699 		/* Synchronize changes to translation tables */
700 		vm_set_ctx(uctx->ts_ctx);
701 	}
702 
703 	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
704 		if (r->va + r->size > va + len)
705 			break;
706 		if (mobj_is_paged(r->mobj)) {
707 			if (!tee_pager_set_um_region_attr(uctx, r->va, r->size,
708 							  prot))
709 				panic();
710 		} else if (was_writeable) {
711 			cache_op_inner(DCACHE_AREA_CLEAN, (void *)r->va,
712 				       r->size);
713 		}
714 
715 	}
716 	if (need_sync && was_writeable)
717 		cache_op_inner(ICACHE_INVALIDATE, NULL, 0);
718 
719 	merge_vm_range(uctx, va, len);
720 
721 	return TEE_SUCCESS;
722 }
723 
724 static void umap_remove_region(struct vm_info *vmi, struct vm_region *reg)
725 {
726 	TAILQ_REMOVE(&vmi->regions, reg, link);
727 	mobj_put(reg->mobj);
728 	free(reg);
729 }
730 
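/*
 * Unmap @len bytes (rounded up to a page boundary) starting at @va from
 * the user mode context, splitting regions as needed so that only
 * complete regions are removed.
 */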
731 TEE_Result vm_unmap(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
732 {
733 	TEE_Result res = TEE_SUCCESS;
734 	struct vm_region *r = NULL;
735 	struct vm_region *r_next = NULL;
736 	size_t end_va = 0;
737 	size_t unmap_end_va = 0;
738 	size_t l = 0;
739 
740 	assert(thread_get_tsd()->ctx == uctx->ts_ctx);
741 
742 	if (ROUNDUP_OVERFLOW(len, SMALL_PAGE_SIZE, &l))
743 		return TEE_ERROR_BAD_PARAMETERS;
744 
745 	if (!l || (va & SMALL_PAGE_MASK))
746 		return TEE_ERROR_BAD_PARAMETERS;
747 
748 	if (ADD_OVERFLOW(va, l, &end_va))
749 		return TEE_ERROR_BAD_PARAMETERS;
750 
751 	res = split_vm_range(uctx, va, l, NULL, &r);
752 	if (res)
753 		return res;
754 
755 	while (true) {
756 		r_next = TAILQ_NEXT(r, link);
757 		unmap_end_va = r->va + r->size;
758 		rem_um_region(uctx, r);
759 		umap_remove_region(&uctx->vm_info, r);
760 		if (!r_next || unmap_end_va == end_va)
761 			break;
762 		r = r_next;
763 	}
764 
765 	return TEE_SUCCESS;
766 }
767 
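/*
 * Map the kernel code and data that must stay mapped while executing in
 * user mode as permanent regions of the user mode context.
 */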
768 static TEE_Result map_kinit(struct user_mode_ctx *uctx)
769 {
770 	TEE_Result res = TEE_SUCCESS;
771 	struct mobj *mobj = NULL;
772 	size_t offs = 0;
773 	vaddr_t va = 0;
774 	size_t sz = 0;
775 	uint32_t prot = 0;
776 
777 	thread_get_user_kcode(&mobj, &offs, &va, &sz);
778 	if (sz) {
779 		prot = TEE_MATTR_PRX;
780 		if (IS_ENABLED(CFG_CORE_BTI))
781 			prot |= TEE_MATTR_GUARDED;
782 		res = vm_map(uctx, &va, sz, prot, VM_FLAG_PERMANENT,
783 			     mobj, offs);
784 		if (res)
785 			return res;
786 	}
787 
788 	thread_get_user_kdata(&mobj, &offs, &va, &sz);
789 	if (sz)
790 		return vm_map(uctx, &va, sz, TEE_MATTR_PRW, VM_FLAG_PERMANENT,
791 			      mobj, offs);
792 
793 	return TEE_SUCCESS;
794 }
795 
796 TEE_Result vm_info_init(struct user_mode_ctx *uctx)
797 {
798 	TEE_Result res;
799 	uint32_t asid = asid_alloc();
800 
801 	if (!asid) {
802 		DMSG("Failed to allocate ASID");
803 		return TEE_ERROR_GENERIC;
804 	}
805 
806 	memset(&uctx->vm_info, 0, sizeof(uctx->vm_info));
807 	TAILQ_INIT(&uctx->vm_info.regions);
808 	uctx->vm_info.asid = asid;
809 
810 	res = map_kinit(uctx);
811 	if (res)
812 		vm_info_final(uctx);
813 	return res;
814 }
815 
816 void vm_clean_param(struct user_mode_ctx *uctx)
817 {
818 	struct vm_region *next_r;
819 	struct vm_region *r;
820 
821 	TAILQ_FOREACH_SAFE(r, &uctx->vm_info.regions, link, next_r) {
822 		if (r->flags & VM_FLAG_EPHEMERAL) {
823 			rem_um_region(uctx, r);
824 			umap_remove_region(&uctx->vm_info, r);
825 		}
826 	}
827 }
828 
829 static void check_param_map_empty(struct user_mode_ctx *uctx __maybe_unused)
830 {
831 	struct vm_region *r = NULL;
832 
833 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
834 		assert(!(r->flags & VM_FLAG_EPHEMERAL));
835 }
836 
837 static TEE_Result param_mem_to_user_va(struct user_mode_ctx *uctx,
838 				       struct param_mem *mem, void **user_va)
839 {
840 	struct vm_region *region = NULL;
841 
842 	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
843 		vaddr_t va = 0;
844 		size_t phys_offs = 0;
845 
846 		if (!(region->flags & VM_FLAG_EPHEMERAL))
847 			continue;
848 		if (mem->mobj != region->mobj)
849 			continue;
850 
851 		phys_offs = mobj_get_phys_offs(mem->mobj,
852 					       CORE_MMU_USER_PARAM_SIZE);
853 		phys_offs += mem->offs;
854 		if (phys_offs < region->offset)
855 			continue;
856 		if (phys_offs >= (region->offset + region->size))
857 			continue;
858 		va = region->va + phys_offs - region->offset;
859 		*user_va = (void *)va;
860 		return TEE_SUCCESS;
861 	}
862 	return TEE_ERROR_GENERIC;
863 }
864 
865 static int cmp_param_mem(const void *a0, const void *a1)
866 {
867 	const struct param_mem *m1 = a1;
868 	const struct param_mem *m0 = a0;
869 	int ret;
870 
871 	/* Make sure invalid param_mem entries are placed last in the array */
872 	if (!m0->mobj && !m1->mobj)
873 		return 0;
874 	if (!m0->mobj)
875 		return 1;
876 	if (!m1->mobj)
877 		return -1;
878 
879 	ret = CMP_TRILEAN(mobj_is_secure(m0->mobj), mobj_is_secure(m1->mobj));
880 	if (ret)
881 		return ret;
882 
883 	ret = CMP_TRILEAN((vaddr_t)m0->mobj, (vaddr_t)m1->mobj);
884 	if (ret)
885 		return ret;
886 
887 	ret = CMP_TRILEAN(m0->offs, m1->offs);
888 	if (ret)
889 		return ret;
890 
891 	return CMP_TRILEAN(m0->size, m1->size);
892 }
893 
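/*
 * Map the memref parameters of @param into the user mode context as
 * ephemeral, shareable regions and return the corresponding user
 * addresses in @param_va. Overlapping or adjacent references to the same
 * mobj are merged into a single mapping first.
 */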
894 TEE_Result vm_map_param(struct user_mode_ctx *uctx, struct tee_ta_param *param,
895 			void *param_va[TEE_NUM_PARAMS])
896 {
897 	TEE_Result res = TEE_SUCCESS;
898 	size_t n;
899 	size_t m;
900 	struct param_mem mem[TEE_NUM_PARAMS];
901 
902 	memset(mem, 0, sizeof(mem));
903 	for (n = 0; n < TEE_NUM_PARAMS; n++) {
904 		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
905 		size_t phys_offs;
906 
907 		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
908 		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
909 		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
910 			continue;
911 		phys_offs = mobj_get_phys_offs(param->u[n].mem.mobj,
912 					       CORE_MMU_USER_PARAM_SIZE);
913 		mem[n].mobj = param->u[n].mem.mobj;
914 		mem[n].offs = ROUNDDOWN(phys_offs + param->u[n].mem.offs,
915 					CORE_MMU_USER_PARAM_SIZE);
916 		mem[n].size = ROUNDUP(phys_offs + param->u[n].mem.offs -
917 				      mem[n].offs + param->u[n].mem.size,
918 				      CORE_MMU_USER_PARAM_SIZE);
919 		/*
920 		 * For size 0 (raw pointer parameter), use the minimum size
921 		 * so that the address can still be mapped.
922 		 */
923 		if (!mem[n].size)
924 			mem[n].size = CORE_MMU_USER_PARAM_SIZE;
925 	}
926 
927 	/*
928 	 * Sort arguments so a NULL mobj is last, secure mobjs come first,
929 	 * then by mobj pointer value since entries with different mobjs
930 	 * can't be merged, and finally by offset.
931 	 *
932 	 * This should result in a list where all mergeable entries are
933 	 * next to each other and unused/invalid entries are at the end.
934 	 */
935 	qsort(mem, TEE_NUM_PARAMS, sizeof(struct param_mem), cmp_param_mem);
936 
937 	for (n = 1, m = 0; n < TEE_NUM_PARAMS && mem[n].mobj; n++) {
938 		if (mem[n].mobj == mem[m].mobj &&
939 		    (mem[n].offs == (mem[m].offs + mem[m].size) ||
940 		     core_is_buffer_intersect(mem[m].offs, mem[m].size,
941 					      mem[n].offs, mem[n].size))) {
942 			mem[m].size = mem[n].offs + mem[n].size - mem[m].offs;
943 			continue;
944 		}
945 		m++;
946 		if (n != m)
947 			mem[m] = mem[n];
948 	}
949 	/*
950 	 * We'd like 'm' to be the number of valid entries. Here 'm' is the
951 	 * index of the last valid entry if the first entry is valid, else
952 	 * 0.
953 	 */
954 	if (mem[0].mobj)
955 		m++;
956 
957 	check_param_map_empty(uctx);
958 
959 	for (n = 0; n < m; n++) {
960 		vaddr_t va = 0;
961 
962 		res = vm_map(uctx, &va, mem[n].size,
963 			     TEE_MATTR_PRW | TEE_MATTR_URW,
964 			     VM_FLAG_EPHEMERAL | VM_FLAG_SHAREABLE,
965 			     mem[n].mobj, mem[n].offs);
966 		if (res)
967 			goto out;
968 	}
969 
970 	for (n = 0; n < TEE_NUM_PARAMS; n++) {
971 		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
972 
973 		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
974 		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
975 		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
976 			continue;
977 		if (!param->u[n].mem.mobj)
978 			continue;
979 
980 		res = param_mem_to_user_va(uctx, &param->u[n].mem,
981 					   param_va + n);
982 		if (res != TEE_SUCCESS)
983 			goto out;
984 	}
985 
986 	res = alloc_pgt(uctx);
987 out:
988 	if (res)
989 		vm_clean_param(uctx);
990 
991 	return res;
992 }
993 
994 TEE_Result vm_add_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj,
995 			vaddr_t *va)
996 {
997 	TEE_Result res = TEE_SUCCESS;
998 	struct vm_region *reg = NULL;
999 
1000 	if (!mobj_is_secure(mobj) || !mobj_is_paged(mobj))
1001 		return TEE_ERROR_BAD_PARAMETERS;
1002 
1003 	reg = calloc(1, sizeof(*reg));
1004 	if (!reg)
1005 		return TEE_ERROR_OUT_OF_MEMORY;
1006 
1007 	reg->mobj = mobj;
1008 	reg->offset = 0;
1009 	reg->va = 0;
1010 	reg->size = ROUNDUP(mobj->size, SMALL_PAGE_SIZE);
1011 	reg->attr = TEE_MATTR_SECURE;
1012 
1013 	res = umap_add_region(&uctx->vm_info, reg, 0, 0, 0);
1014 	if (res) {
1015 		free(reg);
1016 		return res;
1017 	}
1018 
1019 	res = alloc_pgt(uctx);
1020 	if (res)
1021 		umap_remove_region(&uctx->vm_info, reg);
1022 	else
1023 		*va = reg->va;
1024 
1025 	return res;
1026 }
1027 
1028 void vm_rem_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj, vaddr_t va)
1029 {
1030 	struct vm_region *r = NULL;
1031 
1032 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1033 		if (r->mobj == mobj && r->va == va) {
1034 			rem_um_region(uctx, r);
1035 			umap_remove_region(&uctx->vm_info, r);
1036 			return;
1037 		}
1038 	}
1039 }
1040 
1041 void vm_info_final(struct user_mode_ctx *uctx)
1042 {
1043 	if (!uctx->vm_info.asid)
1044 		return;
1045 
1046 	/* Clear MMU entries to avoid a clash when the ASID is reused */
1047 	tlbi_asid(uctx->vm_info.asid);
1048 
1049 	asid_free(uctx->vm_info.asid);
1050 	while (!TAILQ_EMPTY(&uctx->vm_info.regions))
1051 		umap_remove_region(&uctx->vm_info,
1052 				   TAILQ_FIRST(&uctx->vm_info.regions));
1053 	memset(&uctx->vm_info, 0, sizeof(uctx->vm_info));
1054 }
1055 
1056 /* return true only if buffer fits inside TA private memory */
1057 bool vm_buf_is_inside_um_private(const struct user_mode_ctx *uctx,
1058 				 const void *va, size_t size)
1059 {
1060 	struct vm_region *r = NULL;
1061 
1062 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1063 		if (r->flags & VM_FLAGS_NONPRIV)
1064 			continue;
1065 		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size))
1066 			return true;
1067 	}
1068 
1069 	return false;
1070 }
1071 
1072 /* return true only if buffer intersects TA private memory */
1073 bool vm_buf_intersects_um_private(const struct user_mode_ctx *uctx,
1074 				  const void *va, size_t size)
1075 {
1076 	struct vm_region *r = NULL;
1077 
1078 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1079 		if (r->flags & VM_FLAGS_NONPRIV)
1080 			continue;
1081 		if (core_is_buffer_intersect((vaddr_t)va, size, r->va, r->size))
1082 			return true;
1083 	}
1084 
1085 	return false;
1086 }
1087 
1088 TEE_Result vm_buf_to_mboj_offs(const struct user_mode_ctx *uctx,
1089 			       const void *va, size_t size,
1090 			       struct mobj **mobj, size_t *offs)
1091 {
1092 	struct vm_region *r = NULL;
1093 
1094 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1095 		if (!r->mobj)
1096 			continue;
1097 		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size)) {
1098 			size_t poffs;
1099 
1100 			poffs = mobj_get_phys_offs(r->mobj,
1101 						   CORE_MMU_USER_PARAM_SIZE);
1102 			*mobj = r->mobj;
1103 			*offs = (vaddr_t)va - r->va + r->offset - poffs;
1104 			return TEE_SUCCESS;
1105 		}
1106 	}
1107 
1108 	return TEE_ERROR_BAD_PARAMETERS;
1109 }
1110 
1111 static TEE_Result tee_mmu_user_va2pa_attr(const struct user_mode_ctx *uctx,
1112 					  void *ua, paddr_t *pa, uint32_t *attr)
1113 {
1114 	struct vm_region *region = NULL;
1115 
1116 	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
1117 		if (!core_is_buffer_inside((vaddr_t)ua, 1, region->va,
1118 					   region->size))
1119 			continue;
1120 
1121 		if (pa) {
1122 			TEE_Result res;
1123 			paddr_t p;
1124 			size_t offset;
1125 			size_t granule;
1126 
1127 			/*
1128 			 * mobj and input user address may each include
1129 			 * a specific offset-in-granule position.
1130 			 * Drop both to get the target physical page base
1131 			 * address, then apply only the user address
1132 			 * offset-in-granule.
1133 			 * The smallest mapping granule is a small page.
1134 			 */
1135 			granule = MAX(region->mobj->phys_granule,
1136 				      (size_t)SMALL_PAGE_SIZE);
1137 			assert(!granule || IS_POWER_OF_TWO(granule));
1138 
1139 			offset = region->offset +
1140 				 ROUNDDOWN((vaddr_t)ua - region->va, granule);
1141 
1142 			res = mobj_get_pa(region->mobj, offset, granule, &p);
1143 			if (res != TEE_SUCCESS)
1144 				return res;
1145 
1146 			*pa = p | ((vaddr_t)ua & (granule - 1));
1147 		}
1148 		if (attr)
1149 			*attr = region->attr;
1150 
1151 		return TEE_SUCCESS;
1152 	}
1153 
1154 	return TEE_ERROR_ACCESS_DENIED;
1155 }
1156 
1157 TEE_Result vm_va2pa(const struct user_mode_ctx *uctx, void *ua, paddr_t *pa)
1158 {
1159 	return tee_mmu_user_va2pa_attr(uctx, ua, pa, NULL);
1160 }
1161 
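/*
 * Translate the physical range [@pa, @pa + @pa_size) back to a user
 * virtual address by scanning each region granule by granule. Returns
 * NULL if no mapping covers the range.
 */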
1162 void *vm_pa2va(const struct user_mode_ctx *uctx, paddr_t pa, size_t pa_size)
1163 {
1164 	paddr_t p = 0;
1165 	struct vm_region *region = NULL;
1166 
1167 	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
1168 		size_t granule = 0;
1169 		size_t size = 0;
1170 		size_t ofs = 0;
1171 
1172 		/* pa2va is expected only for memory tracked through mobj */
1173 		if (!region->mobj)
1174 			continue;
1175 
1176 		/* Physically granulated memory object must be scanned */
1177 		granule = region->mobj->phys_granule;
1178 		assert(!granule || IS_POWER_OF_TWO(granule));
1179 
1180 		for (ofs = region->offset; ofs < region->size; ofs += size) {
1181 
1182 			if (granule) {
1183 				/* From current offset to buffer/granule end */
1184 				size = granule - (ofs & (granule - 1));
1185 
1186 				if (size > (region->size - ofs))
1187 					size = region->size - ofs;
1188 			} else {
1189 				size = region->size;
1190 			}
1191 
1192 			if (mobj_get_pa(region->mobj, ofs, granule, &p))
1193 				continue;
1194 
1195 			if (core_is_buffer_inside(pa, pa_size, p, size)) {
1196 				/* Remove region offset (mobj phys offset) */
1197 				ofs -= region->offset;
1198 				/* Get offset-in-granule */
1199 				p = pa - p;
1200 
1201 				return (void *)(region->va + ofs + (vaddr_t)p);
1202 			}
1203 		}
1204 	}
1205 
1206 	return NULL;
1207 }
1208 
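/*
 * Check that the user address range [@uaddr, @uaddr + @len) is mapped
 * with the access rights requested in @flags (read/write, secure or
 * non-secure, and optionally restricted to TA private memory), walking
 * the range in steps of the smallest mapping granularity.
 */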
1209 TEE_Result vm_check_access_rights(const struct user_mode_ctx *uctx,
1210 				  uint32_t flags, uaddr_t uaddr, size_t len)
1211 {
1212 	uaddr_t a = 0;
1213 	uaddr_t end_addr = 0;
1214 	size_t addr_incr = MIN(CORE_MMU_USER_CODE_SIZE,
1215 			       CORE_MMU_USER_PARAM_SIZE);
1216 
1217 	if (ADD_OVERFLOW(uaddr, len, &end_addr))
1218 		return TEE_ERROR_ACCESS_DENIED;
1219 
1220 	if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
1221 	    (flags & TEE_MEMORY_ACCESS_SECURE))
1222 		return TEE_ERROR_ACCESS_DENIED;
1223 
1224 	/*
1225 	 * Rely on the TA private memory test to check whether the address
1226 	 * range is private to the TA or not.
1227 	 */
1228 	if (!(flags & TEE_MEMORY_ACCESS_ANY_OWNER) &&
1229 	   !vm_buf_is_inside_um_private(uctx, (void *)uaddr, len))
1230 		return TEE_ERROR_ACCESS_DENIED;
1231 
1232 	for (a = ROUNDDOWN(uaddr, addr_incr); a < end_addr; a += addr_incr) {
1233 		uint32_t attr;
1234 		TEE_Result res;
1235 
1236 		res = tee_mmu_user_va2pa_attr(uctx, (void *)a, NULL, &attr);
1237 		if (res != TEE_SUCCESS)
1238 			return res;
1239 
1240 		if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
1241 		    (attr & TEE_MATTR_SECURE))
1242 			return TEE_ERROR_ACCESS_DENIED;
1243 
1244 		if ((flags & TEE_MEMORY_ACCESS_SECURE) &&
1245 		    !(attr & TEE_MATTR_SECURE))
1246 			return TEE_ERROR_ACCESS_DENIED;
1247 
1248 		if ((flags & TEE_MEMORY_ACCESS_WRITE) && !(attr & TEE_MATTR_UW))
1249 			return TEE_ERROR_ACCESS_DENIED;
1250 		if ((flags & TEE_MEMORY_ACCESS_READ) && !(attr & TEE_MATTR_UR))
1251 			return TEE_ERROR_ACCESS_DENIED;
1252 	}
1253 
1254 	return TEE_SUCCESS;
1255 }
1256 
1257 void vm_set_ctx(struct ts_ctx *ctx)
1258 {
1259 	struct thread_specific_data *tsd = thread_get_tsd();
1260 
1261 	core_mmu_set_user_map(NULL);
1262 	/*
1263 	 * No matter what happens below, the current user TA will not be
1264 	 * current any longer. Make sure the pager is in sync with that.
1265 	 * This function has to be called before there's a chance that
1266 	 * pgt_free_unlocked() is called.
1267 	 *
1268 	 * Save translation tables in a cache if it's a user TA.
1269 	 */
1270 	pgt_free(&tsd->pgt_cache, is_user_ta_ctx(tsd->ctx));
1271 
1272 	if (is_user_mode_ctx(ctx)) {
1273 		struct core_mmu_user_map map = { };
1274 		struct user_mode_ctx *uctx = to_user_mode_ctx(ctx);
1275 
1276 		core_mmu_create_user_map(uctx, &map);
1277 		core_mmu_set_user_map(&map);
1278 		tee_pager_assign_um_tables(uctx);
1279 	}
1280 	tsd->ctx = ctx;
1281 }
1282 
1283 struct mobj *vm_get_mobj(struct user_mode_ctx *uctx, vaddr_t va, size_t *len,
1284 			 uint16_t *prot, size_t *offs)
1285 {
1286 	struct vm_region *r = NULL;
1287 	size_t r_offs = 0;
1288 
1289 	if (!len || ((*len | va) & SMALL_PAGE_MASK))
1290 		return NULL;
1291 
1292 	r = find_vm_region(&uctx->vm_info, va);
1293 	if (!r)
1294 		return NULL;
1295 
1296 	r_offs = va - r->va;
1297 
1298 	*len = MIN(r->size - r_offs, *len);
1299 	*offs = r->offset + r_offs;
1300 	*prot = r->attr & TEE_MATTR_PROT_MASK;
1301 	return mobj_get(r->mobj);
1302 }
1303