xref: /optee_os/core/mm/vm.c (revision 60d3fc697d9bb597ce1fffc69a07f30b45efa435)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2021, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  * Copyright (c) 2021, Arm Limited
6  */
7 
8 #include <assert.h>
9 #include <config.h>
10 #include <initcall.h>
11 #include <kernel/panic.h>
12 #include <kernel/spinlock.h>
13 #include <kernel/tee_common.h>
14 #include <kernel/tee_misc.h>
15 #include <kernel/tlb_helpers.h>
16 #include <kernel/user_mode_ctx.h>
17 #include <kernel/virtualization.h>
18 #include <mm/core_memprot.h>
19 #include <mm/core_mmu.h>
20 #include <mm/mobj.h>
21 #include <mm/pgt_cache.h>
22 #include <mm/tee_mm.h>
23 #include <mm/tee_mmu_types.h>
24 #include <mm/tee_pager.h>
25 #include <mm/vm.h>
26 #include <stdlib.h>
27 #include <tee_api_defines_extensions.h>
28 #include <tee_api_types.h>
29 #include <trace.h>
30 #include <types_ext.h>
31 #include <user_ta_header.h>
32 #include <util.h>
33 
34 #ifdef CFG_PL310
35 #include <kernel/tee_l2cc_mutex.h>
36 #endif
37 
38 #define TEE_MMU_UDATA_ATTR		(TEE_MATTR_VALID_BLOCK | \
39 					 TEE_MATTR_PRW | TEE_MATTR_URW | \
40 					 TEE_MATTR_SECURE)
41 #define TEE_MMU_UCODE_ATTR		(TEE_MATTR_VALID_BLOCK | \
42 					 TEE_MATTR_PRW | TEE_MATTR_URWX | \
43 					 TEE_MATTR_SECURE)
44 
45 #define TEE_MMU_UCACHE_DEFAULT_ATTR	(TEE_MATTR_MEM_TYPE_CACHED << \
46 					 TEE_MATTR_MEM_TYPE_SHIFT)
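
/*
 * Illustrative note (not part of the original sources): OR-ing these
 * macros together gives the complete attribute word for a typical
 * cached, secure, user read/write data mapping:
 *
 *	uint32_t attr = TEE_MMU_UDATA_ATTR | TEE_MMU_UCACHE_DEFAULT_ATTR;
 *
 * vm_map_pad() below builds a similar word from the mobj's memory type
 * and security attribute plus the caller-supplied protection bits.
 */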
47 
48 static vaddr_t select_va_in_range(const struct vm_region *prev_reg,
49 				  const struct vm_region *next_reg,
50 				  const struct vm_region *reg,
51 				  size_t pad_begin, size_t pad_end,
52 				  size_t granul)
53 {
54 	const uint32_t f = VM_FLAG_EPHEMERAL | VM_FLAG_PERMANENT |
55 			    VM_FLAG_SHAREABLE;
56 	vaddr_t begin_va = 0;
57 	vaddr_t end_va = 0;
58 	size_t pad = 0;
59 
60 	/*
61 	 * Insert an unmapped entry to separate regions with differing
62 	 * VM_FLAG_EPHEMERAL, VM_FLAG_PERMANENT or VM_FLAG_SHAREABLE
63 	 * bits, as such regions must never be contiguous with each other.
64 	 */
65 	if (prev_reg->flags && (prev_reg->flags & f) != (reg->flags & f))
66 		pad = SMALL_PAGE_SIZE;
67 	else
68 		pad = 0;
69 
70 #ifndef CFG_WITH_LPAE
71 	if ((prev_reg->attr & TEE_MATTR_SECURE) !=
72 	    (reg->attr & TEE_MATTR_SECURE))
73 		granul = CORE_MMU_PGDIR_SIZE;
74 #endif
75 
76 	if (ADD_OVERFLOW(prev_reg->va, prev_reg->size, &begin_va) ||
77 	    ADD_OVERFLOW(begin_va, pad_begin, &begin_va) ||
78 	    ADD_OVERFLOW(begin_va, pad, &begin_va) ||
79 	    ROUNDUP_OVERFLOW(begin_va, granul, &begin_va))
80 		return 0;
81 
82 	if (reg->va) {
83 		if (reg->va < begin_va)
84 			return 0;
85 		begin_va = reg->va;
86 	}
87 
88 	if (next_reg->flags && (next_reg->flags & f) != (reg->flags & f))
89 		pad = SMALL_PAGE_SIZE;
90 	else
91 		pad = 0;
92 
93 #ifndef CFG_WITH_LPAE
94 	if ((next_reg->attr & TEE_MATTR_SECURE) !=
95 	    (reg->attr & TEE_MATTR_SECURE))
96 		granul = CORE_MMU_PGDIR_SIZE;
97 #endif
98 	if (ADD_OVERFLOW(begin_va, reg->size, &end_va) ||
99 	    ADD_OVERFLOW(end_va, pad_end, &end_va) ||
100 	    ADD_OVERFLOW(end_va, pad, &end_va) ||
101 	    ROUNDUP_OVERFLOW(end_va, granul, &end_va))
102 		return 0;
103 
104 	if (end_va <= next_reg->va) {
105 		assert(!reg->va || reg->va == begin_va);
106 		return begin_va;
107 	}
108 
109 	return 0;
110 }
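
/*
 * Worked example for select_va_in_range() (illustrative, with assumed
 * numbers): say prev_reg is a VM_FLAG_PERMANENT mapping ending at
 * 0x100000, reg is VM_FLAG_EPHEMERAL, pad_begin is 0 and granul is
 * SMALL_PAGE_SIZE (0x1000). The differing flags add a one page pad, so
 *
 *	begin_va = ROUNDUP(0x100000 + 0 + 0x1000, 0x1000) = 0x101000
 *
 * leaving an unmapped guard page between the two regions. The candidate
 * address is only returned if begin_va + reg->size, plus pad_end and a
 * possible trailing pad, still ends at or before next_reg->va.
 */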
111 
112 static TEE_Result alloc_pgt(struct user_mode_ctx *uctx)
113 {
114 	struct thread_specific_data *tsd __maybe_unused;
115 
116 	if (!pgt_check_avail(&uctx->vm_info)) {
117 		EMSG("Page tables are not available");
118 		return TEE_ERROR_OUT_OF_MEMORY;
119 	}
120 
121 #ifdef CFG_PAGED_USER_TA
122 	tsd = thread_get_tsd();
123 	if (uctx->ts_ctx == tsd->ctx) {
124 		/*
125 		 * The supplied uctx is the currently active context, so
126 		 * allocate the page tables now as the pager needs them soon.
127 		 */
128 		pgt_alloc(&tsd->pgt_cache, uctx->ts_ctx, &uctx->vm_info);
129 	}
130 #endif
131 
132 	return TEE_SUCCESS;
133 }
134 
135 static void rem_um_region(struct user_mode_ctx *uctx, struct vm_region *r)
136 {
137 	struct thread_specific_data *tsd = thread_get_tsd();
138 	struct pgt_cache *pgt_cache = NULL;
139 	vaddr_t begin = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE);
140 	vaddr_t last = ROUNDUP(r->va + r->size, CORE_MMU_PGDIR_SIZE);
141 	struct vm_region *r2 = NULL;
142 
143 	if (uctx->ts_ctx == tsd->ctx)
144 		pgt_cache = &tsd->pgt_cache;
145 
146 	if (mobj_is_paged(r->mobj)) {
147 		tee_pager_rem_um_region(uctx, r->va, r->size);
148 	} else {
149 		pgt_clear_ctx_range(pgt_cache, uctx->ts_ctx, r->va,
150 				    r->va + r->size);
151 		tlbi_mva_range_asid(r->va, r->size, SMALL_PAGE_SIZE,
152 				    uctx->vm_info.asid);
153 	}
154 
155 	r2 = TAILQ_NEXT(r, link);
156 	if (r2)
157 		last = MIN(last, ROUNDDOWN(r2->va, CORE_MMU_PGDIR_SIZE));
158 
159 	r2 = TAILQ_PREV(r, vm_region_head, link);
160 	if (r2)
161 		begin = MAX(begin,
162 			    ROUNDUP(r2->va + r2->size, CORE_MMU_PGDIR_SIZE));
163 
164 	/* If there are no unused page tables, there's nothing left to do */
165 	if (begin >= last)
166 		return;
167 
168 	pgt_flush_ctx_range(pgt_cache, uctx->ts_ctx, r->va, r->va + r->size);
169 }
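
/*
 * Illustrative example for rem_um_region() (assumed numbers, assuming
 * CORE_MMU_PGDIR_SIZE is 1 MiB as in the non-LPAE case): removing a
 * region covering 0x300000..0x304000 starts from the translation table
 * window 0x300000..0x400000. If the next region starts at 0x308000 the
 * window is clipped to nothing (last becomes ROUNDDOWN(0x308000, 1M) =
 * 0x300000, i.e. begin >= last) and no table is flushed, since a
 * neighbour still uses that directory. If no neighbour shares the
 * directory, the page tables backing the region are flushed so they can
 * be reclaimed.
 */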
170 
171 static TEE_Result umap_add_region(struct vm_info *vmi, struct vm_region *reg,
172 				  size_t pad_begin, size_t pad_end,
173 				  size_t align)
174 {
175 	struct vm_region dummy_first_reg = { };
176 	struct vm_region dummy_last_reg = { };
177 	struct vm_region *r = NULL;
178 	struct vm_region *prev_r = NULL;
179 	vaddr_t va_range_base = 0;
180 	size_t va_range_size = 0;
181 	size_t granul;
182 	vaddr_t va = 0;
183 	size_t offs_plus_size = 0;
184 
185 	core_mmu_get_user_va_range(&va_range_base, &va_range_size);
186 	dummy_first_reg.va = va_range_base;
187 	dummy_last_reg.va = va_range_base + va_range_size;
188 
189 	/* Check alignment; va, size and padding must be SMALL_PAGE_SIZE aligned */
190 	if ((reg->va | reg->size | pad_begin | pad_end) & SMALL_PAGE_MASK)
191 		return TEE_ERROR_ACCESS_CONFLICT;
192 
193 	/* Check that the mobj is defined for the entire range */
194 	if (ADD_OVERFLOW(reg->offset, reg->size, &offs_plus_size))
195 		return TEE_ERROR_BAD_PARAMETERS;
196 	if (offs_plus_size > ROUNDUP(reg->mobj->size, SMALL_PAGE_SIZE))
197 		return TEE_ERROR_BAD_PARAMETERS;
198 
199 	granul = MAX(align, SMALL_PAGE_SIZE);
200 	if (!IS_POWER_OF_TWO(granul))
201 		return TEE_ERROR_BAD_PARAMETERS;
202 
203 	prev_r = &dummy_first_reg;
204 	TAILQ_FOREACH(r, &vmi->regions, link) {
205 		va = select_va_in_range(prev_r, r, reg, pad_begin, pad_end,
206 					granul);
207 		if (va) {
208 			reg->va = va;
209 			TAILQ_INSERT_BEFORE(r, reg, link);
210 			return TEE_SUCCESS;
211 		}
212 		prev_r = r;
213 	}
214 
215 	r = TAILQ_LAST(&vmi->regions, vm_region_head);
216 	if (!r)
217 		r = &dummy_first_reg;
218 	va = select_va_in_range(r, &dummy_last_reg, reg, pad_begin, pad_end,
219 				granul);
220 	if (va) {
221 		reg->va = va;
222 		TAILQ_INSERT_TAIL(&vmi->regions, reg, link);
223 		return TEE_SUCCESS;
224 	}
225 
226 	return TEE_ERROR_ACCESS_CONFLICT;
227 }
228 
229 TEE_Result vm_map_pad(struct user_mode_ctx *uctx, vaddr_t *va, size_t len,
230 		      uint32_t prot, uint32_t flags, struct mobj *mobj,
231 		      size_t offs, size_t pad_begin, size_t pad_end,
232 		      size_t align)
233 {
234 	TEE_Result res = TEE_SUCCESS;
235 	struct vm_region *reg = NULL;
236 	uint32_t attr = 0;
237 
238 	if (prot & ~TEE_MATTR_PROT_MASK)
239 		return TEE_ERROR_BAD_PARAMETERS;
240 
241 	reg = calloc(1, sizeof(*reg));
242 	if (!reg)
243 		return TEE_ERROR_OUT_OF_MEMORY;
244 
245 	if (!mobj_is_paged(mobj)) {
246 		uint32_t mem_type = 0;
247 
248 		res = mobj_get_mem_type(mobj, &mem_type);
249 		if (res)
250 			goto err_free_reg;
251 		attr |= mem_type << TEE_MATTR_MEM_TYPE_SHIFT;
252 	}
253 	attr |= TEE_MATTR_VALID_BLOCK;
254 	if (mobj_is_secure(mobj))
255 		attr |= TEE_MATTR_SECURE;
256 
257 	reg->mobj = mobj_get(mobj);
258 	reg->offset = offs;
259 	reg->va = *va;
260 	reg->size = ROUNDUP(len, SMALL_PAGE_SIZE);
261 	reg->attr = attr | prot;
262 	reg->flags = flags;
263 
264 	res = umap_add_region(&uctx->vm_info, reg, pad_begin, pad_end, align);
265 	if (res)
266 		goto err_put_mobj;
267 
268 	res = alloc_pgt(uctx);
269 	if (res)
270 		goto err_rem_reg;
271 
272 	if (mobj_is_paged(mobj)) {
273 		struct fobj *fobj = mobj_get_fobj(mobj);
274 
275 		if (!fobj) {
276 			res = TEE_ERROR_GENERIC;
277 			goto err_rem_reg;
278 		}
279 
280 		res = tee_pager_add_um_region(uctx, reg->va, fobj, prot);
281 		fobj_put(fobj);
282 		if (res)
283 			goto err_rem_reg;
284 	}
285 
286 	/*
287 	 * If the context is currently active, set it again to update
288 	 * the mapping.
289 	 */
290 	if (thread_get_tsd()->ctx == uctx->ts_ctx)
291 		vm_set_ctx(uctx->ts_ctx);
292 
293 	*va = reg->va;
294 
295 	return TEE_SUCCESS;
296 
297 err_rem_reg:
298 	TAILQ_REMOVE(&uctx->vm_info.regions, reg, link);
299 err_put_mobj:
300 	mobj_put(reg->mobj);
301 err_free_reg:
302 	free(reg);
303 	return res;
304 }
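
/*
 * Usage sketch (illustrative, not part of the original file): mapping a
 * mobj read/write into a user context through the vm_map() helper from
 * <mm/vm.h>, which is expected to forward to vm_map_pad() with zero
 * padding and alignment. "mobj" and "size" are assumed to come from the
 * caller.
 *
 *	vaddr_t va = 0;		- 0 lets umap_add_region() pick a free VA
 *	TEE_Result res;
 *
 *	res = vm_map(uctx, &va, size, TEE_MATTR_PRW | TEE_MATTR_URW,
 *		     0, mobj, 0);
 *	if (res)
 *		return res;
 *	- on success, va holds the page-aligned user virtual address
 */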
305 
306 static struct vm_region *find_vm_region(struct vm_info *vm_info, vaddr_t va)
307 {
308 	struct vm_region *r = NULL;
309 
310 	TAILQ_FOREACH(r, &vm_info->regions, link)
311 		if (va >= r->va && va < r->va + r->size)
312 			return r;
313 
314 	return NULL;
315 }
316 
317 static bool va_range_is_contiguous(struct vm_region *r0, vaddr_t va,
318 				   size_t len,
319 				   bool (*cmp_regs)(const struct vm_region *r0,
320 						    const struct vm_region *r,
321 						    const struct vm_region *rn))
322 {
323 	struct vm_region *r = r0;
324 	vaddr_t end_va = 0;
325 
326 	if (ADD_OVERFLOW(va, len, &end_va))
327 		return false;
328 
329 	while (true) {
330 		struct vm_region *r_next = TAILQ_NEXT(r, link);
331 		vaddr_t r_end_va = r->va + r->size;
332 
333 		if (r_end_va >= end_va)
334 			return true;
335 		if (!r_next)
336 			return false;
337 		if (r_end_va != r_next->va)
338 			return false;
339 		if (cmp_regs && !cmp_regs(r0, r, r_next))
340 			return false;
341 		r = r_next;
342 	}
343 }
344 
345 static TEE_Result split_vm_region(struct user_mode_ctx *uctx,
346 				  struct vm_region *r, vaddr_t va)
347 {
348 	struct vm_region *r2 = NULL;
349 	size_t diff = va - r->va;
350 
351 	assert(diff && diff < r->size);
352 
353 	r2 = calloc(1, sizeof(*r2));
354 	if (!r2)
355 		return TEE_ERROR_OUT_OF_MEMORY;
356 
357 	if (mobj_is_paged(r->mobj)) {
358 		TEE_Result res = tee_pager_split_um_region(uctx, va);
359 
360 		if (res) {
361 			free(r2);
362 			return res;
363 		}
364 	}
365 
366 	r2->mobj = mobj_get(r->mobj);
367 	r2->offset = r->offset + diff;
368 	r2->va = va;
369 	r2->size = r->size - diff;
370 	r2->attr = r->attr;
371 	r2->flags = r->flags;
372 
373 	r->size = diff;
374 
375 	TAILQ_INSERT_AFTER(&uctx->vm_info.regions, r, r2, link);
376 
377 	return TEE_SUCCESS;
378 }
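
/*
 * Worked example for split_vm_region() (assumed numbers): splitting a
 * region with va = 0x20000, size = 0x4000 and offset = 0x1000 at
 * va 0x22000 leaves
 *
 *	r:  va 0x20000, size 0x2000, offset 0x1000
 *	r2: va 0x22000, size 0x2000, offset 0x3000
 *
 * where both regions refer to the same mobj (reference count bumped by
 * mobj_get()) and keep the original attr and flags.
 */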
379 
380 static TEE_Result split_vm_range(struct user_mode_ctx *uctx, vaddr_t va,
381 				 size_t len,
382 				 bool (*cmp_regs)(const struct vm_region *r0,
383 						  const struct vm_region *r,
384 						  const struct vm_region *rn),
385 				 struct vm_region **r0_ret)
386 {
387 	TEE_Result res = TEE_SUCCESS;
388 	struct vm_region *r = NULL;
389 	vaddr_t end_va = 0;
390 
391 	if ((va | len) & SMALL_PAGE_MASK)
392 		return TEE_ERROR_BAD_PARAMETERS;
393 
394 	if (ADD_OVERFLOW(va, len, &end_va))
395 		return TEE_ERROR_BAD_PARAMETERS;
396 
397 	/*
398 	 * Find first vm_region in range and check that the entire range is
399 	 * contiguous.
400 	 */
401 	r = find_vm_region(&uctx->vm_info, va);
402 	if (!r || !va_range_is_contiguous(r, va, len, cmp_regs))
403 		return TEE_ERROR_BAD_PARAMETERS;
404 
405 	/*
406 	 * If needed split regions so that va and len covers only complete
407 	 * regions.
408 	 */
409 	if (va != r->va) {
410 		res = split_vm_region(uctx, r, va);
411 		if (res)
412 			return res;
413 		r = TAILQ_NEXT(r, link);
414 	}
415 
416 	*r0_ret = r;
417 	r = find_vm_region(&uctx->vm_info, va + len - 1);
418 	if (!r)
419 		return TEE_ERROR_BAD_PARAMETERS;
420 	if (end_va != r->va + r->size) {
421 		res = split_vm_region(uctx, r, end_va);
422 		if (res)
423 			return res;
424 	}
425 
426 	return TEE_SUCCESS;
427 }
428 
429 static void merge_vm_range(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
430 {
431 	struct vm_region *r_next = NULL;
432 	struct vm_region *r = NULL;
433 	vaddr_t end_va = 0;
434 
435 	if (ADD_OVERFLOW(va, len, &end_va))
436 		return;
437 
438 	tee_pager_merge_um_region(uctx, va, len);
439 
440 	for (r = TAILQ_FIRST(&uctx->vm_info.regions);; r = r_next) {
441 		r_next = TAILQ_NEXT(r, link);
442 		if (!r_next)
443 			return;
444 
445 		/* Try merging with the region just before va */
446 		if (r->va + r->size < va)
447 			continue;
448 
449 		/*
450 		 * If r->va is well past our range we're done.
451 		 * Note that if it's just the page after our range we'll
452 		 * try to merge.
453 		 */
454 		if (r->va > end_va)
455 			return;
456 
457 		if (r->va + r->size != r_next->va)
458 			continue;
459 		if (r->mobj != r_next->mobj ||
460 		    r->flags != r_next->flags ||
461 		    r->attr != r_next->attr)
462 			continue;
463 		if (r->offset + r->size != r_next->offset)
464 			continue;
465 
466 		TAILQ_REMOVE(&uctx->vm_info.regions, r_next, link);
467 		r->size += r_next->size;
468 		mobj_put(r_next->mobj);
469 		free(r_next);
470 		r_next = r;
471 	}
472 }
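
/*
 * Illustrative note for merge_vm_range(): two neighbouring regions are
 * only merged when they are contiguous in both the virtual address space
 * and the backing mobj, e.g. (assumed numbers)
 *
 *	r:      va 0x20000, size 0x2000, offset 0x0000
 *	r_next: va 0x22000, size 0x1000, offset 0x2000
 *
 * with identical mobj, attr and flags collapse into one region of size
 * 0x3000, undoing an earlier split_vm_region().
 */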
473 
474 static bool cmp_region_for_remap(const struct vm_region *r0,
475 				 const struct vm_region *r,
476 				 const struct vm_region *rn)
477 {
478 	/*
479 	 * All the essentials have to match for remap to make sense: the
480 	 * mobj/fobj, attr and flags must be identical and the offsets must
481 	 * be contiguous.
482 	 *
483 	 * Note that vm_remap() depends on mobj/fobj to be the same.
484 	 */
485 	return r0->flags == r->flags && r0->attr == r->attr &&
486 	       r0->mobj == r->mobj && rn->offset == r->offset + r->size;
487 }
488 
489 TEE_Result vm_remap(struct user_mode_ctx *uctx, vaddr_t *new_va, vaddr_t old_va,
490 		    size_t len, size_t pad_begin, size_t pad_end)
491 {
492 	struct vm_region_head regs = TAILQ_HEAD_INITIALIZER(regs);
493 	TEE_Result res = TEE_SUCCESS;
494 	struct vm_region *r0 = NULL;
495 	struct vm_region *r = NULL;
496 	struct vm_region *r_next = NULL;
497 	struct vm_region *r_last = NULL;
498 	struct vm_region *r_first = NULL;
499 	struct fobj *fobj = NULL;
500 	vaddr_t next_va = 0;
501 
502 	assert(thread_get_tsd()->ctx == uctx->ts_ctx);
503 
504 	if (!len || ((len | old_va) & SMALL_PAGE_MASK))
505 		return TEE_ERROR_BAD_PARAMETERS;
506 
507 	res = split_vm_range(uctx, old_va, len, cmp_region_for_remap, &r0);
508 	if (res)
509 		return res;
510 
511 	if (mobj_is_paged(r0->mobj)) {
512 		fobj = mobj_get_fobj(r0->mobj);
513 		if (!fobj)
514 			panic();
515 	}
516 
517 	for (r = r0; r; r = r_next) {
518 		if (r->va + r->size > old_va + len)
519 			break;
520 		r_next = TAILQ_NEXT(r, link);
521 		rem_um_region(uctx, r);
522 		TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
523 		TAILQ_INSERT_TAIL(&regs, r, link);
524 	}
525 
526 	/*
527 	 * Synchronize changes to the translation tables. Even though the
528 	 * pager case unmaps immediately, we may still free a translation table.
529 	 */
530 	vm_set_ctx(uctx->ts_ctx);
531 
532 	r_first = TAILQ_FIRST(&regs);
533 	while (!TAILQ_EMPTY(&regs)) {
534 		r = TAILQ_FIRST(&regs);
535 		TAILQ_REMOVE(&regs, r, link);
536 		if (r_last) {
537 			r->va = r_last->va + r_last->size;
538 			res = umap_add_region(&uctx->vm_info, r, 0, 0, 0);
539 		} else {
540 			r->va = *new_va;
541 			res = umap_add_region(&uctx->vm_info, r, pad_begin,
542 					      pad_end + len - r->size, 0);
543 		}
544 		if (!res)
545 			r_last = r;
546 		if (!res)
547 			res = alloc_pgt(uctx);
548 		if (fobj && !res)
549 			res = tee_pager_add_um_region(uctx, r->va, fobj,
550 						      r->attr);
551 
552 		if (res) {
553 			/*
554 			 * Something went wrong; move all the recently added
555 			 * regions back to regs for later reinsertion at
556 			 * their original spot.
557 			 */
558 			struct vm_region *r_tmp = NULL;
559 			struct vm_region *r_stop = NULL;
560 
561 			if (r != r_last) {
562 				/*
563 				 * umap_add_region() failed, move r back to
564 				 * regs before all the rest are moved back.
565 				 */
566 				TAILQ_INSERT_HEAD(&regs, r, link);
567 			}
568 			if (r_last)
569 				r_stop = TAILQ_NEXT(r_last, link);
570 			for (r = r_first; r != r_stop; r = r_next) {
571 				r_next = TAILQ_NEXT(r, link);
572 				TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
573 				if (r_tmp)
574 					TAILQ_INSERT_AFTER(&regs, r_tmp, r,
575 							   link);
576 				else
577 					TAILQ_INSERT_HEAD(&regs, r, link);
578 				r_tmp = r;
579 			}
580 
581 			goto err_restore_map;
582 		}
583 	}
584 
585 	fobj_put(fobj);
586 
587 	vm_set_ctx(uctx->ts_ctx);
588 	*new_va = r_first->va;
589 
590 	return TEE_SUCCESS;
591 
592 err_restore_map:
593 	next_va = old_va;
594 	while (!TAILQ_EMPTY(&regs)) {
595 		r = TAILQ_FIRST(&regs);
596 		TAILQ_REMOVE(&regs, r, link);
597 		r->va = next_va;
598 		next_va += r->size;
599 		if (umap_add_region(&uctx->vm_info, r, 0, 0, 0))
600 			panic("Cannot restore mapping");
601 		if (alloc_pgt(uctx))
602 			panic("Cannot restore mapping");
603 		if (fobj && tee_pager_add_um_region(uctx, r->va, fobj, r->attr))
604 			panic("Cannot restore mapping");
605 	}
606 	fobj_put(fobj);
607 	vm_set_ctx(uctx->ts_ctx);
608 
609 	return res;
610 }
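
/*
 * Usage sketch (illustrative): moving an already mapped, page-aligned
 * range to a new location chosen by the allocator. The context must be
 * the currently active one, as asserted by vm_remap().
 *
 *	vaddr_t new_va = 0;	- 0 lets the allocator pick the new VA
 *	TEE_Result res;
 *
 *	res = vm_remap(uctx, &new_va, old_va, len, 0, 0);
 *	if (res)
 *		return res;
 *	- the pages are now mapped at new_va; old_va is no longer valid
 */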
611 
612 static bool cmp_region_for_get_flags(const struct vm_region *r0,
613 				     const struct vm_region *r,
614 				     const struct vm_region *rn __unused)
615 {
616 	return r0->flags == r->flags;
617 }
618 
619 TEE_Result vm_get_flags(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
620 			uint32_t *flags)
621 {
622 	struct vm_region *r = NULL;
623 
624 	if (!len || ((len | va) & SMALL_PAGE_MASK))
625 		return TEE_ERROR_BAD_PARAMETERS;
626 
627 	r = find_vm_region(&uctx->vm_info, va);
628 	if (!r)
629 		return TEE_ERROR_BAD_PARAMETERS;
630 
631 	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_flags))
632 		return TEE_ERROR_BAD_PARAMETERS;
633 
634 	*flags = r->flags;
635 
636 	return TEE_SUCCESS;
637 }
638 
639 static bool cmp_region_for_get_prot(const struct vm_region *r0,
640 				    const struct vm_region *r,
641 				    const struct vm_region *rn __unused)
642 {
643 	return (r0->attr & TEE_MATTR_PROT_MASK) ==
644 	       (r->attr & TEE_MATTR_PROT_MASK);
645 }
646 
647 TEE_Result vm_get_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
648 		       uint16_t *prot)
649 {
650 	struct vm_region *r = NULL;
651 
652 	if (!len || ((len | va) & SMALL_PAGE_MASK))
653 		return TEE_ERROR_BAD_PARAMETERS;
654 
655 	r = find_vm_region(&uctx->vm_info, va);
656 	if (!r)
657 		return TEE_ERROR_BAD_PARAMETERS;
658 
659 	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_prot))
660 		return TEE_ERROR_BAD_PARAMETERS;
661 
662 	*prot = r->attr & TEE_MATTR_PROT_MASK;
663 
664 	return TEE_SUCCESS;
665 }
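
/*
 * Usage sketch (illustrative): querying the protection of a mapped range,
 * e.g. to check that a buffer is writable from user mode before handing
 * it out.
 *
 *	uint16_t prot = 0;
 *
 *	if (vm_get_prot(uctx, va, len, &prot))
 *		return TEE_ERROR_BAD_PARAMETERS;
 *	if (!(prot & TEE_MATTR_UW))
 *		return TEE_ERROR_ACCESS_DENIED;
 *
 * The query fails unless the whole range is mapped with one uniform
 * protection.
 */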
666 
667 TEE_Result vm_set_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
668 		       uint32_t prot)
669 {
670 	TEE_Result res = TEE_SUCCESS;
671 	struct vm_region *r0 = NULL;
672 	struct vm_region *r = NULL;
673 	bool was_writeable = false;
674 	bool need_sync = false;
675 
676 	assert(thread_get_tsd()->ctx == uctx->ts_ctx);
677 
678 	if (prot & ~TEE_MATTR_PROT_MASK || !len)
679 		return TEE_ERROR_BAD_PARAMETERS;
680 
681 	res = split_vm_range(uctx, va, len, NULL, &r0);
682 	if (res)
683 		return res;
684 
685 	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
686 		if (r->va + r->size > va + len)
687 			break;
688 		if (r->attr & (TEE_MATTR_UW | TEE_MATTR_PW))
689 			was_writeable = true;
690 
691 		if (!mobj_is_paged(r->mobj))
692 			need_sync = true;
693 
694 		r->attr &= ~TEE_MATTR_PROT_MASK;
695 		r->attr |= prot;
696 	}
697 
698 	if (need_sync) {
699 		/* Synchronize changes to translation tables */
700 		vm_set_ctx(uctx->ts_ctx);
701 	}
702 
703 	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
704 		if (r->va + r->size > va + len)
705 			break;
706 		if (mobj_is_paged(r->mobj)) {
707 			if (!tee_pager_set_um_region_attr(uctx, r->va, r->size,
708 							  prot))
709 				panic();
710 		} else if (was_writeable) {
711 			cache_op_inner(DCACHE_AREA_CLEAN, (void *)r->va,
712 				       r->size);
713 		}
714 
715 	}
716 	if (need_sync && was_writeable)
717 		cache_op_inner(ICACHE_INVALIDATE, NULL, 0);
718 
719 	merge_vm_range(uctx, va, len);
720 
721 	return TEE_SUCCESS;
722 }
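
/*
 * Usage sketch (illustrative): a W^X style transition where a range is
 * first populated through a writable mapping and then switched to
 * read+execute for user mode. TEE_MATTR_UX/TEE_MATTR_PR are assumed to
 * be the usual single permission bits from <mm/tee_mmu_types.h>.
 *
 *	res = vm_set_prot(uctx, va, len,
 *			  TEE_MATTR_UR | TEE_MATTR_UX | TEE_MATTR_PR);
 *	if (res)
 *		return res;
 *
 * Dropping write permission on a previously writable, non-paged range
 * triggers the cache maintenance above (D-cache clean of the range and
 * an I-cache invalidate) so stale instructions are not executed.
 */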
723 
724 static void umap_remove_region(struct vm_info *vmi, struct vm_region *reg)
725 {
726 	TAILQ_REMOVE(&vmi->regions, reg, link);
727 	mobj_put(reg->mobj);
728 	free(reg);
729 }
730 
731 TEE_Result vm_unmap(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
732 {
733 	TEE_Result res = TEE_SUCCESS;
734 	struct vm_region *r = NULL;
735 	struct vm_region *r_next = NULL;
736 	size_t end_va = 0;
737 	size_t unmap_end_va = 0;
738 	size_t l = 0;
739 
740 	assert(thread_get_tsd()->ctx == uctx->ts_ctx);
741 
742 	if (ROUNDUP_OVERFLOW(len, SMALL_PAGE_SIZE, &l))
743 		return TEE_ERROR_BAD_PARAMETERS;
744 
745 	if (!l || (va & SMALL_PAGE_MASK))
746 		return TEE_ERROR_BAD_PARAMETERS;
747 
748 	if (ADD_OVERFLOW(va, l, &end_va))
749 		return TEE_ERROR_BAD_PARAMETERS;
750 
751 	res = split_vm_range(uctx, va, l, NULL, &r);
752 	if (res)
753 		return res;
754 
755 	while (true) {
756 		r_next = TAILQ_NEXT(r, link);
757 		unmap_end_va = r->va + r->size;
758 		rem_um_region(uctx, r);
759 		umap_remove_region(&uctx->vm_info, r);
760 		if (!r_next || unmap_end_va == end_va)
761 			break;
762 		r = r_next;
763 	}
764 
765 	return TEE_SUCCESS;
766 }
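
/*
 * Usage sketch (illustrative): tearing down a mapping created earlier
 * with vm_map(). va must be page aligned, len is rounded up to a page
 * boundary, and the range must be contiguously mapped.
 *
 *	res = vm_unmap(uctx, va, len);
 *	if (res)
 *		return res;
 *
 * Partially covered regions are split first, so unmapping the middle of
 * a larger mapping leaves its head and tail mapped.
 */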
767 
768 static TEE_Result map_kinit(struct user_mode_ctx *uctx)
769 {
770 	TEE_Result res = TEE_SUCCESS;
771 	struct mobj *mobj = NULL;
772 	size_t offs = 0;
773 	vaddr_t va = 0;
774 	size_t sz = 0;
775 	uint32_t prot = 0;
776 
777 	thread_get_user_kcode(&mobj, &offs, &va, &sz);
778 	if (sz) {
779 		prot = TEE_MATTR_PRX;
780 		if (IS_ENABLED(CFG_CORE_BTI))
781 			prot |= TEE_MATTR_GUARDED;
782 		res = vm_map(uctx, &va, sz, prot, VM_FLAG_PERMANENT,
783 			     mobj, offs);
784 		if (res)
785 			return res;
786 	}
787 
788 	thread_get_user_kdata(&mobj, &offs, &va, &sz);
789 	if (sz)
790 		return vm_map(uctx, &va, sz, TEE_MATTR_PRW, VM_FLAG_PERMANENT,
791 			      mobj, offs);
792 
793 	return TEE_SUCCESS;
794 }
795 
796 TEE_Result vm_info_init(struct user_mode_ctx *uctx, struct ts_ctx *ts_ctx)
797 {
798 	TEE_Result res;
799 	uint32_t asid = asid_alloc();
800 
801 	if (!asid) {
802 		DMSG("Failed to allocate ASID");
803 		return TEE_ERROR_GENERIC;
804 	}
805 
806 	memset(uctx, 0, sizeof(*uctx));
807 	TAILQ_INIT(&uctx->vm_info.regions);
808 	uctx->vm_info.asid = asid;
809 	uctx->ts_ctx = ts_ctx;
810 
811 	res = map_kinit(uctx);
812 	if (res)
813 		vm_info_final(uctx);
814 	return res;
815 }
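
/*
 * Lifecycle sketch (illustrative): how a user mode context is typically
 * brought up and torn down with these helpers. "ctx" is assumed to be
 * the ts_ctx owning uctx.
 *
 *	res = vm_info_init(uctx, ctx);	- allocates an ASID, maps kcode
 *	if (res)
 *		return res;
 *	- vm_map()/vm_map_param() the TA segments and parameters
 *	vm_set_ctx(ctx);		- activate the address space
 *	- run the TA
 *	vm_info_final(uctx);		- invalidate TLB, free ASID/regions
 */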
816 
817 void vm_clean_param(struct user_mode_ctx *uctx)
818 {
819 	struct vm_region *next_r;
820 	struct vm_region *r;
821 
822 	TAILQ_FOREACH_SAFE(r, &uctx->vm_info.regions, link, next_r) {
823 		if (r->flags & VM_FLAG_EPHEMERAL) {
824 			rem_um_region(uctx, r);
825 			umap_remove_region(&uctx->vm_info, r);
826 		}
827 	}
828 }
829 
830 static void check_param_map_empty(struct user_mode_ctx *uctx __maybe_unused)
831 {
832 	struct vm_region *r = NULL;
833 
834 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
835 		assert(!(r->flags & VM_FLAG_EPHEMERAL));
836 }
837 
838 static TEE_Result param_mem_to_user_va(struct user_mode_ctx *uctx,
839 				       struct param_mem *mem, void **user_va)
840 {
841 	struct vm_region *region = NULL;
842 
843 	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
844 		vaddr_t va = 0;
845 		size_t phys_offs = 0;
846 
847 		if (!(region->flags & VM_FLAG_EPHEMERAL))
848 			continue;
849 		if (mem->mobj != region->mobj)
850 			continue;
851 
852 		phys_offs = mobj_get_phys_offs(mem->mobj,
853 					       CORE_MMU_USER_PARAM_SIZE);
854 		phys_offs += mem->offs;
855 		if (phys_offs < region->offset)
856 			continue;
857 		if (phys_offs >= (region->offset + region->size))
858 			continue;
859 		va = region->va + phys_offs - region->offset;
860 		*user_va = (void *)va;
861 		return TEE_SUCCESS;
862 	}
863 	return TEE_ERROR_GENERIC;
864 }
865 
866 static int cmp_param_mem(const void *a0, const void *a1)
867 {
868 	const struct param_mem *m1 = a1;
869 	const struct param_mem *m0 = a0;
870 	int ret;
871 
872 	/* Make sure that invalid param_mem entries are placed last in the array */
873 	if (!m0->mobj && !m1->mobj)
874 		return 0;
875 	if (!m0->mobj)
876 		return 1;
877 	if (!m1->mobj)
878 		return -1;
879 
880 	ret = CMP_TRILEAN(mobj_is_secure(m0->mobj), mobj_is_secure(m1->mobj));
881 	if (ret)
882 		return ret;
883 
884 	ret = CMP_TRILEAN((vaddr_t)m0->mobj, (vaddr_t)m1->mobj);
885 	if (ret)
886 		return ret;
887 
888 	ret = CMP_TRILEAN(m0->offs, m1->offs);
889 	if (ret)
890 		return ret;
891 
892 	return CMP_TRILEAN(m0->size, m1->size);
893 }
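
/*
 * Illustrative example (assumed values, all entries referring to secure
 * mobjs and assuming mobj A has the lower pointer value): the input
 *
 *	{ A, offs 0x3000 }, { NULL }, { A, offs 0x1000 }, { B, offs 0 }
 *
 * sorts to
 *
 *	{ A, 0x1000 }, { A, 0x3000 }, { B, 0 }, { NULL }
 *
 * so entries that vm_map_param() may merge (same mobj, adjacent or
 * overlapping offsets) end up next to each other and invalid entries
 * come last.
 */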
894 
895 TEE_Result vm_map_param(struct user_mode_ctx *uctx, struct tee_ta_param *param,
896 			void *param_va[TEE_NUM_PARAMS])
897 {
898 	TEE_Result res = TEE_SUCCESS;
899 	size_t n;
900 	size_t m;
901 	struct param_mem mem[TEE_NUM_PARAMS];
902 
903 	memset(mem, 0, sizeof(mem));
904 	for (n = 0; n < TEE_NUM_PARAMS; n++) {
905 		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
906 		size_t phys_offs;
907 
908 		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
909 		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
910 		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
911 			continue;
912 		phys_offs = mobj_get_phys_offs(param->u[n].mem.mobj,
913 					       CORE_MMU_USER_PARAM_SIZE);
914 		mem[n].mobj = param->u[n].mem.mobj;
915 		mem[n].offs = ROUNDDOWN(phys_offs + param->u[n].mem.offs,
916 					CORE_MMU_USER_PARAM_SIZE);
917 		mem[n].size = ROUNDUP(phys_offs + param->u[n].mem.offs -
918 				      mem[n].offs + param->u[n].mem.size,
919 				      CORE_MMU_USER_PARAM_SIZE);
920 		/*
921 		 * For size 0 (raw pointer parameter), add a minimum size
922 		 * so that the address can be mapped
923 		 */
924 		if (!mem[n].size)
925 			mem[n].size = CORE_MMU_USER_PARAM_SIZE;
926 	}
927 
928 	/*
929 	 * Sort the arguments so NULL mobjs end up last, entries are grouped
930 	 * by security attribute and then by mobj pointer value (different
931 	 * mobjs can't be merged), and finally sorted by offset.
932 	 *
933 	 * This should result in a list where all mergeable entries are
934 	 * next to each other and unused/invalid entries are at the end.
935 	 */
936 	qsort(mem, TEE_NUM_PARAMS, sizeof(struct param_mem), cmp_param_mem);
937 
938 	for (n = 1, m = 0; n < TEE_NUM_PARAMS && mem[n].mobj; n++) {
939 		if (mem[n].mobj == mem[m].mobj &&
940 		    (mem[n].offs == (mem[m].offs + mem[m].size) ||
941 		     core_is_buffer_intersect(mem[m].offs, mem[m].size,
942 					      mem[n].offs, mem[n].size))) {
943 			mem[m].size = mem[n].offs + mem[n].size - mem[m].offs;
944 			continue;
945 		}
946 		m++;
947 		if (n != m)
948 			mem[m] = mem[n];
949 	}
950 	/*
951 	 * We'd like 'm' to be the number of valid entries. Here 'm' is the
952 	 * index of the last valid entry if the first entry is valid, else
953 	 * 0.
954 	 */
955 	if (mem[0].mobj)
956 		m++;
957 
958 	check_param_map_empty(uctx);
959 
960 	for (n = 0; n < m; n++) {
961 		vaddr_t va = 0;
962 
963 		res = vm_map(uctx, &va, mem[n].size,
964 			     TEE_MATTR_PRW | TEE_MATTR_URW,
965 			     VM_FLAG_EPHEMERAL | VM_FLAG_SHAREABLE,
966 			     mem[n].mobj, mem[n].offs);
967 		if (res)
968 			goto out;
969 	}
970 
971 	for (n = 0; n < TEE_NUM_PARAMS; n++) {
972 		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
973 
974 		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
975 		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
976 		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
977 			continue;
978 		if (!param->u[n].mem.mobj)
979 			continue;
980 
981 		res = param_mem_to_user_va(uctx, &param->u[n].mem,
982 					   param_va + n);
983 		if (res != TEE_SUCCESS)
984 			goto out;
985 	}
986 
987 	res = alloc_pgt(uctx);
988 out:
989 	if (res)
990 		vm_clean_param(uctx);
991 
992 	return res;
993 }
994 
995 TEE_Result vm_add_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj,
996 			vaddr_t *va)
997 {
998 	TEE_Result res = TEE_SUCCESS;
999 	struct vm_region *reg = NULL;
1000 
1001 	if (!mobj_is_secure(mobj) || !mobj_is_paged(mobj))
1002 		return TEE_ERROR_BAD_PARAMETERS;
1003 
1004 	reg = calloc(1, sizeof(*reg));
1005 	if (!reg)
1006 		return TEE_ERROR_OUT_OF_MEMORY;
1007 
1008 	reg->mobj = mobj;
1009 	reg->offset = 0;
1010 	reg->va = 0;
1011 	reg->size = ROUNDUP(mobj->size, SMALL_PAGE_SIZE);
1012 	reg->attr = TEE_MATTR_SECURE;
1013 
1014 	res = umap_add_region(&uctx->vm_info, reg, 0, 0, 0);
1015 	if (res) {
1016 		free(reg);
1017 		return res;
1018 	}
1019 
1020 	res = alloc_pgt(uctx);
1021 	if (res)
1022 		umap_remove_region(&uctx->vm_info, reg);
1023 	else
1024 		*va = reg->va;
1025 
1026 	return res;
1027 }
1028 
1029 void vm_rem_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj, vaddr_t va)
1030 {
1031 	struct vm_region *r = NULL;
1032 
1033 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1034 		if (r->mobj == mobj && r->va == va) {
1035 			rem_um_region(uctx, r);
1036 			umap_remove_region(&uctx->vm_info, r);
1037 			return;
1038 		}
1039 	}
1040 }
1041 
1042 void vm_info_final(struct user_mode_ctx *uctx)
1043 {
1044 	if (!uctx->vm_info.asid)
1045 		return;
1046 
1047 	/* Clear MMU entries to avoid a clash when the ASID is reused */
1048 	tlbi_asid(uctx->vm_info.asid);
1049 
1050 	asid_free(uctx->vm_info.asid);
1051 	while (!TAILQ_EMPTY(&uctx->vm_info.regions))
1052 		umap_remove_region(&uctx->vm_info,
1053 				   TAILQ_FIRST(&uctx->vm_info.regions));
1054 	memset(&uctx->vm_info, 0, sizeof(uctx->vm_info));
1055 }
1056 
1057 /* return true only if buffer fits inside TA private memory */
1058 bool vm_buf_is_inside_um_private(const struct user_mode_ctx *uctx,
1059 				 const void *va, size_t size)
1060 {
1061 	struct vm_region *r = NULL;
1062 
1063 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1064 		if (r->flags & VM_FLAGS_NONPRIV)
1065 			continue;
1066 		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size))
1067 			return true;
1068 	}
1069 
1070 	return false;
1071 }
1072 
1073 /* return true only if buffer intersects TA private memory */
1074 bool vm_buf_intersects_um_private(const struct user_mode_ctx *uctx,
1075 				  const void *va, size_t size)
1076 {
1077 	struct vm_region *r = NULL;
1078 
1079 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1080 		if (r->flags & VM_FLAGS_NONPRIV)
1081 			continue;
1082 		if (core_is_buffer_intersect((vaddr_t)va, size, r->va, r->size))
1083 			return true;
1084 	}
1085 
1086 	return false;
1087 }
1088 
1089 TEE_Result vm_buf_to_mboj_offs(const struct user_mode_ctx *uctx,
1090 			       const void *va, size_t size,
1091 			       struct mobj **mobj, size_t *offs)
1092 {
1093 	struct vm_region *r = NULL;
1094 
1095 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1096 		if (!r->mobj)
1097 			continue;
1098 		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size)) {
1099 			size_t poffs;
1100 
1101 			poffs = mobj_get_phys_offs(r->mobj,
1102 						   CORE_MMU_USER_PARAM_SIZE);
1103 			*mobj = r->mobj;
1104 			*offs = (vaddr_t)va - r->va + r->offset - poffs;
1105 			return TEE_SUCCESS;
1106 		}
1107 	}
1108 
1109 	return TEE_ERROR_BAD_PARAMETERS;
1110 }
1111 
1112 static TEE_Result tee_mmu_user_va2pa_attr(const struct user_mode_ctx *uctx,
1113 					  void *ua, paddr_t *pa, uint32_t *attr)
1114 {
1115 	struct vm_region *region = NULL;
1116 
1117 	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
1118 		if (!core_is_buffer_inside((vaddr_t)ua, 1, region->va,
1119 					   region->size))
1120 			continue;
1121 
1122 		if (pa) {
1123 			TEE_Result res;
1124 			paddr_t p;
1125 			size_t offset;
1126 			size_t granule;
1127 
1128 			/*
1129 			 * mobj and input user address may each include
1130 			 * a specific offset-in-granule position.
1131 			 * Drop both to get target physical page base
1132 			 * address then apply only user address
1133 			 * offset-in-granule.
1134 			 * The smallest mapping granule is the small page.
1135 			 */
1136 			granule = MAX(region->mobj->phys_granule,
1137 				      (size_t)SMALL_PAGE_SIZE);
1138 			assert(!granule || IS_POWER_OF_TWO(granule));
1139 
1140 			offset = region->offset +
1141 				 ROUNDDOWN((vaddr_t)ua - region->va, granule);
1142 
1143 			res = mobj_get_pa(region->mobj, offset, granule, &p);
1144 			if (res != TEE_SUCCESS)
1145 				return res;
1146 
1147 			*pa = p | ((vaddr_t)ua & (granule - 1));
1148 		}
1149 		if (attr)
1150 			*attr = region->attr;
1151 
1152 		return TEE_SUCCESS;
1153 	}
1154 
1155 	return TEE_ERROR_ACCESS_DENIED;
1156 }
1157 
1158 TEE_Result vm_va2pa(const struct user_mode_ctx *uctx, void *ua, paddr_t *pa)
1159 {
1160 	return tee_mmu_user_va2pa_attr(uctx, ua, pa, NULL);
1161 }
1162 
1163 void *vm_pa2va(const struct user_mode_ctx *uctx, paddr_t pa, size_t pa_size)
1164 {
1165 	paddr_t p = 0;
1166 	struct vm_region *region = NULL;
1167 
1168 	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
1169 		size_t granule = 0;
1170 		size_t size = 0;
1171 		size_t ofs = 0;
1172 
1173 		/* pa2va is expected only for memory tracked through mobj */
1174 		if (!region->mobj)
1175 			continue;
1176 
1177 		/* Physically granulated memory object must be scanned */
1178 		granule = region->mobj->phys_granule;
1179 		assert(!granule || IS_POWER_OF_TWO(granule));
1180 
1181 		for (ofs = region->offset; ofs < region->size; ofs += size) {
1182 
1183 			if (granule) {
1184 				/* From current offset to buffer/granule end */
1185 				size = granule - (ofs & (granule - 1));
1186 
1187 				if (size > (region->size - ofs))
1188 					size = region->size - ofs;
1189 			} else {
1190 				size = region->size;
1191 			}
1192 
1193 			if (mobj_get_pa(region->mobj, ofs, granule, &p))
1194 				continue;
1195 
1196 			if (core_is_buffer_inside(pa, pa_size, p, size)) {
1197 				/* Remove region offset (mobj phys offset) */
1198 				ofs -= region->offset;
1199 				/* Get offset-in-granule */
1200 				p = pa - p;
1201 
1202 				return (void *)(region->va + ofs + (vaddr_t)p);
1203 			}
1204 		}
1205 	}
1206 
1207 	return NULL;
1208 }
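
/*
 * Illustrative note: for an address mapped exactly once in uctx,
 * vm_va2pa() and vm_pa2va() are inverses of each other, e.g. (sketch,
 * assuming the physical page is not aliased by another region)
 *
 *	paddr_t pa = 0;
 *
 *	if (vm_va2pa(uctx, (void *)va, &pa) == TEE_SUCCESS)
 *		assert(vm_pa2va(uctx, pa, 1) == (void *)va);
 *
 * vm_pa2va() scans every region and physical granule, so it is much more
 * expensive than the va2pa direction.
 */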
1209 
1210 TEE_Result vm_check_access_rights(const struct user_mode_ctx *uctx,
1211 				  uint32_t flags, uaddr_t uaddr, size_t len)
1212 {
1213 	uaddr_t a = 0;
1214 	uaddr_t end_addr = 0;
1215 	size_t addr_incr = MIN(CORE_MMU_USER_CODE_SIZE,
1216 			       CORE_MMU_USER_PARAM_SIZE);
1217 
1218 	if (ADD_OVERFLOW(uaddr, len, &end_addr))
1219 		return TEE_ERROR_ACCESS_DENIED;
1220 
1221 	if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
1222 	    (flags & TEE_MEMORY_ACCESS_SECURE))
1223 		return TEE_ERROR_ACCESS_DENIED;
1224 
1225 	/*
1226 	 * Rely on the TA private memory test to check whether the address
1227 	 * range is private to the TA or not.
1228 	 */
1229 	if (!(flags & TEE_MEMORY_ACCESS_ANY_OWNER) &&
1230 	   !vm_buf_is_inside_um_private(uctx, (void *)uaddr, len))
1231 		return TEE_ERROR_ACCESS_DENIED;
1232 
1233 	for (a = ROUNDDOWN(uaddr, addr_incr); a < end_addr; a += addr_incr) {
1234 		uint32_t attr;
1235 		TEE_Result res;
1236 
1237 		res = tee_mmu_user_va2pa_attr(uctx, (void *)a, NULL, &attr);
1238 		if (res != TEE_SUCCESS)
1239 			return res;
1240 
1241 		if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
1242 		    (attr & TEE_MATTR_SECURE))
1243 			return TEE_ERROR_ACCESS_DENIED;
1244 
1245 		if ((flags & TEE_MEMORY_ACCESS_SECURE) &&
1246 		    !(attr & TEE_MATTR_SECURE))
1247 			return TEE_ERROR_ACCESS_DENIED;
1248 
1249 		if ((flags & TEE_MEMORY_ACCESS_WRITE) && !(attr & TEE_MATTR_UW))
1250 			return TEE_ERROR_ACCESS_DENIED;
1251 		if ((flags & TEE_MEMORY_ACCESS_READ) && !(attr & TEE_MATTR_UR))
1252 			return TEE_ERROR_ACCESS_DENIED;
1253 	}
1254 
1255 	return TEE_SUCCESS;
1256 }
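
/*
 * Usage sketch (illustrative): validating a user supplied output buffer
 * before writing to it from a syscall handler.
 *
 *	res = vm_check_access_rights(uctx,
 *				     TEE_MEMORY_ACCESS_WRITE |
 *				     TEE_MEMORY_ACCESS_ANY_OWNER,
 *				     (uaddr_t)user_buf, user_buf_len);
 *	if (res)
 *		return res;
 *
 * Without TEE_MEMORY_ACCESS_ANY_OWNER the buffer must additionally lie
 * entirely inside TA private memory.
 */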
1257 
1258 void vm_set_ctx(struct ts_ctx *ctx)
1259 {
1260 	struct thread_specific_data *tsd = thread_get_tsd();
1261 
1262 	core_mmu_set_user_map(NULL);
1263 	/*
1264 	 * No matter what happens below, the current user TA will not be
1265 	 * current any longer. Make sure pager is in sync with that.
1266 	 * This function has to be called before there's a chance that
1267 	 * pgt_free_unlocked() is called.
1268 	 */
1269 	pgt_free(&tsd->pgt_cache);
1270 
1271 	if (is_user_mode_ctx(ctx)) {
1272 		struct core_mmu_user_map map = { };
1273 		struct user_mode_ctx *uctx = to_user_mode_ctx(ctx);
1274 
1275 		core_mmu_create_user_map(uctx, &map);
1276 		core_mmu_set_user_map(&map);
1277 		tee_pager_assign_um_tables(uctx);
1278 	}
1279 	tsd->ctx = ctx;
1280 }
1281 
1282 struct mobj *vm_get_mobj(struct user_mode_ctx *uctx, vaddr_t va, size_t *len,
1283 			 uint16_t *prot, size_t *offs)
1284 {
1285 	struct vm_region *r = NULL;
1286 	size_t r_offs = 0;
1287 
1288 	if (!len || ((*len | va) & SMALL_PAGE_MASK))
1289 		return NULL;
1290 
1291 	r = find_vm_region(&uctx->vm_info, va);
1292 	if (!r)
1293 		return NULL;
1294 
1295 	r_offs = va - r->va;
1296 
1297 	*len = MIN(r->size - r_offs, *len);
1298 	*offs = r->offset + r_offs;
1299 	*prot = r->attr & TEE_MATTR_PROT_MASK;
1300 	return mobj_get(r->mobj);
1301 }
1302 }
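
/*
 * Usage sketch (illustrative): translating a page-aligned user VA range
 * back to a mobj reference, e.g. when registering user memory with
 * another subsystem. The returned reference must be released with
 * mobj_put().
 *
 *	size_t l = len;		- clamped to the end of the region on return
 *	size_t offs = 0;
 *	uint16_t prot = 0;
 *	struct mobj *m = vm_get_mobj(uctx, va, &l, &prot, &offs);
 *
 *	if (!m)
 *		return TEE_ERROR_BAD_PARAMETERS;
 *	- use m, offs, prot and the possibly reduced length l
 *	mobj_put(m);
 */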