xref: /optee_os/core/mm/vm.c (revision e17e7a562ac31d52e9604977df3b4c6dd1d8c404)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2021, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  * Copyright (c) 2021, Arm Limited
6  */
7 
8 #include <assert.h>
9 #include <config.h>
10 #include <initcall.h>
11 #include <kernel/panic.h>
12 #include <kernel/spinlock.h>
13 #include <kernel/tee_common.h>
14 #include <kernel/tee_misc.h>
15 #include <kernel/tlb_helpers.h>
16 #include <kernel/user_mode_ctx.h>
17 #include <kernel/virtualization.h>
18 #include <mm/core_memprot.h>
19 #include <mm/core_mmu.h>
20 #include <mm/mobj.h>
21 #include <mm/pgt_cache.h>
22 #include <mm/tee_mm.h>
23 #include <mm/tee_mmu_types.h>
24 #include <mm/tee_pager.h>
25 #include <mm/vm.h>
26 #include <stdlib.h>
27 #include <tee_api_defines_extensions.h>
28 #include <tee_api_types.h>
29 #include <trace.h>
30 #include <types_ext.h>
31 #include <user_ta_header.h>
32 #include <util.h>
33 
34 #ifdef CFG_PL310
35 #include <kernel/tee_l2cc_mutex.h>
36 #endif
37 
38 #define TEE_MMU_UDATA_ATTR		(TEE_MATTR_VALID_BLOCK | \
39 					 TEE_MATTR_PRW | TEE_MATTR_URW | \
40 					 TEE_MATTR_SECURE)
41 #define TEE_MMU_UCODE_ATTR		(TEE_MATTR_VALID_BLOCK | \
42 					 TEE_MATTR_PRW | TEE_MATTR_URWX | \
43 					 TEE_MATTR_SECURE)
44 
45 #define TEE_MMU_UCACHE_DEFAULT_ATTR	(TEE_MATTR_MEM_TYPE_CACHED << \
46 					 TEE_MATTR_MEM_TYPE_SHIFT)
47 
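/*
 * Pick a virtual address for "reg" in the gap between "prev_reg" and
 * "next_reg", leaving pad_begin and pad_end bytes of padding and rounding
 * the address to the "granul" alignment. Regions with differing
 * EPHEMERAL, PERMANENT or SHAREABLE flags are separated by an unmapped
 * page, and without LPAE a change in the secure attribute forces
 * page-directory granularity. Returns the selected address, or 0 if
 * "reg" cannot be placed in the gap.
 */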
48 static vaddr_t select_va_in_range(const struct vm_region *prev_reg,
49 				  const struct vm_region *next_reg,
50 				  const struct vm_region *reg,
51 				  size_t pad_begin, size_t pad_end,
52 				  size_t granul)
53 {
54 	const uint32_t f = VM_FLAG_EPHEMERAL | VM_FLAG_PERMANENT |
55 			    VM_FLAG_SHAREABLE;
56 	vaddr_t begin_va = 0;
57 	vaddr_t end_va = 0;
58 	size_t pad = 0;
59 
60 	/*
61 	 * Insert an unmapped entry to separate regions with differing
62 	 * VM_FLAG_EPHEMERAL, VM_FLAG_PERMANENT or VM_FLAG_SHAREABLE
63 	 * bits, as such regions must never be contiguous with another region.
64 	 */
65 	if (prev_reg->flags && (prev_reg->flags & f) != (reg->flags & f))
66 		pad = SMALL_PAGE_SIZE;
67 	else
68 		pad = 0;
69 
70 #ifndef CFG_WITH_LPAE
71 	if ((prev_reg->attr & TEE_MATTR_SECURE) !=
72 	    (reg->attr & TEE_MATTR_SECURE))
73 		granul = CORE_MMU_PGDIR_SIZE;
74 #endif
75 
76 	if (ADD_OVERFLOW(prev_reg->va, prev_reg->size, &begin_va) ||
77 	    ADD_OVERFLOW(begin_va, pad_begin, &begin_va) ||
78 	    ADD_OVERFLOW(begin_va, pad, &begin_va) ||
79 	    ROUNDUP_OVERFLOW(begin_va, granul, &begin_va))
80 		return 0;
81 
82 	if (reg->va) {
83 		if (reg->va < begin_va)
84 			return 0;
85 		begin_va = reg->va;
86 	}
87 
88 	if (next_reg->flags && (next_reg->flags & f) != (reg->flags & f))
89 		pad = SMALL_PAGE_SIZE;
90 	else
91 		pad = 0;
92 
93 #ifndef CFG_WITH_LPAE
94 	if ((next_reg->attr & TEE_MATTR_SECURE) !=
95 	    (reg->attr & TEE_MATTR_SECURE))
96 		granul = CORE_MMU_PGDIR_SIZE;
97 #endif
98 	if (ADD_OVERFLOW(begin_va, reg->size, &end_va) ||
99 	    ADD_OVERFLOW(end_va, pad_end, &end_va) ||
100 	    ADD_OVERFLOW(end_va, pad, &end_va) ||
101 	    ROUNDUP_OVERFLOW(end_va, granul, &end_va))
102 		return 0;
103 
104 	if (end_va <= next_reg->va) {
105 		assert(!reg->va || reg->va == begin_va);
106 		return begin_va;
107 	}
108 
109 	return 0;
110 }
111 
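/*
 * Check that enough page tables are available to cover uctx->vm_info.
 * With CFG_PAGED_USER_TA the tables are also allocated right away when
 * the supplied context is the currently active one, since the pager
 * needs to use them soon.
 */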
112 static TEE_Result alloc_pgt(struct user_mode_ctx *uctx)
113 {
114 	struct thread_specific_data *tsd __maybe_unused;
115 
116 	if (!pgt_check_avail(&uctx->vm_info)) {
117 		EMSG("Page tables are not available");
118 		return TEE_ERROR_OUT_OF_MEMORY;
119 	}
120 
121 #ifdef CFG_PAGED_USER_TA
122 	tsd = thread_get_tsd();
123 	if (uctx->ts_ctx == tsd->ctx) {
124 		/*
125 		 * The supplied uctx is the currently active context; allocate
126 		 * the page tables too as the pager needs to use them soon.
127 		 */
128 		pgt_alloc(&uctx->pgt_cache, uctx->ts_ctx, &uctx->vm_info);
129 	}
130 #endif
131 
132 	return TEE_SUCCESS;
133 }
134 
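/*
 * Tear down the mapping of region "r": pager-backed regions are removed
 * through the pager, other regions get their page table entries cleared
 * and the TLB invalidated by ASID. Page tables that are no longer shared
 * with a neighbouring region are flushed as well.
 */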
135 static void rem_um_region(struct user_mode_ctx *uctx, struct vm_region *r)
136 {
137 	struct pgt_cache *pgt_cache = &uctx->pgt_cache;
138 	vaddr_t begin = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE);
139 	vaddr_t last = ROUNDUP(r->va + r->size, CORE_MMU_PGDIR_SIZE);
140 	struct vm_region *r2 = NULL;
141 
142 	if (mobj_is_paged(r->mobj)) {
143 		tee_pager_rem_um_region(uctx, r->va, r->size);
144 	} else {
145 		pgt_clear_ctx_range(pgt_cache, uctx->ts_ctx, r->va,
146 				    r->va + r->size);
147 		tlbi_mva_range_asid(r->va, r->size, SMALL_PAGE_SIZE,
148 				    uctx->vm_info.asid);
149 	}
150 
151 	r2 = TAILQ_NEXT(r, link);
152 	if (r2)
153 		last = MIN(last, ROUNDDOWN(r2->va, CORE_MMU_PGDIR_SIZE));
154 
155 	r2 = TAILQ_PREV(r, vm_region_head, link);
156 	if (r2)
157 		begin = MAX(begin,
158 			    ROUNDUP(r2->va + r2->size, CORE_MMU_PGDIR_SIZE));
159 
160 	/* If there are no unused page tables, there's nothing left to do */
161 	if (begin >= last)
162 		return;
163 
164 	pgt_flush_ctx_range(pgt_cache, uctx->ts_ctx, r->va, r->va + r->size);
165 }
166 
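/*
 * Insert "reg" into the sorted region list of "vmi". If reg->va is
 * non-zero that exact address is required, otherwise the first gap large
 * enough to hold the region with the requested padding and alignment is
 * used. Returns TEE_ERROR_ACCESS_CONFLICT if the region cannot be placed.
 */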
167 static TEE_Result umap_add_region(struct vm_info *vmi, struct vm_region *reg,
168 				  size_t pad_begin, size_t pad_end,
169 				  size_t align)
170 {
171 	struct vm_region dummy_first_reg = { };
172 	struct vm_region dummy_last_reg = { };
173 	struct vm_region *r = NULL;
174 	struct vm_region *prev_r = NULL;
175 	vaddr_t va_range_base = 0;
176 	size_t va_range_size = 0;
177 	size_t granul;
178 	vaddr_t va = 0;
179 	size_t offs_plus_size = 0;
180 
181 	core_mmu_get_user_va_range(&va_range_base, &va_range_size);
182 	dummy_first_reg.va = va_range_base;
183 	dummy_last_reg.va = va_range_base + va_range_size;
184 
185 	/* Check alignment; it has to be at least SMALL_PAGE_SIZE granular */
186 	if ((reg->va | reg->size | pad_begin | pad_end) & SMALL_PAGE_MASK)
187 		return TEE_ERROR_ACCESS_CONFLICT;
188 
189 	/* Check that the mobj is defined for the entire range */
190 	if (ADD_OVERFLOW(reg->offset, reg->size, &offs_plus_size))
191 		return TEE_ERROR_BAD_PARAMETERS;
192 	if (offs_plus_size > ROUNDUP(reg->mobj->size, SMALL_PAGE_SIZE))
193 		return TEE_ERROR_BAD_PARAMETERS;
194 
195 	granul = MAX(align, SMALL_PAGE_SIZE);
196 	if (!IS_POWER_OF_TWO(granul))
197 		return TEE_ERROR_BAD_PARAMETERS;
198 
199 	prev_r = &dummy_first_reg;
200 	TAILQ_FOREACH(r, &vmi->regions, link) {
201 		va = select_va_in_range(prev_r, r, reg, pad_begin, pad_end,
202 					granul);
203 		if (va) {
204 			reg->va = va;
205 			TAILQ_INSERT_BEFORE(r, reg, link);
206 			return TEE_SUCCESS;
207 		}
208 		prev_r = r;
209 	}
210 
211 	r = TAILQ_LAST(&vmi->regions, vm_region_head);
212 	if (!r)
213 		r = &dummy_first_reg;
214 	va = select_va_in_range(r, &dummy_last_reg, reg, pad_begin, pad_end,
215 				granul);
216 	if (va) {
217 		reg->va = va;
218 		TAILQ_INSERT_TAIL(&vmi->regions, reg, link);
219 		return TEE_SUCCESS;
220 	}
221 
222 	return TEE_ERROR_ACCESS_CONFLICT;
223 }
224 
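/*
 * Map "mobj" at offset "offs" into the user mode context. On success
 * *va holds the virtual address of the new region; callers may pass
 * *va == 0 to let the allocator pick a free address.
 *
 * Illustrative sketch only (the sizes and the mobj are made up): mapping
 * three pages read/write at an allocator-chosen address could look like
 *
 *	vaddr_t va = 0;
 *	res = vm_map_pad(uctx, &va, 3 * SMALL_PAGE_SIZE,
 *			 TEE_MATTR_PRW | TEE_MATTR_URW,
 *			 0, mobj, 0, 0, 0, 0);
 *
 * which is roughly what the vm_map() wrapper expands to.
 */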
225 TEE_Result vm_map_pad(struct user_mode_ctx *uctx, vaddr_t *va, size_t len,
226 		      uint32_t prot, uint32_t flags, struct mobj *mobj,
227 		      size_t offs, size_t pad_begin, size_t pad_end,
228 		      size_t align)
229 {
230 	TEE_Result res = TEE_SUCCESS;
231 	struct vm_region *reg = NULL;
232 	uint32_t attr = 0;
233 
234 	if (prot & ~TEE_MATTR_PROT_MASK)
235 		return TEE_ERROR_BAD_PARAMETERS;
236 
237 	reg = calloc(1, sizeof(*reg));
238 	if (!reg)
239 		return TEE_ERROR_OUT_OF_MEMORY;
240 
241 	if (!mobj_is_paged(mobj)) {
242 		uint32_t mem_type = 0;
243 
244 		res = mobj_get_mem_type(mobj, &mem_type);
245 		if (res)
246 			goto err_free_reg;
247 		attr |= mem_type << TEE_MATTR_MEM_TYPE_SHIFT;
248 	}
249 	attr |= TEE_MATTR_VALID_BLOCK;
250 	if (mobj_is_secure(mobj))
251 		attr |= TEE_MATTR_SECURE;
252 
253 	reg->mobj = mobj_get(mobj);
254 	reg->offset = offs;
255 	reg->va = *va;
256 	reg->size = ROUNDUP(len, SMALL_PAGE_SIZE);
257 	reg->attr = attr | prot;
258 	reg->flags = flags;
259 
260 	res = umap_add_region(&uctx->vm_info, reg, pad_begin, pad_end, align);
261 	if (res)
262 		goto err_put_mobj;
263 
264 	res = alloc_pgt(uctx);
265 	if (res)
266 		goto err_rem_reg;
267 
268 	if (mobj_is_paged(mobj)) {
269 		struct fobj *fobj = mobj_get_fobj(mobj);
270 
271 		if (!fobj) {
272 			res = TEE_ERROR_GENERIC;
273 			goto err_rem_reg;
274 		}
275 
276 		res = tee_pager_add_um_region(uctx, reg->va, fobj, prot);
277 		fobj_put(fobj);
278 		if (res)
279 			goto err_rem_reg;
280 	}
281 
282 	/*
283 	 * If the context is currently active, set it again to update
284 	 * the mapping.
285 	 */
286 	if (thread_get_tsd()->ctx == uctx->ts_ctx)
287 		vm_set_ctx(uctx->ts_ctx);
288 
289 	*va = reg->va;
290 
291 	return TEE_SUCCESS;
292 
293 err_rem_reg:
294 	TAILQ_REMOVE(&uctx->vm_info.regions, reg, link);
295 err_put_mobj:
296 	mobj_put(reg->mobj);
297 err_free_reg:
298 	free(reg);
299 	return res;
300 }
301 
302 static struct vm_region *find_vm_region(struct vm_info *vm_info, vaddr_t va)
303 {
304 	struct vm_region *r = NULL;
305 
306 	TAILQ_FOREACH(r, &vm_info->regions, link)
307 		if (va >= r->va && va < r->va + r->size)
308 			return r;
309 
310 	return NULL;
311 }
312 
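/*
 * Check that [va, va + len) is covered by regions that follow each other
 * without holes, starting at region "r0". The optional "cmp_regs"
 * callback can veto crossing from one region into the next.
 */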
313 static bool va_range_is_contiguous(struct vm_region *r0, vaddr_t va,
314 				   size_t len,
315 				   bool (*cmp_regs)(const struct vm_region *r0,
316 						    const struct vm_region *r,
317 						    const struct vm_region *rn))
318 {
319 	struct vm_region *r = r0;
320 	vaddr_t end_va = 0;
321 
322 	if (ADD_OVERFLOW(va, len, &end_va))
323 		return false;
324 
325 	while (true) {
326 		struct vm_region *r_next = TAILQ_NEXT(r, link);
327 		vaddr_t r_end_va = r->va + r->size;
328 
329 		if (r_end_va >= end_va)
330 			return true;
331 		if (!r_next)
332 			return false;
333 		if (r_end_va != r_next->va)
334 			return false;
335 		if (cmp_regs && !cmp_regs(r0, r, r_next))
336 			return false;
337 		r = r_next;
338 	}
339 }
340 
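/*
 * Split region "r" at "va" so that the part at and above "va" becomes a
 * new region inserted right after "r". Pager-backed regions are split in
 * the pager as well.
 */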
341 static TEE_Result split_vm_region(struct user_mode_ctx *uctx,
342 				  struct vm_region *r, vaddr_t va)
343 {
344 	struct vm_region *r2 = NULL;
345 	size_t diff = va - r->va;
346 
347 	assert(diff && diff < r->size);
348 
349 	r2 = calloc(1, sizeof(*r2));
350 	if (!r2)
351 		return TEE_ERROR_OUT_OF_MEMORY;
352 
353 	if (mobj_is_paged(r->mobj)) {
354 		TEE_Result res = tee_pager_split_um_region(uctx, va);
355 
356 		if (res) {
357 			free(r2);
358 			return res;
359 		}
360 	}
361 
362 	r2->mobj = mobj_get(r->mobj);
363 	r2->offset = r->offset + diff;
364 	r2->va = va;
365 	r2->size = r->size - diff;
366 	r2->attr = r->attr;
367 	r2->flags = r->flags;
368 
369 	r->size = diff;
370 
371 	TAILQ_INSERT_AFTER(&uctx->vm_info.regions, r, r2, link);
372 
373 	return TEE_SUCCESS;
374 }
375 
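/*
 * Make sure the range [va, va + len) starts and ends exactly on region
 * boundaries by splitting the first and last region as needed. On
 * success *r0_ret points to the first region of the range.
 */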
376 static TEE_Result split_vm_range(struct user_mode_ctx *uctx, vaddr_t va,
377 				 size_t len,
378 				 bool (*cmp_regs)(const struct vm_region *r0,
379 						  const struct vm_region *r,
380 						  const struct vm_region *rn),
381 				 struct vm_region **r0_ret)
382 {
383 	TEE_Result res = TEE_SUCCESS;
384 	struct vm_region *r = NULL;
385 	vaddr_t end_va = 0;
386 
387 	if ((va | len) & SMALL_PAGE_MASK)
388 		return TEE_ERROR_BAD_PARAMETERS;
389 
390 	if (ADD_OVERFLOW(va, len, &end_va))
391 		return TEE_ERROR_BAD_PARAMETERS;
392 
393 	/*
394 	 * Find first vm_region in range and check that the entire range is
395 	 * contiguous.
396 	 */
397 	r = find_vm_region(&uctx->vm_info, va);
398 	if (!r || !va_range_is_contiguous(r, va, len, cmp_regs))
399 		return TEE_ERROR_BAD_PARAMETERS;
400 
401 	/*
402 	 * If needed, split regions so that va and len cover only complete
403 	 * regions.
404 	 */
405 	if (va != r->va) {
406 		res = split_vm_region(uctx, r, va);
407 		if (res)
408 			return res;
409 		r = TAILQ_NEXT(r, link);
410 	}
411 
412 	*r0_ret = r;
413 	r = find_vm_region(&uctx->vm_info, va + len - 1);
414 	if (!r)
415 		return TEE_ERROR_BAD_PARAMETERS;
416 	if (end_va != r->va + r->size) {
417 		res = split_vm_region(uctx, r, end_va);
418 		if (res)
419 			return res;
420 	}
421 
422 	return TEE_SUCCESS;
423 }
424 
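/*
 * Merge neighbouring regions within [va, va + len) that are backed by
 * the same mobj with contiguous offsets and have identical attributes
 * and flags, undoing any earlier splitting.
 */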
425 static void merge_vm_range(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
426 {
427 	struct vm_region *r_next = NULL;
428 	struct vm_region *r = NULL;
429 	vaddr_t end_va = 0;
430 
431 	if (ADD_OVERFLOW(va, len, &end_va))
432 		return;
433 
434 	tee_pager_merge_um_region(uctx, va, len);
435 
436 	for (r = TAILQ_FIRST(&uctx->vm_info.regions);; r = r_next) {
437 		r_next = TAILQ_NEXT(r, link);
438 		if (!r_next)
439 			return;
440 
441 		/* Try merging with the region just before va */
442 		if (r->va + r->size < va)
443 			continue;
444 
445 		/*
446 		 * If r->va is well past our range we're done.
447 		 * Note that if it's just the page after our range we'll
448 		 * try to merge.
449 		 */
450 		if (r->va > end_va)
451 			return;
452 
453 		if (r->va + r->size != r_next->va)
454 			continue;
455 		if (r->mobj != r_next->mobj ||
456 		    r->flags != r_next->flags ||
457 		    r->attr != r_next->attr)
458 			continue;
459 		if (r->offset + r->size != r_next->offset)
460 			continue;
461 
462 		TAILQ_REMOVE(&uctx->vm_info.regions, r_next, link);
463 		r->size += r_next->size;
464 		mobj_put(r_next->mobj);
465 		free(r_next);
466 		r_next = r;
467 	}
468 }
469 
470 static bool cmp_region_for_remap(const struct vm_region *r0,
471 				 const struct vm_region *r,
472 				 const struct vm_region *rn)
473 {
474 	/*
475 	 * All the essentials have to match for remap to make sense. The
476 	 * essentials are mobj/fobj, attr and flags, and the offset must be
477 	 * contiguous.
478 	 *
479 	 * Note that vm_remap() depends on mobj/fobj being the same.
480 	 */
481 	return r0->flags == r->flags && r0->attr == r->attr &&
482 	       r0->mobj == r->mobj && rn->offset == r->offset + r->size;
483 }
484 
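/*
 * Move the mapping of [old_va, old_va + len) to a new virtual address.
 * The range is split out of its current regions, unmapped and reinserted
 * at *new_va, or at a free address if *new_va is 0. If the new mapping
 * cannot be established the old one is restored; failing to restore it
 * is fatal.
 */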
485 TEE_Result vm_remap(struct user_mode_ctx *uctx, vaddr_t *new_va, vaddr_t old_va,
486 		    size_t len, size_t pad_begin, size_t pad_end)
487 {
488 	struct vm_region_head regs = TAILQ_HEAD_INITIALIZER(regs);
489 	TEE_Result res = TEE_SUCCESS;
490 	struct vm_region *r0 = NULL;
491 	struct vm_region *r = NULL;
492 	struct vm_region *r_next = NULL;
493 	struct vm_region *r_last = NULL;
494 	struct vm_region *r_first = NULL;
495 	struct fobj *fobj = NULL;
496 	vaddr_t next_va = 0;
497 
498 	assert(thread_get_tsd()->ctx == uctx->ts_ctx);
499 
500 	if (!len || ((len | old_va) & SMALL_PAGE_MASK))
501 		return TEE_ERROR_BAD_PARAMETERS;
502 
503 	res = split_vm_range(uctx, old_va, len, cmp_region_for_remap, &r0);
504 	if (res)
505 		return res;
506 
507 	if (mobj_is_paged(r0->mobj)) {
508 		fobj = mobj_get_fobj(r0->mobj);
509 		if (!fobj)
510 			panic();
511 	}
512 
513 	for (r = r0; r; r = r_next) {
514 		if (r->va + r->size > old_va + len)
515 			break;
516 		r_next = TAILQ_NEXT(r, link);
517 		rem_um_region(uctx, r);
518 		TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
519 		TAILQ_INSERT_TAIL(&regs, r, link);
520 	}
521 
522 	/*
523 	 * Synchronize changes to translation tables. Even though the pager
524 	 * case unmaps immediately, we may still free a translation table.
525 	 */
526 	vm_set_ctx(uctx->ts_ctx);
527 
528 	r_first = TAILQ_FIRST(&regs);
529 	while (!TAILQ_EMPTY(&regs)) {
530 		r = TAILQ_FIRST(&regs);
531 		TAILQ_REMOVE(&regs, r, link);
532 		if (r_last) {
533 			r->va = r_last->va + r_last->size;
534 			res = umap_add_region(&uctx->vm_info, r, 0, 0, 0);
535 		} else {
536 			r->va = *new_va;
537 			res = umap_add_region(&uctx->vm_info, r, pad_begin,
538 					      pad_end + len - r->size, 0);
539 		}
540 		if (!res)
541 			r_last = r;
542 		if (!res)
543 			res = alloc_pgt(uctx);
544 		if (fobj && !res)
545 			res = tee_pager_add_um_region(uctx, r->va, fobj,
546 						      r->attr);
547 
548 		if (res) {
549 			/*
550 			 * Something went wrong; move all the recently added
551 			 * regions back to regs for later reinsertion at
552 			 * the original spot.
553 			 */
554 			struct vm_region *r_tmp = NULL;
555 			struct vm_region *r_stop = NULL;
556 
557 			if (r != r_last) {
558 				/*
559 				 * umap_add_region() failed; move r back to
560 				 * regs before all the rest are moved back.
561 				 */
562 				TAILQ_INSERT_HEAD(&regs, r, link);
563 			}
564 			if (r_last)
565 				r_stop = TAILQ_NEXT(r_last, link);
566 			for (r = r_first; r != r_stop; r = r_next) {
567 				r_next = TAILQ_NEXT(r, link);
568 				TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
569 				if (r_tmp)
570 					TAILQ_INSERT_AFTER(&regs, r_tmp, r,
571 							   link);
572 				else
573 					TAILQ_INSERT_HEAD(&regs, r, link);
574 				r_tmp = r;
575 			}
576 
577 			goto err_restore_map;
578 		}
579 	}
580 
581 	fobj_put(fobj);
582 
583 	vm_set_ctx(uctx->ts_ctx);
584 	*new_va = r_first->va;
585 
586 	return TEE_SUCCESS;
587 
588 err_restore_map:
589 	next_va = old_va;
590 	while (!TAILQ_EMPTY(&regs)) {
591 		r = TAILQ_FIRST(&regs);
592 		TAILQ_REMOVE(&regs, r, link);
593 		r->va = next_va;
594 		next_va += r->size;
595 		if (umap_add_region(&uctx->vm_info, r, 0, 0, 0))
596 			panic("Cannot restore mapping");
597 		if (alloc_pgt(uctx))
598 			panic("Cannot restore mapping");
599 		if (fobj && tee_pager_add_um_region(uctx, r->va, fobj, r->attr))
600 			panic("Cannot restore mapping");
601 	}
602 	fobj_put(fobj);
603 	vm_set_ctx(uctx->ts_ctx);
604 
605 	return res;
606 }
607 
608 static bool cmp_region_for_get_flags(const struct vm_region *r0,
609 				     const struct vm_region *r,
610 				     const struct vm_region *rn __unused)
611 {
612 	return r0->flags == r->flags;
613 }
614 
615 TEE_Result vm_get_flags(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
616 			uint32_t *flags)
617 {
618 	struct vm_region *r = NULL;
619 
620 	if (!len || ((len | va) & SMALL_PAGE_MASK))
621 		return TEE_ERROR_BAD_PARAMETERS;
622 
623 	r = find_vm_region(&uctx->vm_info, va);
624 	if (!r)
625 		return TEE_ERROR_BAD_PARAMETERS;
626 
627 	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_flags))
628 		return TEE_ERROR_BAD_PARAMETERS;
629 
630 	*flags = r->flags;
631 
632 	return TEE_SUCCESS;
633 }
634 
635 static bool cmp_region_for_get_prot(const struct vm_region *r0,
636 				    const struct vm_region *r,
637 				    const struct vm_region *rn __unused)
638 {
639 	return (r0->attr & TEE_MATTR_PROT_MASK) ==
640 	       (r->attr & TEE_MATTR_PROT_MASK);
641 }
642 
643 TEE_Result vm_get_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
644 		       uint16_t *prot)
645 {
646 	struct vm_region *r = NULL;
647 
648 	if (!len || ((len | va) & SMALL_PAGE_MASK))
649 		return TEE_ERROR_BAD_PARAMETERS;
650 
651 	r = find_vm_region(&uctx->vm_info, va);
652 	if (!r)
653 		return TEE_ERROR_BAD_PARAMETERS;
654 
655 	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_prot))
656 		return TEE_ERROR_BAD_PARAMETERS;
657 
658 	*prot = r->attr & TEE_MATTR_PROT_MASK;
659 
660 	return TEE_SUCCESS;
661 }
662 
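/*
 * Change the protection bits of [va, va + len) to "prot". The range is
 * split into whole regions first and merged again afterwards. When a
 * previously writeable range is updated directly in the translation
 * tables, the data cache is cleaned and the instruction cache is
 * invalidated so that newly written code can be executed safely.
 */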
663 TEE_Result vm_set_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
664 		       uint32_t prot)
665 {
666 	TEE_Result res = TEE_SUCCESS;
667 	struct vm_region *r0 = NULL;
668 	struct vm_region *r = NULL;
669 	bool was_writeable = false;
670 	bool need_sync = false;
671 
672 	assert(thread_get_tsd()->ctx == uctx->ts_ctx);
673 
674 	if (prot & ~TEE_MATTR_PROT_MASK || !len)
675 		return TEE_ERROR_BAD_PARAMETERS;
676 
677 	res = split_vm_range(uctx, va, len, NULL, &r0);
678 	if (res)
679 		return res;
680 
681 	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
682 		if (r->va + r->size > va + len)
683 			break;
684 		if (r->attr & (TEE_MATTR_UW | TEE_MATTR_PW))
685 			was_writeable = true;
686 
687 		if (!mobj_is_paged(r->mobj))
688 			need_sync = true;
689 
690 		r->attr &= ~TEE_MATTR_PROT_MASK;
691 		r->attr |= prot;
692 	}
693 
694 	if (need_sync) {
695 		/* Synchronize changes to translation tables */
696 		vm_set_ctx(uctx->ts_ctx);
697 	}
698 
699 	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
700 		if (r->va + r->size > va + len)
701 			break;
702 		if (mobj_is_paged(r->mobj)) {
703 			if (!tee_pager_set_um_region_attr(uctx, r->va, r->size,
704 							  prot))
705 				panic();
706 		} else if (was_writeable) {
707 			cache_op_inner(DCACHE_AREA_CLEAN, (void *)r->va,
708 				       r->size);
709 		}
710 
711 	}
712 	if (need_sync && was_writeable)
713 		cache_op_inner(ICACHE_INVALIDATE, NULL, 0);
714 
715 	merge_vm_range(uctx, va, len);
716 
717 	return TEE_SUCCESS;
718 }
719 
720 static void umap_remove_region(struct vm_info *vmi, struct vm_region *reg)
721 {
722 	TAILQ_REMOVE(&vmi->regions, reg, link);
723 	mobj_put(reg->mobj);
724 	free(reg);
725 }
726 
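/*
 * Unmap [va, va + len) from the user mode context. The length is rounded
 * up to a whole number of small pages and the range must be covered by
 * contiguous regions; partially covered regions are split first.
 */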
727 TEE_Result vm_unmap(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
728 {
729 	TEE_Result res = TEE_SUCCESS;
730 	struct vm_region *r = NULL;
731 	struct vm_region *r_next = NULL;
732 	size_t end_va = 0;
733 	size_t unmap_end_va = 0;
734 	size_t l = 0;
735 
736 	assert(thread_get_tsd()->ctx == uctx->ts_ctx);
737 
738 	if (ROUNDUP_OVERFLOW(len, SMALL_PAGE_SIZE, &l))
739 		return TEE_ERROR_BAD_PARAMETERS;
740 
741 	if (!l || (va & SMALL_PAGE_MASK))
742 		return TEE_ERROR_BAD_PARAMETERS;
743 
744 	if (ADD_OVERFLOW(va, l, &end_va))
745 		return TEE_ERROR_BAD_PARAMETERS;
746 
747 	res = split_vm_range(uctx, va, l, NULL, &r);
748 	if (res)
749 		return res;
750 
751 	while (true) {
752 		r_next = TAILQ_NEXT(r, link);
753 		unmap_end_va = r->va + r->size;
754 		rem_um_region(uctx, r);
755 		umap_remove_region(&uctx->vm_info, r);
756 		if (!r_next || unmap_end_va == end_va)
757 			break;
758 		r = r_next;
759 	}
760 
761 	return TEE_SUCCESS;
762 }
763 
764 static TEE_Result map_kinit(struct user_mode_ctx *uctx)
765 {
766 	TEE_Result res = TEE_SUCCESS;
767 	struct mobj *mobj = NULL;
768 	size_t offs = 0;
769 	vaddr_t va = 0;
770 	size_t sz = 0;
771 	uint32_t prot = 0;
772 
773 	thread_get_user_kcode(&mobj, &offs, &va, &sz);
774 	if (sz) {
775 		prot = TEE_MATTR_PRX;
776 		if (IS_ENABLED(CFG_CORE_BTI))
777 			prot |= TEE_MATTR_GUARDED;
778 		res = vm_map(uctx, &va, sz, prot, VM_FLAG_PERMANENT,
779 			     mobj, offs);
780 		if (res)
781 			return res;
782 	}
783 
784 	thread_get_user_kdata(&mobj, &offs, &va, &sz);
785 	if (sz)
786 		return vm_map(uctx, &va, sz, TEE_MATTR_PRW, VM_FLAG_PERMANENT,
787 			      mobj, offs);
788 
789 	return TEE_SUCCESS;
790 }
791 
792 TEE_Result vm_info_init(struct user_mode_ctx *uctx, struct ts_ctx *ts_ctx)
793 {
794 	TEE_Result res;
795 	uint32_t asid = asid_alloc();
796 
797 	if (!asid) {
798 		DMSG("Failed to allocate ASID");
799 		return TEE_ERROR_GENERIC;
800 	}
801 
802 	memset(uctx, 0, sizeof(*uctx));
803 	TAILQ_INIT(&uctx->vm_info.regions);
804 	SLIST_INIT(&uctx->pgt_cache);
805 	uctx->vm_info.asid = asid;
806 	uctx->ts_ctx = ts_ctx;
807 
808 	res = map_kinit(uctx);
809 	if (res)
810 		vm_info_final(uctx);
811 	return res;
812 }
813 
814 void vm_clean_param(struct user_mode_ctx *uctx)
815 {
816 	struct vm_region *next_r;
817 	struct vm_region *r;
818 
819 	TAILQ_FOREACH_SAFE(r, &uctx->vm_info.regions, link, next_r) {
820 		if (r->flags & VM_FLAG_EPHEMERAL) {
821 			rem_um_region(uctx, r);
822 			umap_remove_region(&uctx->vm_info, r);
823 		}
824 	}
825 }
826 
827 static void check_param_map_empty(struct user_mode_ctx *uctx __maybe_unused)
828 {
829 	struct vm_region *r = NULL;
830 
831 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
832 		assert(!(r->flags & VM_FLAG_EPHEMERAL));
833 }
834 
835 static TEE_Result param_mem_to_user_va(struct user_mode_ctx *uctx,
836 				       struct param_mem *mem, void **user_va)
837 {
838 	struct vm_region *region = NULL;
839 
840 	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
841 		vaddr_t va = 0;
842 		size_t phys_offs = 0;
843 
844 		if (!(region->flags & VM_FLAG_EPHEMERAL))
845 			continue;
846 		if (mem->mobj != region->mobj)
847 			continue;
848 
849 		phys_offs = mobj_get_phys_offs(mem->mobj,
850 					       CORE_MMU_USER_PARAM_SIZE);
851 		phys_offs += mem->offs;
852 		if (phys_offs < region->offset)
853 			continue;
854 		if (phys_offs >= (region->offset + region->size))
855 			continue;
856 		va = region->va + phys_offs - region->offset;
857 		*user_va = (void *)va;
858 		return TEE_SUCCESS;
859 	}
860 	return TEE_ERROR_GENERIC;
861 }
862 
863 static int cmp_param_mem(const void *a0, const void *a1)
864 {
865 	const struct param_mem *m1 = a1;
866 	const struct param_mem *m0 = a0;
867 	int ret;
868 
869 	/* Make sure that invalid param_mem entries are placed last in the array */
870 	if (!m0->mobj && !m1->mobj)
871 		return 0;
872 	if (!m0->mobj)
873 		return 1;
874 	if (!m1->mobj)
875 		return -1;
876 
877 	ret = CMP_TRILEAN(mobj_is_secure(m0->mobj), mobj_is_secure(m1->mobj));
878 	if (ret)
879 		return ret;
880 
881 	ret = CMP_TRILEAN((vaddr_t)m0->mobj, (vaddr_t)m1->mobj);
882 	if (ret)
883 		return ret;
884 
885 	ret = CMP_TRILEAN(m0->offs, m1->offs);
886 	if (ret)
887 		return ret;
888 
889 	return CMP_TRILEAN(m0->size, m1->size);
890 }
891 
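/*
 * Map the memref parameters in "param" into the user mode context and
 * return the corresponding user virtual addresses in param_va[].
 * Parameters referring to the same mobj that overlap or are directly
 * adjacent are merged into a single EPHEMERAL and SHAREABLE region so
 * the same memory isn't mapped twice.
 *
 * Illustrative example with made-up offsets: two memrefs on the same
 * mobj covering [0x1000, 0x2000) and [0x2000, 0x3000) are mapped as one
 * region covering [0x1000, 0x3000), and both param_va[] entries point
 * into that region.
 */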
892 TEE_Result vm_map_param(struct user_mode_ctx *uctx, struct tee_ta_param *param,
893 			void *param_va[TEE_NUM_PARAMS])
894 {
895 	TEE_Result res = TEE_SUCCESS;
896 	size_t n;
897 	size_t m;
898 	struct param_mem mem[TEE_NUM_PARAMS];
899 
900 	memset(mem, 0, sizeof(mem));
901 	for (n = 0; n < TEE_NUM_PARAMS; n++) {
902 		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
903 		size_t phys_offs;
904 
905 		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
906 		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
907 		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
908 			continue;
909 		phys_offs = mobj_get_phys_offs(param->u[n].mem.mobj,
910 					       CORE_MMU_USER_PARAM_SIZE);
911 		mem[n].mobj = param->u[n].mem.mobj;
912 		mem[n].offs = ROUNDDOWN(phys_offs + param->u[n].mem.offs,
913 					CORE_MMU_USER_PARAM_SIZE);
914 		mem[n].size = ROUNDUP(phys_offs + param->u[n].mem.offs -
915 				      mem[n].offs + param->u[n].mem.size,
916 				      CORE_MMU_USER_PARAM_SIZE);
917 		/*
918 		 * For size 0 (raw pointer parameter), add a minimum size
919 		 * value to allow the address to be mapped.
920 		 */
921 		if (!mem[n].size)
922 			mem[n].size = CORE_MMU_USER_PARAM_SIZE;
923 	}
924 
925 	/*
926 	 * Sort arguments so NULL mobj is last, secure mobjs first, then by
927 	 * mobj pointer value since those entries can't be merged either,
928 	 * finally by offset.
929 	 *
930 	 * This should result in a list where all mergeable entries are
931 	 * next to each other and unused/invalid entries are at the end.
932 	 */
933 	qsort(mem, TEE_NUM_PARAMS, sizeof(struct param_mem), cmp_param_mem);
934 
935 	for (n = 1, m = 0; n < TEE_NUM_PARAMS && mem[n].mobj; n++) {
936 		if (mem[n].mobj == mem[m].mobj &&
937 		    (mem[n].offs == (mem[m].offs + mem[m].size) ||
938 		     core_is_buffer_intersect(mem[m].offs, mem[m].size,
939 					      mem[n].offs, mem[n].size))) {
940 			mem[m].size = mem[n].offs + mem[n].size - mem[m].offs;
941 			continue;
942 		}
943 		m++;
944 		if (n != m)
945 			mem[m] = mem[n];
946 	}
947 	/*
948 	 * We'd like 'm' to be the number of valid entries. Here 'm' is the
949 	 * index of the last valid entry if the first entry is valid, else
950 	 * 0.
951 	 */
952 	if (mem[0].mobj)
953 		m++;
954 
955 	check_param_map_empty(uctx);
956 
957 	for (n = 0; n < m; n++) {
958 		vaddr_t va = 0;
959 
960 		res = vm_map(uctx, &va, mem[n].size,
961 			     TEE_MATTR_PRW | TEE_MATTR_URW,
962 			     VM_FLAG_EPHEMERAL | VM_FLAG_SHAREABLE,
963 			     mem[n].mobj, mem[n].offs);
964 		if (res)
965 			goto out;
966 	}
967 
968 	for (n = 0; n < TEE_NUM_PARAMS; n++) {
969 		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
970 
971 		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
972 		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
973 		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
974 			continue;
975 		if (!param->u[n].mem.mobj)
976 			continue;
977 
978 		res = param_mem_to_user_va(uctx, &param->u[n].mem,
979 					   param_va + n);
980 		if (res != TEE_SUCCESS)
981 			goto out;
982 	}
983 
984 	res = alloc_pgt(uctx);
985 out:
986 	if (res)
987 		vm_clean_param(uctx);
988 
989 	return res;
990 }
991 
992 TEE_Result vm_add_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj,
993 			vaddr_t *va)
994 {
995 	TEE_Result res = TEE_SUCCESS;
996 	struct vm_region *reg = NULL;
997 
998 	if (!mobj_is_secure(mobj) || !mobj_is_paged(mobj))
999 		return TEE_ERROR_BAD_PARAMETERS;
1000 
1001 	reg = calloc(1, sizeof(*reg));
1002 	if (!reg)
1003 		return TEE_ERROR_OUT_OF_MEMORY;
1004 
1005 	reg->mobj = mobj;
1006 	reg->offset = 0;
1007 	reg->va = 0;
1008 	reg->size = ROUNDUP(mobj->size, SMALL_PAGE_SIZE);
1009 	reg->attr = TEE_MATTR_SECURE;
1010 
1011 	res = umap_add_region(&uctx->vm_info, reg, 0, 0, 0);
1012 	if (res) {
1013 		free(reg);
1014 		return res;
1015 	}
1016 
1017 	res = alloc_pgt(uctx);
1018 	if (res)
1019 		umap_remove_region(&uctx->vm_info, reg);
1020 	else
1021 		*va = reg->va;
1022 
1023 	return res;
1024 }
1025 
1026 void vm_rem_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj, vaddr_t va)
1027 {
1028 	struct vm_region *r = NULL;
1029 
1030 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1031 		if (r->mobj == mobj && r->va == va) {
1032 			rem_um_region(uctx, r);
1033 			umap_remove_region(&uctx->vm_info, r);
1034 			return;
1035 		}
1036 	}
1037 }
1038 
1039 void vm_info_final(struct user_mode_ctx *uctx)
1040 {
1041 	if (!uctx->vm_info.asid)
1042 		return;
1043 
1044 	/* Clear MMU entries to avoid a clash when the ASID is reused */
1045 	tlbi_asid(uctx->vm_info.asid);
1046 
1047 	asid_free(uctx->vm_info.asid);
1048 	while (!TAILQ_EMPTY(&uctx->vm_info.regions))
1049 		umap_remove_region(&uctx->vm_info,
1050 				   TAILQ_FIRST(&uctx->vm_info.regions));
1051 	memset(&uctx->vm_info, 0, sizeof(uctx->vm_info));
1052 }
1053 
1054 /* Return true only if the buffer fits inside TA private memory */
1055 bool vm_buf_is_inside_um_private(const struct user_mode_ctx *uctx,
1056 				 const void *va, size_t size)
1057 {
1058 	struct vm_region *r = NULL;
1059 
1060 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1061 		if (r->flags & VM_FLAGS_NONPRIV)
1062 			continue;
1063 		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size))
1064 			return true;
1065 	}
1066 
1067 	return false;
1068 }
1069 
1070 /* Return true only if the buffer intersects TA private memory */
1071 bool vm_buf_intersects_um_private(const struct user_mode_ctx *uctx,
1072 				  const void *va, size_t size)
1073 {
1074 	struct vm_region *r = NULL;
1075 
1076 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1077 		if (r->flags & VM_FLAGS_NONPRIV)
1078 			continue;
1079 		if (core_is_buffer_intersect((vaddr_t)va, size, r->va, r->size))
1080 			return true;
1081 	}
1082 
1083 	return false;
1084 }
1085 
1086 TEE_Result vm_buf_to_mboj_offs(const struct user_mode_ctx *uctx,
1087 			       const void *va, size_t size,
1088 			       struct mobj **mobj, size_t *offs)
1089 {
1090 	struct vm_region *r = NULL;
1091 
1092 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1093 		if (!r->mobj)
1094 			continue;
1095 		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size)) {
1096 			size_t poffs;
1097 
1098 			poffs = mobj_get_phys_offs(r->mobj,
1099 						   CORE_MMU_USER_PARAM_SIZE);
1100 			*mobj = r->mobj;
1101 			*offs = (vaddr_t)va - r->va + r->offset - poffs;
1102 			return TEE_SUCCESS;
1103 		}
1104 	}
1105 
1106 	return TEE_ERROR_BAD_PARAMETERS;
1107 }
1108 
1109 static TEE_Result tee_mmu_user_va2pa_attr(const struct user_mode_ctx *uctx,
1110 					  void *ua, paddr_t *pa, uint32_t *attr)
1111 {
1112 	struct vm_region *region = NULL;
1113 
1114 	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
1115 		if (!core_is_buffer_inside((vaddr_t)ua, 1, region->va,
1116 					   region->size))
1117 			continue;
1118 
1119 		if (pa) {
1120 			TEE_Result res;
1121 			paddr_t p;
1122 			size_t offset;
1123 			size_t granule;
1124 
1125 			/*
1126 			 * The mobj and the input user address may each
1127 			 * include a specific offset-in-granule position.
1128 			 * Drop both to get the target physical page base
1129 			 * address, then apply only the user address
1130 			 * offset-in-granule.
1131 			 * The smallest mapping granule is the small page.
1132 			 */
1133 			granule = MAX(region->mobj->phys_granule,
1134 				      (size_t)SMALL_PAGE_SIZE);
1135 			assert(!granule || IS_POWER_OF_TWO(granule));
1136 
1137 			offset = region->offset +
1138 				 ROUNDDOWN((vaddr_t)ua - region->va, granule);
1139 
1140 			res = mobj_get_pa(region->mobj, offset, granule, &p);
1141 			if (res != TEE_SUCCESS)
1142 				return res;
1143 
1144 			*pa = p | ((vaddr_t)ua & (granule - 1));
1145 		}
1146 		if (attr)
1147 			*attr = region->attr;
1148 
1149 		return TEE_SUCCESS;
1150 	}
1151 
1152 	return TEE_ERROR_ACCESS_DENIED;
1153 }
1154 
1155 TEE_Result vm_va2pa(const struct user_mode_ctx *uctx, void *ua, paddr_t *pa)
1156 {
1157 	return tee_mmu_user_va2pa_attr(uctx, ua, pa, NULL);
1158 }
1159 
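/*
 * Translate a physical address range back to a user virtual address by
 * walking all regions and, for physically granulated mobjs, each granule
 * within a region. Returns NULL if [pa, pa + pa_size) isn't fully inside
 * any mapped granule.
 */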
1160 void *vm_pa2va(const struct user_mode_ctx *uctx, paddr_t pa, size_t pa_size)
1161 {
1162 	paddr_t p = 0;
1163 	struct vm_region *region = NULL;
1164 
1165 	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
1166 		size_t granule = 0;
1167 		size_t size = 0;
1168 		size_t ofs = 0;
1169 
1170 		/* pa2va is expected only for memory tracked through mobj */
1171 		if (!region->mobj)
1172 			continue;
1173 
1174 		/* A physically granulated memory object must be scanned */
1175 		granule = region->mobj->phys_granule;
1176 		assert(!granule || IS_POWER_OF_TWO(granule));
1177 
1178 		for (ofs = region->offset; ofs < region->size; ofs += size) {
1179 
1180 			if (granule) {
1181 				/* From current offset to buffer/granule end */
1182 				size = granule - (ofs & (granule - 1));
1183 
1184 				if (size > (region->size - ofs))
1185 					size = region->size - ofs;
1186 			} else {
1187 				size = region->size;
1188 			}
1189 
1190 			if (mobj_get_pa(region->mobj, ofs, granule, &p))
1191 				continue;
1192 
1193 			if (core_is_buffer_inside(pa, pa_size, p, size)) {
1194 				/* Remove region offset (mobj phys offset) */
1195 				ofs -= region->offset;
1196 				/* Get offset-in-granule */
1197 				p = pa - p;
1198 
1199 				return (void *)(region->va + ofs + (vaddr_t)p);
1200 			}
1201 		}
1202 	}
1203 
1204 	return NULL;
1205 }
1206 
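/*
 * Check that the user address range [uaddr, uaddr + len) may be accessed
 * with the TEE_MEMORY_ACCESS_* rights given in "flags". The range is
 * walked in steps of the smallest mapping granularity and every step
 * must be mapped with matching security and read/write permissions.
 */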
1207 TEE_Result vm_check_access_rights(const struct user_mode_ctx *uctx,
1208 				  uint32_t flags, uaddr_t uaddr, size_t len)
1209 {
1210 	uaddr_t a = 0;
1211 	uaddr_t end_addr = 0;
1212 	size_t addr_incr = MIN(CORE_MMU_USER_CODE_SIZE,
1213 			       CORE_MMU_USER_PARAM_SIZE);
1214 
1215 	if (ADD_OVERFLOW(uaddr, len, &end_addr))
1216 		return TEE_ERROR_ACCESS_DENIED;
1217 
1218 	if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
1219 	    (flags & TEE_MEMORY_ACCESS_SECURE))
1220 		return TEE_ERROR_ACCESS_DENIED;
1221 
1222 	/*
1223 	 * Rely on the TA private memory test to check whether the address
1224 	 * range is private to the TA or not.
1225 	 */
1226 	if (!(flags & TEE_MEMORY_ACCESS_ANY_OWNER) &&
1227 	   !vm_buf_is_inside_um_private(uctx, (void *)uaddr, len))
1228 		return TEE_ERROR_ACCESS_DENIED;
1229 
1230 	for (a = ROUNDDOWN(uaddr, addr_incr); a < end_addr; a += addr_incr) {
1231 		uint32_t attr;
1232 		TEE_Result res;
1233 
1234 		res = tee_mmu_user_va2pa_attr(uctx, (void *)a, NULL, &attr);
1235 		if (res != TEE_SUCCESS)
1236 			return res;
1237 
1238 		if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
1239 		    (attr & TEE_MATTR_SECURE))
1240 			return TEE_ERROR_ACCESS_DENIED;
1241 
1242 		if ((flags & TEE_MEMORY_ACCESS_SECURE) &&
1243 		    !(attr & TEE_MATTR_SECURE))
1244 			return TEE_ERROR_ACCESS_DENIED;
1245 
1246 		if ((flags & TEE_MEMORY_ACCESS_WRITE) && !(attr & TEE_MATTR_UW))
1247 			return TEE_ERROR_ACCESS_DENIED;
1248 		if ((flags & TEE_MEMORY_ACCESS_READ) && !(attr & TEE_MATTR_UR))
1249 			return TEE_ERROR_ACCESS_DENIED;
1250 	}
1251 
1252 	return TEE_SUCCESS;
1253 }
1254 
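/*
 * Switch the active user mode mapping to the one belonging to "ctx".
 * The previous user map is cleared and its page tables are released for
 * reuse. If "ctx" is a user mode context a new map is created and
 * activated, and its tables are also handed to the pager.
 */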
1255 void vm_set_ctx(struct ts_ctx *ctx)
1256 {
1257 	struct thread_specific_data *tsd = thread_get_tsd();
1258 	struct user_mode_ctx *uctx = NULL;
1259 
1260 	core_mmu_set_user_map(NULL);
1261 
1262 	if (is_user_mode_ctx(tsd->ctx)) {
1263 		/*
1264 		 * We're coming from a user mode context, so we must make
1265 		 * the pgts available for reuse.
1266 		 */
1267 		uctx = to_user_mode_ctx(tsd->ctx);
1268 		pgt_free(&uctx->pgt_cache);
1269 	}
1270 
1271 	if (is_user_mode_ctx(ctx)) {
1272 		struct core_mmu_user_map map = { };
1273 
1274 		uctx = to_user_mode_ctx(ctx);
1275 		core_mmu_create_user_map(uctx, &map);
1276 		core_mmu_set_user_map(&map);
1277 		tee_pager_assign_um_tables(uctx);
1278 	}
1279 	tsd->ctx = ctx;
1280 }
1281 
1282 struct mobj *vm_get_mobj(struct user_mode_ctx *uctx, vaddr_t va, size_t *len,
1283 			 uint16_t *prot, size_t *offs)
1284 {
1285 	struct vm_region *r = NULL;
1286 	size_t r_offs = 0;
1287 
1288 	if (!len || ((*len | va) & SMALL_PAGE_MASK))
1289 		return NULL;
1290 
1291 	r = find_vm_region(&uctx->vm_info, va);
1292 	if (!r)
1293 		return NULL;
1294 
1295 	r_offs = va - r->va;
1296 
1297 	*len = MIN(r->size - r_offs, *len);
1298 	*offs = r->offset + r_offs;
1299 	*prot = r->attr & TEE_MATTR_PROT_MASK;
1300 	return mobj_get(r->mobj);
1301 }
1302