xref: /optee_os/core/mm/vm.c (revision d6e33310b68a97532b14f642d5a4c8ca14263186)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2021, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  * Copyright (c) 2021, Arm Limited
6  */
7 
8 #include <assert.h>
9 #include <config.h>
10 #include <initcall.h>
11 #include <kernel/panic.h>
12 #include <kernel/spinlock.h>
13 #include <kernel/tee_common.h>
14 #include <kernel/tee_misc.h>
15 #include <kernel/tlb_helpers.h>
16 #include <kernel/user_mode_ctx.h>
17 #include <kernel/virtualization.h>
18 #include <mm/core_memprot.h>
19 #include <mm/core_mmu.h>
20 #include <mm/mobj.h>
21 #include <mm/pgt_cache.h>
22 #include <mm/tee_mm.h>
23 #include <mm/tee_mmu_types.h>
24 #include <mm/tee_pager.h>
25 #include <mm/vm.h>
26 #include <stdlib.h>
27 #include <tee_api_defines_extensions.h>
28 #include <tee_api_types.h>
29 #include <trace.h>
30 #include <types_ext.h>
31 #include <user_ta_header.h>
32 #include <util.h>
33 
34 #ifdef CFG_PL310
35 #include <kernel/tee_l2cc_mutex.h>
36 #endif
37 
38 #define TEE_MMU_UDATA_ATTR		(TEE_MATTR_VALID_BLOCK | \
39 					 TEE_MATTR_PRW | TEE_MATTR_URW | \
40 					 TEE_MATTR_SECURE)
41 #define TEE_MMU_UCODE_ATTR		(TEE_MATTR_VALID_BLOCK | \
42 					 TEE_MATTR_PRW | TEE_MATTR_URWX | \
43 					 TEE_MATTR_SECURE)
44 
45 #define TEE_MMU_UCACHE_DEFAULT_ATTR	(TEE_MATTR_MEM_TYPE_CACHED << \
46 					 TEE_MATTR_MEM_TYPE_SHIFT)
47 
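/*
 * Pick a virtual address for @reg in the gap between @prev_reg and
 * @next_reg. @pad_begin and @pad_end reserve extra unmapped space around
 * the region and the resulting address is aligned to @granul. An extra
 * unmapped page is inserted towards neighbours with differing
 * EPHEMERAL/PERMANENT/SHAREABLE flags so such regions never become
 * contiguous. Returns the selected address, or 0 if the region does not
 * fit or a requested @reg->va cannot be honored.
 */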
48 static vaddr_t select_va_in_range(const struct vm_region *prev_reg,
49 				  const struct vm_region *next_reg,
50 				  const struct vm_region *reg,
51 				  size_t pad_begin, size_t pad_end,
52 				  size_t granul)
53 {
54 	const uint32_t f = VM_FLAG_EPHEMERAL | VM_FLAG_PERMANENT |
55 			    VM_FLAG_SHAREABLE;
56 	vaddr_t begin_va = 0;
57 	vaddr_t end_va = 0;
58 	size_t pad = 0;
59 
60 	/*
61 	 * Insert an unmapped entry to separate regions with differing
62 	 * VM_FLAG_EPHEMERAL, VM_FLAG_PERMANENT or VM_FLAG_SHAREABLE
63 	 * bits, as such regions must never be contiguous.
64 	 */
65 	if (prev_reg->flags && (prev_reg->flags & f) != (reg->flags & f))
66 		pad = SMALL_PAGE_SIZE;
67 	else
68 		pad = 0;
69 
70 #ifndef CFG_WITH_LPAE
71 	if ((prev_reg->attr & TEE_MATTR_SECURE) !=
72 	    (reg->attr & TEE_MATTR_SECURE))
73 		granul = CORE_MMU_PGDIR_SIZE;
74 #endif
75 
76 	if (ADD_OVERFLOW(prev_reg->va, prev_reg->size, &begin_va) ||
77 	    ADD_OVERFLOW(begin_va, pad_begin, &begin_va) ||
78 	    ADD_OVERFLOW(begin_va, pad, &begin_va) ||
79 	    ROUNDUP_OVERFLOW(begin_va, granul, &begin_va))
80 		return 0;
81 
82 	if (reg->va) {
83 		if (reg->va < begin_va)
84 			return 0;
85 		begin_va = reg->va;
86 	}
87 
88 	if (next_reg->flags && (next_reg->flags & f) != (reg->flags & f))
89 		pad = SMALL_PAGE_SIZE;
90 	else
91 		pad = 0;
92 
93 #ifndef CFG_WITH_LPAE
94 	if ((next_reg->attr & TEE_MATTR_SECURE) !=
95 	    (reg->attr & TEE_MATTR_SECURE))
96 		granul = CORE_MMU_PGDIR_SIZE;
97 #endif
98 	if (ADD_OVERFLOW(begin_va, reg->size, &end_va) ||
99 	    ADD_OVERFLOW(end_va, pad_end, &end_va) ||
100 	    ADD_OVERFLOW(end_va, pad, &end_va) ||
101 	    ROUNDUP_OVERFLOW(end_va, granul, &end_va))
102 		return 0;
103 
104 	if (end_va <= next_reg->va) {
105 		assert(!reg->va || reg->va == begin_va);
106 		return begin_va;
107 	}
108 
109 	return 0;
110 }
111 
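/*
 * Check that enough page tables are available to cover the VM layout of
 * @uctx. With CFG_PAGED_USER_TA the tables are also allocated right away
 * when @uctx belongs to the currently active thread context, since the
 * pager will need them soon.
 */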
112 static TEE_Result alloc_pgt(struct user_mode_ctx *uctx)
113 {
114 	struct thread_specific_data *tsd __maybe_unused;
115 
116 	if (!pgt_check_avail(&uctx->vm_info)) {
117 		EMSG("Page tables are not available");
118 		return TEE_ERROR_OUT_OF_MEMORY;
119 	}
120 
121 #ifdef CFG_PAGED_USER_TA
122 	tsd = thread_get_tsd();
123 	if (uctx->ts_ctx == tsd->ctx) {
124 		/*
125 		 * The supplied uctx is the currently active context, so
126 		 * allocate the page tables too as the pager needs them soon.
127 		 */
128 		pgt_get_all(&uctx->pgt_cache, uctx->ts_ctx, &uctx->vm_info);
129 	}
130 #endif
131 
132 	return TEE_SUCCESS;
133 }
134 
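/*
 * Tear down the mapping of region @r: paged regions are handed to the
 * pager for removal, other regions have their page table entries cleared
 * and the TLB invalidated for this ASID. Page tables left covering only
 * @r, and no neighbouring region, are flushed so they can be reused.
 */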
135 static void rem_um_region(struct user_mode_ctx *uctx, struct vm_region *r)
136 {
137 	struct pgt_cache *pgt_cache = &uctx->pgt_cache;
138 	vaddr_t begin = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE);
139 	vaddr_t last = ROUNDUP(r->va + r->size, CORE_MMU_PGDIR_SIZE);
140 	struct vm_region *r2 = NULL;
141 
142 	if (mobj_is_paged(r->mobj)) {
143 		tee_pager_rem_um_region(uctx, r->va, r->size);
144 	} else {
145 		pgt_clear_ctx_range(pgt_cache, uctx->ts_ctx, r->va,
146 				    r->va + r->size);
147 		tlbi_mva_range_asid(r->va, r->size, SMALL_PAGE_SIZE,
148 				    uctx->vm_info.asid);
149 	}
150 
151 	r2 = TAILQ_NEXT(r, link);
152 	if (r2)
153 		last = MIN(last, ROUNDDOWN(r2->va, CORE_MMU_PGDIR_SIZE));
154 
155 	r2 = TAILQ_PREV(r, vm_region_head, link);
156 	if (r2)
157 		begin = MAX(begin,
158 			    ROUNDUP(r2->va + r2->size, CORE_MMU_PGDIR_SIZE));
159 
160 	/* If there are no unused page tables, there's nothing left to do */
161 	if (begin >= last)
162 		return;
163 
164 	pgt_flush_ctx_range(pgt_cache, uctx->ts_ctx, r->va, r->va + r->size);
165 }
166 
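/*
 * Set the entries of table @ti covering [@va, @va + @size) to map
 * consecutive physical memory starting at @pa with attributes @attr.
 */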
167 static void set_pa_range(struct core_mmu_table_info *ti, vaddr_t va,
168 			 paddr_t pa, size_t size, uint32_t attr)
169 {
170 	unsigned int end = core_mmu_va2idx(ti, va + size);
171 	unsigned int idx = core_mmu_va2idx(ti, va);
172 
173 	while (idx < end) {
174 		core_mmu_set_entry(ti, idx, pa, attr);
175 		idx++;
176 		pa += BIT64(ti->shift);
177 	}
178 }
179 
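/*
 * Fill in the part of the translation table described by @ti that
 * overlaps region @r, querying the mobj for the physical address of each
 * chunk at the granularity the mobj provides.
 */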
180 static void set_reg_in_table(struct core_mmu_table_info *ti,
181 			     struct vm_region *r)
182 {
183 	vaddr_t va = MAX(r->va, ti->va_base);
184 	vaddr_t end = MIN(r->va + r->size, ti->va_base + CORE_MMU_PGDIR_SIZE);
185 	size_t sz = MIN(end - va, mobj_get_phys_granule(r->mobj));
186 	size_t granule = BIT(ti->shift);
187 	size_t offset = 0;
188 	paddr_t pa = 0;
189 
190 	while (va < end) {
191 		offset = va - r->va + r->offset;
192 		if (mobj_get_pa(r->mobj, offset, granule, &pa))
193 			panic("Failed to get PA");
194 		set_pa_range(ti, va, pa, sz, r->attr);
195 		va += sz;
196 	}
197 }
198 
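/*
 * Update the page table entries for the non-paged region @r. If page
 * tables already are assigned to the context they are updated in place,
 * otherwise only the tables found in the cache list are updated and the
 * rest are expected to be filled in when the mapping is next created.
 */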
199 static void set_um_region(struct user_mode_ctx *uctx, struct vm_region *r)
200 {
201 	struct pgt *p = SLIST_FIRST(&uctx->pgt_cache);
202 	struct core_mmu_table_info ti = { };
203 
204 	assert(!mobj_is_paged(r->mobj));
205 
206 	core_mmu_set_info_table(&ti, CORE_MMU_PGDIR_LEVEL, 0, NULL);
207 
208 	if (p) {
209 		/* All the pgts are already allocated, update in place */
210 		do {
211 			ti.va_base = p->vabase;
212 			ti.table = p->tbl;
213 			set_reg_in_table(&ti, r);
214 			p = SLIST_NEXT(p, link);
215 		} while (p);
216 	} else {
217 		/*
218 		 * We may have a few pgts in the cache list; update the
219 		 * ones we find.
220 		 */
221 		for (ti.va_base = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE);
222 		     ti.va_base < r->va + r->size;
223 		     ti.va_base += CORE_MMU_PGDIR_SIZE) {
224 			p = pgt_pop_from_cache_list(ti.va_base, uctx->ts_ctx);
225 			if (!p)
226 				continue;
227 			ti.table = p->tbl;
228 			set_reg_in_table(&ti, r);
229 			pgt_push_to_cache_list(p);
230 		}
231 	}
232 }
233 
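/*
 * Insert @reg into the address-ordered region list of @vmi. If @reg->va
 * is non-zero that exact address is requested, otherwise the first gap
 * large enough for the region plus @pad_begin/@pad_end at @align
 * alignment is used. On success @reg->va is set and TEE_SUCCESS is
 * returned.
 */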
234 static TEE_Result umap_add_region(struct vm_info *vmi, struct vm_region *reg,
235 				  size_t pad_begin, size_t pad_end,
236 				  size_t align)
237 {
238 	struct vm_region dummy_first_reg = { };
239 	struct vm_region dummy_last_reg = { };
240 	struct vm_region *r = NULL;
241 	struct vm_region *prev_r = NULL;
242 	vaddr_t va_range_base = 0;
243 	size_t va_range_size = 0;
244 	size_t granul;
245 	vaddr_t va = 0;
246 	size_t offs_plus_size = 0;
247 
248 	core_mmu_get_user_va_range(&va_range_base, &va_range_size);
249 	dummy_first_reg.va = va_range_base;
250 	dummy_last_reg.va = va_range_base + va_range_size;
251 
252 	/* Check alignment; it has to be at least SMALL_PAGE based */
253 	if ((reg->va | reg->size | pad_begin | pad_end) & SMALL_PAGE_MASK)
254 		return TEE_ERROR_ACCESS_CONFLICT;
255 
256 	/* Check that the mobj is defined for the entire range */
257 	if (ADD_OVERFLOW(reg->offset, reg->size, &offs_plus_size))
258 		return TEE_ERROR_BAD_PARAMETERS;
259 	if (offs_plus_size > ROUNDUP(reg->mobj->size, SMALL_PAGE_SIZE))
260 		return TEE_ERROR_BAD_PARAMETERS;
261 
262 	granul = MAX(align, SMALL_PAGE_SIZE);
263 	if (!IS_POWER_OF_TWO(granul))
264 		return TEE_ERROR_BAD_PARAMETERS;
265 
266 	prev_r = &dummy_first_reg;
267 	TAILQ_FOREACH(r, &vmi->regions, link) {
268 		va = select_va_in_range(prev_r, r, reg, pad_begin, pad_end,
269 					granul);
270 		if (va) {
271 			reg->va = va;
272 			TAILQ_INSERT_BEFORE(r, reg, link);
273 			return TEE_SUCCESS;
274 		}
275 		prev_r = r;
276 	}
277 
278 	r = TAILQ_LAST(&vmi->regions, vm_region_head);
279 	if (!r)
280 		r = &dummy_first_reg;
281 	va = select_va_in_range(r, &dummy_last_reg, reg, pad_begin, pad_end,
282 				granul);
283 	if (va) {
284 		reg->va = va;
285 		TAILQ_INSERT_TAIL(&vmi->regions, reg, link);
286 		return TEE_SUCCESS;
287 	}
288 
289 	return TEE_ERROR_ACCESS_CONFLICT;
290 }
291 
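/*
 * Map @mobj at offset @offs into the user VA space of @uctx with
 * protection @prot and flags @flags. A non-zero *@va requests that fixed
 * address, otherwise a free range is selected and returned in *@va. @len
 * is rounded up to a whole number of small pages; @pad_begin/@pad_end
 * reserve extra unmapped space around the region and @align may request
 * a larger alignment than the default small page.
 *
 * Illustrative call only (mobj, offs and sz are assumed to describe some
 * already obtained memory object, cf. map_kinit() below):
 *
 *	vaddr_t va = 0;
 *
 *	res = vm_map_pad(uctx, &va, sz, TEE_MATTR_PRX, VM_FLAG_PERMANENT,
 *			 mobj, offs, 0, 0, 0);
 */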
292 TEE_Result vm_map_pad(struct user_mode_ctx *uctx, vaddr_t *va, size_t len,
293 		      uint32_t prot, uint32_t flags, struct mobj *mobj,
294 		      size_t offs, size_t pad_begin, size_t pad_end,
295 		      size_t align)
296 {
297 	TEE_Result res = TEE_SUCCESS;
298 	struct vm_region *reg = NULL;
299 	uint32_t attr = 0;
300 
301 	if (prot & ~TEE_MATTR_PROT_MASK)
302 		return TEE_ERROR_BAD_PARAMETERS;
303 
304 	reg = calloc(1, sizeof(*reg));
305 	if (!reg)
306 		return TEE_ERROR_OUT_OF_MEMORY;
307 
308 	if (!mobj_is_paged(mobj)) {
309 		uint32_t mem_type = 0;
310 
311 		res = mobj_get_mem_type(mobj, &mem_type);
312 		if (res)
313 			goto err_free_reg;
314 		attr |= mem_type << TEE_MATTR_MEM_TYPE_SHIFT;
315 	}
316 	attr |= TEE_MATTR_VALID_BLOCK;
317 	if (mobj_is_secure(mobj))
318 		attr |= TEE_MATTR_SECURE;
319 
320 	reg->mobj = mobj_get(mobj);
321 	reg->offset = offs;
322 	reg->va = *va;
323 	reg->size = ROUNDUP(len, SMALL_PAGE_SIZE);
324 	reg->attr = attr | prot;
325 	reg->flags = flags;
326 
327 	res = umap_add_region(&uctx->vm_info, reg, pad_begin, pad_end, align);
328 	if (res)
329 		goto err_put_mobj;
330 
331 	res = alloc_pgt(uctx);
332 	if (res)
333 		goto err_rem_reg;
334 
335 	if (mobj_is_paged(mobj)) {
336 		struct fobj *fobj = mobj_get_fobj(mobj);
337 
338 		if (!fobj) {
339 			res = TEE_ERROR_GENERIC;
340 			goto err_rem_reg;
341 		}
342 
343 		res = tee_pager_add_um_region(uctx, reg->va, fobj, prot);
344 		fobj_put(fobj);
345 		if (res)
346 			goto err_rem_reg;
347 	} else {
348 		set_um_region(uctx, reg);
349 	}
350 
351 	/*
352 	 * If the context is currently active, set it again to update
353 	 * the mapping.
354 	 */
355 	if (thread_get_tsd()->ctx == uctx->ts_ctx)
356 		vm_set_ctx(uctx->ts_ctx);
357 
358 	*va = reg->va;
359 
360 	return TEE_SUCCESS;
361 
362 err_rem_reg:
363 	TAILQ_REMOVE(&uctx->vm_info.regions, reg, link);
364 err_put_mobj:
365 	mobj_put(reg->mobj);
366 err_free_reg:
367 	free(reg);
368 	return res;
369 }
370 
371 static struct vm_region *find_vm_region(struct vm_info *vm_info, vaddr_t va)
372 {
373 	struct vm_region *r = NULL;
374 
375 	TAILQ_FOREACH(r, &vm_info->regions, link)
376 		if (va >= r->va && va < r->va + r->size)
377 			return r;
378 
379 	return NULL;
380 }
381 
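/*
 * Return true if [@va, @va + @len) is covered by regions that follow
 * each other without holes, starting at @r0. If @cmp_regs is non-NULL it
 * must also accept each pair of adjacent regions for the range to count
 * as contiguous.
 */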
382 static bool va_range_is_contiguous(struct vm_region *r0, vaddr_t va,
383 				   size_t len,
384 				   bool (*cmp_regs)(const struct vm_region *r0,
385 						    const struct vm_region *r,
386 						    const struct vm_region *rn))
387 {
388 	struct vm_region *r = r0;
389 	vaddr_t end_va = 0;
390 
391 	if (ADD_OVERFLOW(va, len, &end_va))
392 		return false;
393 
394 	while (true) {
395 		struct vm_region *r_next = TAILQ_NEXT(r, link);
396 		vaddr_t r_end_va = r->va + r->size;
397 
398 		if (r_end_va >= end_va)
399 			return true;
400 		if (!r_next)
401 			return false;
402 		if (r_end_va != r_next->va)
403 			return false;
404 		if (cmp_regs && !cmp_regs(r0, r, r_next))
405 			return false;
406 		r = r_next;
407 	}
408 }
409 
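/*
 * Split region @r at @va into two regions backed by the same mobj, the
 * second one starting at @va. @va must lie strictly inside @r.
 */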
410 static TEE_Result split_vm_region(struct user_mode_ctx *uctx,
411 				  struct vm_region *r, vaddr_t va)
412 {
413 	struct vm_region *r2 = NULL;
414 	size_t diff = va - r->va;
415 
416 	assert(diff && diff < r->size);
417 
418 	r2 = calloc(1, sizeof(*r2));
419 	if (!r2)
420 		return TEE_ERROR_OUT_OF_MEMORY;
421 
422 	if (mobj_is_paged(r->mobj)) {
423 		TEE_Result res = tee_pager_split_um_region(uctx, va);
424 
425 		if (res) {
426 			free(r2);
427 			return res;
428 		}
429 	}
430 
431 	r2->mobj = mobj_get(r->mobj);
432 	r2->offset = r->offset + diff;
433 	r2->va = va;
434 	r2->size = r->size - diff;
435 	r2->attr = r->attr;
436 	r2->flags = r->flags;
437 
438 	r->size = diff;
439 
440 	TAILQ_INSERT_AFTER(&uctx->vm_info.regions, r, r2, link);
441 
442 	return TEE_SUCCESS;
443 }
444 
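/*
 * Make sure [@va, @va + @len) is covered by complete regions by splitting
 * regions at the range boundaries where needed. The range must be
 * contiguous (and accepted by @cmp_regs if supplied). On success *@r0_ret
 * points to the first region of the range.
 */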
445 static TEE_Result split_vm_range(struct user_mode_ctx *uctx, vaddr_t va,
446 				 size_t len,
447 				 bool (*cmp_regs)(const struct vm_region *r0,
448 						  const struct vm_region *r,
449 						  const struct vm_region *rn),
450 				 struct vm_region **r0_ret)
451 {
452 	TEE_Result res = TEE_SUCCESS;
453 	struct vm_region *r = NULL;
454 	vaddr_t end_va = 0;
455 
456 	if ((va | len) & SMALL_PAGE_MASK)
457 		return TEE_ERROR_BAD_PARAMETERS;
458 
459 	if (ADD_OVERFLOW(va, len, &end_va))
460 		return TEE_ERROR_BAD_PARAMETERS;
461 
462 	/*
463 	 * Find the first vm_region in the range and check that the entire
464 	 * range is contiguous.
465 	 */
466 	r = find_vm_region(&uctx->vm_info, va);
467 	if (!r || !va_range_is_contiguous(r, va, len, cmp_regs))
468 		return TEE_ERROR_BAD_PARAMETERS;
469 
470 	/*
471 	 * If needed, split regions so that va and len cover only complete
472 	 * regions.
473 	 */
474 	if (va != r->va) {
475 		res = split_vm_region(uctx, r, va);
476 		if (res)
477 			return res;
478 		r = TAILQ_NEXT(r, link);
479 	}
480 
481 	*r0_ret = r;
482 	r = find_vm_region(&uctx->vm_info, va + len - 1);
483 	if (!r)
484 		return TEE_ERROR_BAD_PARAMETERS;
485 	if (end_va != r->va + r->size) {
486 		res = split_vm_region(uctx, r, end_va);
487 		if (res)
488 			return res;
489 	}
490 
491 	return TEE_SUCCESS;
492 }
493 
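/*
 * Merge adjacent regions within [@va, @va + @len) that are backed by the
 * same mobj with contiguous offsets and have identical attributes and
 * flags, undoing earlier splits.
 */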
494 static void merge_vm_range(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
495 {
496 	struct vm_region *r_next = NULL;
497 	struct vm_region *r = NULL;
498 	vaddr_t end_va = 0;
499 
500 	if (ADD_OVERFLOW(va, len, &end_va))
501 		return;
502 
503 	tee_pager_merge_um_region(uctx, va, len);
504 
505 	for (r = TAILQ_FIRST(&uctx->vm_info.regions);; r = r_next) {
506 		r_next = TAILQ_NEXT(r, link);
507 		if (!r_next)
508 			return;
509 
510 		/* Try merging with the region just before va */
511 		if (r->va + r->size < va)
512 			continue;
513 
514 		/*
515 		 * If r->va is well past our range we're done.
516 		 * Note that if it's just the page after our range we'll
517 		 * try to merge.
518 		 */
519 		if (r->va > end_va)
520 			return;
521 
522 		if (r->va + r->size != r_next->va)
523 			continue;
524 		if (r->mobj != r_next->mobj ||
525 		    r->flags != r_next->flags ||
526 		    r->attr != r_next->attr)
527 			continue;
528 		if (r->offset + r->size != r_next->offset)
529 			continue;
530 
531 		TAILQ_REMOVE(&uctx->vm_info.regions, r_next, link);
532 		r->size += r_next->size;
533 		mobj_put(r_next->mobj);
534 		free(r_next);
535 		r_next = r;
536 	}
537 }
538 
539 static bool cmp_region_for_remap(const struct vm_region *r0,
540 				 const struct vm_region *r,
541 				 const struct vm_region *rn)
542 {
543 	/*
544 	 * All the essentials have to match for remap to make sense. The
545 	 * essentials are mobj/fobj, attr and flags, and in addition the
546 	 * offsets must be contiguous.
547 	 *
548 	 * Note that vm_remap() depends on mobj/fobj to be the same.
549 	 */
550 	return r0->flags == r->flags && r0->attr == r->attr &&
551 	       r0->mobj == r->mobj && rn->offset == r->offset + r->size;
552 }
553 
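/*
 * Move the mapping of [@old_va, @old_va + @len) to a new virtual address.
 * A non-zero *@new_va requests that address, optionally surrounded by
 * @pad_begin/@pad_end of unmapped space. On success *@new_va holds the
 * start of the moved mapping; on failure the original mapping at @old_va
 * is restored.
 */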
554 TEE_Result vm_remap(struct user_mode_ctx *uctx, vaddr_t *new_va, vaddr_t old_va,
555 		    size_t len, size_t pad_begin, size_t pad_end)
556 {
557 	struct vm_region_head regs = TAILQ_HEAD_INITIALIZER(regs);
558 	TEE_Result res = TEE_SUCCESS;
559 	struct vm_region *r0 = NULL;
560 	struct vm_region *r = NULL;
561 	struct vm_region *r_next = NULL;
562 	struct vm_region *r_last = NULL;
563 	struct vm_region *r_first = NULL;
564 	struct fobj *fobj = NULL;
565 	vaddr_t next_va = 0;
566 
567 	assert(thread_get_tsd()->ctx == uctx->ts_ctx);
568 
569 	if (!len || ((len | old_va) & SMALL_PAGE_MASK))
570 		return TEE_ERROR_BAD_PARAMETERS;
571 
572 	res = split_vm_range(uctx, old_va, len, cmp_region_for_remap, &r0);
573 	if (res)
574 		return res;
575 
576 	if (mobj_is_paged(r0->mobj)) {
577 		fobj = mobj_get_fobj(r0->mobj);
578 		if (!fobj)
579 			panic();
580 	}
581 
582 	for (r = r0; r; r = r_next) {
583 		if (r->va + r->size > old_va + len)
584 			break;
585 		r_next = TAILQ_NEXT(r, link);
586 		rem_um_region(uctx, r);
587 		TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
588 		TAILQ_INSERT_TAIL(&regs, r, link);
589 	}
590 
591 	/*
592 	 * Synchronize changes to the translation tables. Even though the
593 	 * pager case unmaps immediately, we may still free a translation table.
594 	 */
595 	vm_set_ctx(uctx->ts_ctx);
596 
597 	r_first = TAILQ_FIRST(&regs);
598 	while (!TAILQ_EMPTY(&regs)) {
599 		r = TAILQ_FIRST(&regs);
600 		TAILQ_REMOVE(&regs, r, link);
601 		if (r_last) {
602 			r->va = r_last->va + r_last->size;
603 			res = umap_add_region(&uctx->vm_info, r, 0, 0, 0);
604 		} else {
605 			r->va = *new_va;
606 			res = umap_add_region(&uctx->vm_info, r, pad_begin,
607 					      pad_end + len - r->size, 0);
608 		}
609 		if (!res) {
610 			r_last = r;
611 			res = alloc_pgt(uctx);
612 		}
613 		if (!res) {
614 			if (!fobj)
615 				set_um_region(uctx, r);
616 			else
617 				res = tee_pager_add_um_region(uctx, r->va, fobj,
618 							      r->attr);
619 		}
620 
621 		if (res) {
622 			/*
622 			 * Something went wrong; move all the recently added
624 			 * regions back to regs for later reinsertion at
625 			 * the original spot.
626 			 */
627 			struct vm_region *r_tmp = NULL;
628 			struct vm_region *r_stop = NULL;
629 
630 			if (r != r_last) {
631 				/*
632 				 * umap_add_region() failed; move r back to
633 				 * regs before all the rest are moved back.
634 				 */
635 				TAILQ_INSERT_HEAD(&regs, r, link);
636 			}
637 			if (r_last)
638 				r_stop = TAILQ_NEXT(r_last, link);
639 			for (r = r_first; r != r_stop; r = r_next) {
640 				r_next = TAILQ_NEXT(r, link);
641 				TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
642 				if (r_tmp)
643 					TAILQ_INSERT_AFTER(&regs, r_tmp, r,
644 							   link);
645 				else
646 					TAILQ_INSERT_HEAD(&regs, r, link);
647 				r_tmp = r;
648 			}
649 
650 			goto err_restore_map;
651 		}
652 	}
653 
654 	fobj_put(fobj);
655 
656 	vm_set_ctx(uctx->ts_ctx);
657 	*new_va = r_first->va;
658 
659 	return TEE_SUCCESS;
660 
661 err_restore_map:
662 	next_va = old_va;
663 	while (!TAILQ_EMPTY(&regs)) {
664 		r = TAILQ_FIRST(&regs);
665 		TAILQ_REMOVE(&regs, r, link);
666 		r->va = next_va;
667 		next_va += r->size;
668 		if (umap_add_region(&uctx->vm_info, r, 0, 0, 0))
669 			panic("Cannot restore mapping");
670 		if (alloc_pgt(uctx))
671 			panic("Cannot restore mapping");
672 		if (fobj) {
673 			if (tee_pager_add_um_region(uctx, r->va, fobj, r->attr))
674 				panic("Cannot restore mapping");
675 		} else {
676 			set_um_region(uctx, r);
677 		}
678 	}
679 	fobj_put(fobj);
680 	vm_set_ctx(uctx->ts_ctx);
681 
682 	return res;
683 }
684 
685 static bool cmp_region_for_get_flags(const struct vm_region *r0,
686 				     const struct vm_region *r,
687 				     const struct vm_region *rn __unused)
688 {
689 	return r0->flags == r->flags;
690 }
691 
692 TEE_Result vm_get_flags(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
693 			uint32_t *flags)
694 {
695 	struct vm_region *r = NULL;
696 
697 	if (!len || ((len | va) & SMALL_PAGE_MASK))
698 		return TEE_ERROR_BAD_PARAMETERS;
699 
700 	r = find_vm_region(&uctx->vm_info, va);
701 	if (!r)
702 		return TEE_ERROR_BAD_PARAMETERS;
703 
704 	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_flags))
705 		return TEE_ERROR_BAD_PARAMETERS;
706 
707 	*flags = r->flags;
708 
709 	return TEE_SUCCESS;
710 }
711 
712 static bool cmp_region_for_get_prot(const struct vm_region *r0,
713 				    const struct vm_region *r,
714 				    const struct vm_region *rn __unused)
715 {
716 	return (r0->attr & TEE_MATTR_PROT_MASK) ==
717 	       (r->attr & TEE_MATTR_PROT_MASK);
718 }
719 
720 TEE_Result vm_get_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
721 		       uint16_t *prot)
722 {
723 	struct vm_region *r = NULL;
724 
725 	if (!len || ((len | va) & SMALL_PAGE_MASK))
726 		return TEE_ERROR_BAD_PARAMETERS;
727 
728 	r = find_vm_region(&uctx->vm_info, va);
729 	if (!r)
730 		return TEE_ERROR_BAD_PARAMETERS;
731 
732 	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_prot))
733 		return TEE_ERROR_BAD_PARAMETERS;
734 
735 	*prot = r->attr & TEE_MATTR_PROT_MASK;
736 
737 	return TEE_SUCCESS;
738 }
739 
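/*
 * Change the protection bits of [@va, @va + @len) to @prot. The range is
 * first split into complete regions, the new attributes are applied with
 * the required TLB invalidation, caches are maintained when a previously
 * writable non-paged mapping is affected, and finally regions that have
 * become compatible again are merged.
 */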
740 TEE_Result vm_set_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
741 		       uint32_t prot)
742 {
743 	TEE_Result res = TEE_SUCCESS;
744 	struct vm_region *r0 = NULL;
745 	struct vm_region *r = NULL;
746 	bool was_writeable = false;
747 	bool need_sync = false;
748 
749 	assert(thread_get_tsd()->ctx == uctx->ts_ctx);
750 
751 	if (prot & ~TEE_MATTR_PROT_MASK || !len)
752 		return TEE_ERROR_BAD_PARAMETERS;
753 
754 	res = split_vm_range(uctx, va, len, NULL, &r0);
755 	if (res)
756 		return res;
757 
758 	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
759 		if (r->va + r->size > va + len)
760 			break;
761 		if (r->attr & (TEE_MATTR_UW | TEE_MATTR_PW))
762 			was_writeable = true;
763 
764 		r->attr &= ~TEE_MATTR_PROT_MASK;
765 		r->attr |= prot;
766 
767 		if (!mobj_is_paged(r->mobj)) {
768 			need_sync = true;
769 			set_um_region(uctx, r);
770 			/*
771 			 * Normally when set_um_region() is called we
772 			 * change from no mapping to some mapping, but in
773 			 * this case we change the permissions on an
774 			 * already present mapping so some TLB invalidation
775 			 * is needed. We also depend on the dsb() performed
776 			 * as part of the TLB invalidation.
777 			 */
778 			tlbi_mva_range_asid(r->va, r->size, SMALL_PAGE_SIZE,
779 					    uctx->vm_info.asid);
780 		}
781 	}
782 
783 	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
784 		if (r->va + r->size > va + len)
785 			break;
786 		if (mobj_is_paged(r->mobj)) {
787 			if (!tee_pager_set_um_region_attr(uctx, r->va, r->size,
788 							  prot))
789 				panic();
790 		} else if (was_writeable) {
791 			cache_op_inner(DCACHE_AREA_CLEAN, (void *)r->va,
792 				       r->size);
793 		}
794 
795 	}
796 	if (need_sync && was_writeable)
797 		cache_op_inner(ICACHE_INVALIDATE, NULL, 0);
798 
799 	merge_vm_range(uctx, va, len);
800 
801 	return TEE_SUCCESS;
802 }
803 
804 static void umap_remove_region(struct vm_info *vmi, struct vm_region *reg)
805 {
806 	TAILQ_REMOVE(&vmi->regions, reg, link);
807 	mobj_put(reg->mobj);
808 	free(reg);
809 }
810 
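/*
 * Unmap [@va, @va + @len) from @uctx. @len is rounded up to a whole
 * number of small pages and the range must be covered by contiguous
 * regions, which are split at the boundaries as needed, removed from the
 * page tables and freed.
 */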
811 TEE_Result vm_unmap(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
812 {
813 	TEE_Result res = TEE_SUCCESS;
814 	struct vm_region *r = NULL;
815 	struct vm_region *r_next = NULL;
816 	size_t end_va = 0;
817 	size_t unmap_end_va = 0;
818 	size_t l = 0;
819 
820 	assert(thread_get_tsd()->ctx == uctx->ts_ctx);
821 
822 	if (ROUNDUP_OVERFLOW(len, SMALL_PAGE_SIZE, &l))
823 		return TEE_ERROR_BAD_PARAMETERS;
824 
825 	if (!l || (va & SMALL_PAGE_MASK))
826 		return TEE_ERROR_BAD_PARAMETERS;
827 
828 	if (ADD_OVERFLOW(va, l, &end_va))
829 		return TEE_ERROR_BAD_PARAMETERS;
830 
831 	res = split_vm_range(uctx, va, l, NULL, &r);
832 	if (res)
833 		return res;
834 
835 	while (true) {
836 		r_next = TAILQ_NEXT(r, link);
837 		unmap_end_va = r->va + r->size;
838 		rem_um_region(uctx, r);
839 		umap_remove_region(&uctx->vm_info, r);
840 		if (!r_next || unmap_end_va == end_va)
841 			break;
842 		r = r_next;
843 	}
844 
845 	return TEE_SUCCESS;
846 }
847 
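/*
 * Map the kernel code and data that must remain accessible while
 * executing in user mode (as reported by thread_get_user_kcode() and
 * thread_get_user_kdata()) as permanent regions in the user VA space.
 */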
848 static TEE_Result map_kinit(struct user_mode_ctx *uctx)
849 {
850 	TEE_Result res = TEE_SUCCESS;
851 	struct mobj *mobj = NULL;
852 	size_t offs = 0;
853 	vaddr_t va = 0;
854 	size_t sz = 0;
855 	uint32_t prot = 0;
856 
857 	thread_get_user_kcode(&mobj, &offs, &va, &sz);
858 	if (sz) {
859 		prot = TEE_MATTR_PRX;
860 		if (IS_ENABLED(CFG_CORE_BTI))
861 			prot |= TEE_MATTR_GUARDED;
862 		res = vm_map(uctx, &va, sz, prot, VM_FLAG_PERMANENT,
863 			     mobj, offs);
864 		if (res)
865 			return res;
866 	}
867 
868 	thread_get_user_kdata(&mobj, &offs, &va, &sz);
869 	if (sz)
870 		return vm_map(uctx, &va, sz, TEE_MATTR_PRW, VM_FLAG_PERMANENT,
871 			      mobj, offs);
872 
873 	return TEE_SUCCESS;
874 }
875 
876 TEE_Result vm_info_init(struct user_mode_ctx *uctx, struct ts_ctx *ts_ctx)
877 {
878 	TEE_Result res;
879 	uint32_t asid = asid_alloc();
880 
881 	if (!asid) {
882 		DMSG("Failed to allocate ASID");
883 		return TEE_ERROR_GENERIC;
884 	}
885 
886 	memset(uctx, 0, sizeof(*uctx));
887 	TAILQ_INIT(&uctx->vm_info.regions);
888 	SLIST_INIT(&uctx->pgt_cache);
889 	uctx->vm_info.asid = asid;
890 	uctx->ts_ctx = ts_ctx;
891 
892 	res = map_kinit(uctx);
893 	if (res)
894 		vm_info_final(uctx);
895 	return res;
896 }
897 
898 void vm_clean_param(struct user_mode_ctx *uctx)
899 {
900 	struct vm_region *next_r;
901 	struct vm_region *r;
902 
903 	TAILQ_FOREACH_SAFE(r, &uctx->vm_info.regions, link, next_r) {
904 		if (r->flags & VM_FLAG_EPHEMERAL) {
905 			rem_um_region(uctx, r);
906 			umap_remove_region(&uctx->vm_info, r);
907 		}
908 	}
909 }
910 
911 static void check_param_map_empty(struct user_mode_ctx *uctx __maybe_unused)
912 {
913 	struct vm_region *r = NULL;
914 
915 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
916 		assert(!(r->flags & VM_FLAG_EPHEMERAL));
917 }
918 
919 static TEE_Result param_mem_to_user_va(struct user_mode_ctx *uctx,
920 				       struct param_mem *mem, void **user_va)
921 {
922 	struct vm_region *region = NULL;
923 
924 	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
925 		vaddr_t va = 0;
926 		size_t phys_offs = 0;
927 
928 		if (!(region->flags & VM_FLAG_EPHEMERAL))
929 			continue;
930 		if (mem->mobj != region->mobj)
931 			continue;
932 
933 		phys_offs = mobj_get_phys_offs(mem->mobj,
934 					       CORE_MMU_USER_PARAM_SIZE);
935 		phys_offs += mem->offs;
936 		if (phys_offs < region->offset)
937 			continue;
938 		if (phys_offs >= (region->offset + region->size))
939 			continue;
940 		va = region->va + phys_offs - region->offset;
941 		*user_va = (void *)va;
942 		return TEE_SUCCESS;
943 	}
944 	return TEE_ERROR_GENERIC;
945 }
946 
947 static int cmp_param_mem(const void *a0, const void *a1)
948 {
949 	const struct param_mem *m1 = a1;
950 	const struct param_mem *m0 = a0;
951 	int ret;
952 
953 	/* Make sure that invalid param_mem entries are placed last in the array */
954 	if (!m0->mobj && !m1->mobj)
955 		return 0;
956 	if (!m0->mobj)
957 		return 1;
958 	if (!m1->mobj)
959 		return -1;
960 
961 	ret = CMP_TRILEAN(mobj_is_secure(m0->mobj), mobj_is_secure(m1->mobj));
962 	if (ret)
963 		return ret;
964 
965 	ret = CMP_TRILEAN((vaddr_t)m0->mobj, (vaddr_t)m1->mobj);
966 	if (ret)
967 		return ret;
968 
969 	ret = CMP_TRILEAN(m0->offs, m1->offs);
970 	if (ret)
971 		return ret;
972 
973 	return CMP_TRILEAN(m0->size, m1->size);
974 }
975 
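/*
 * Map the memref parameters of @param into the user VA space of @uctx as
 * ephemeral, shareable regions and return the corresponding user virtual
 * addresses in @param_va. Overlapping or adjacent references to the same
 * mobj are coalesced into a single mapping first.
 */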
976 TEE_Result vm_map_param(struct user_mode_ctx *uctx, struct tee_ta_param *param,
977 			void *param_va[TEE_NUM_PARAMS])
978 {
979 	TEE_Result res = TEE_SUCCESS;
980 	size_t n;
981 	size_t m;
982 	struct param_mem mem[TEE_NUM_PARAMS];
983 
984 	memset(mem, 0, sizeof(mem));
985 	for (n = 0; n < TEE_NUM_PARAMS; n++) {
986 		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
987 		size_t phys_offs;
988 
989 		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
990 		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
991 		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
992 			continue;
993 		phys_offs = mobj_get_phys_offs(param->u[n].mem.mobj,
994 					       CORE_MMU_USER_PARAM_SIZE);
995 		mem[n].mobj = param->u[n].mem.mobj;
996 		mem[n].offs = ROUNDDOWN(phys_offs + param->u[n].mem.offs,
997 					CORE_MMU_USER_PARAM_SIZE);
998 		mem[n].size = ROUNDUP(phys_offs + param->u[n].mem.offs -
999 				      mem[n].offs + param->u[n].mem.size,
1000 				      CORE_MMU_USER_PARAM_SIZE);
1001 		/*
1002 		 * For size 0 (raw pointer parameter), add a minimum size
1003 		 * value to allow the address to be mapped.
1004 		 */
1005 		if (!mem[n].size)
1006 			mem[n].size = CORE_MMU_USER_PARAM_SIZE;
1007 	}
1008 
1009 	/*
1010 	 * Sort arguments so entries with a NULL mobj are placed last, secure
1011 	 * mobjs come first, then sort by mobj pointer value (entries with
1012 	 * different mobjs can't be merged either), and finally by offset.
1013 	 *
1014 	 * This should result in a list where all mergeable entries are
1015 	 * next to each other and unused/invalid entries are at the end.
1016 	 */
1017 	qsort(mem, TEE_NUM_PARAMS, sizeof(struct param_mem), cmp_param_mem);
1018 
1019 	for (n = 1, m = 0; n < TEE_NUM_PARAMS && mem[n].mobj; n++) {
1020 		if (mem[n].mobj == mem[m].mobj &&
1021 		    (mem[n].offs == (mem[m].offs + mem[m].size) ||
1022 		     core_is_buffer_intersect(mem[m].offs, mem[m].size,
1023 					      mem[n].offs, mem[n].size))) {
1024 			mem[m].size = mem[n].offs + mem[n].size - mem[m].offs;
1025 			continue;
1026 		}
1027 		m++;
1028 		if (n != m)
1029 			mem[m] = mem[n];
1030 	}
1031 	/*
1032 	 * We'd like 'm' to be the number of valid entries. Here 'm' is the
1033 	 * index of the last valid entry if the first entry is valid, else
1034 	 * 0.
1035 	 */
1036 	if (mem[0].mobj)
1037 		m++;
1038 
1039 	check_param_map_empty(uctx);
1040 
1041 	for (n = 0; n < m; n++) {
1042 		vaddr_t va = 0;
1043 
1044 		res = vm_map(uctx, &va, mem[n].size,
1045 			     TEE_MATTR_PRW | TEE_MATTR_URW,
1046 			     VM_FLAG_EPHEMERAL | VM_FLAG_SHAREABLE,
1047 			     mem[n].mobj, mem[n].offs);
1048 		if (res)
1049 			goto out;
1050 	}
1051 
1052 	for (n = 0; n < TEE_NUM_PARAMS; n++) {
1053 		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
1054 
1055 		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
1056 		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
1057 		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
1058 			continue;
1059 		if (!param->u[n].mem.mobj)
1060 			continue;
1061 
1062 		res = param_mem_to_user_va(uctx, &param->u[n].mem,
1063 					   param_va + n);
1064 		if (res != TEE_SUCCESS)
1065 			goto out;
1066 	}
1067 
1068 	res = alloc_pgt(uctx);
1069 out:
1070 	if (res)
1071 		vm_clean_param(uctx);
1072 
1073 	return res;
1074 }
1075 
1076 TEE_Result vm_add_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj,
1077 			vaddr_t *va)
1078 {
1079 	TEE_Result res = TEE_SUCCESS;
1080 	struct vm_region *reg = NULL;
1081 
1082 	if (!mobj_is_secure(mobj) || !mobj_is_paged(mobj))
1083 		return TEE_ERROR_BAD_PARAMETERS;
1084 
1085 	reg = calloc(1, sizeof(*reg));
1086 	if (!reg)
1087 		return TEE_ERROR_OUT_OF_MEMORY;
1088 
1089 	reg->mobj = mobj;
1090 	reg->offset = 0;
1091 	reg->va = 0;
1092 	reg->size = ROUNDUP(mobj->size, SMALL_PAGE_SIZE);
1093 	reg->attr = TEE_MATTR_SECURE;
1094 
1095 	res = umap_add_region(&uctx->vm_info, reg, 0, 0, 0);
1096 	if (res) {
1097 		free(reg);
1098 		return res;
1099 	}
1100 
1101 	res = alloc_pgt(uctx);
1102 	if (res)
1103 		umap_remove_region(&uctx->vm_info, reg);
1104 	else
1105 		*va = reg->va;
1106 
1107 	return res;
1108 }
1109 
1110 void vm_rem_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj, vaddr_t va)
1111 {
1112 	struct vm_region *r = NULL;
1113 
1114 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1115 		if (r->mobj == mobj && r->va == va) {
1116 			rem_um_region(uctx, r);
1117 			umap_remove_region(&uctx->vm_info, r);
1118 			return;
1119 		}
1120 	}
1121 }
1122 
1123 void vm_info_final(struct user_mode_ctx *uctx)
1124 {
1125 	if (!uctx->vm_info.asid)
1126 		return;
1127 
1128 	pgt_flush_ctx(uctx->ts_ctx);
1129 	tee_pager_rem_um_regions(uctx);
1130 
1131 	/* Clear MMU entries to avoid a clash when the ASID is reused */
1132 	tlbi_asid(uctx->vm_info.asid);
1133 
1134 	asid_free(uctx->vm_info.asid);
1135 	while (!TAILQ_EMPTY(&uctx->vm_info.regions))
1136 		umap_remove_region(&uctx->vm_info,
1137 				   TAILQ_FIRST(&uctx->vm_info.regions));
1138 	memset(&uctx->vm_info, 0, sizeof(uctx->vm_info));
1139 }
1140 
1141 /* Return true only if the buffer fits entirely inside TA private memory */
1142 bool vm_buf_is_inside_um_private(const struct user_mode_ctx *uctx,
1143 				 const void *va, size_t size)
1144 {
1145 	struct vm_region *r = NULL;
1146 
1147 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1148 		if (r->flags & VM_FLAGS_NONPRIV)
1149 			continue;
1150 		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size))
1151 			return true;
1152 	}
1153 
1154 	return false;
1155 }
1156 
1157 /* Return true only if the buffer intersects TA private memory */
1158 bool vm_buf_intersects_um_private(const struct user_mode_ctx *uctx,
1159 				  const void *va, size_t size)
1160 {
1161 	struct vm_region *r = NULL;
1162 
1163 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1164 		if (r->flags & VM_FLAGS_NONPRIV)
1165 			continue;
1166 		if (core_is_buffer_intersect((vaddr_t)va, size, r->va, r->size))
1167 			return true;
1168 	}
1169 
1170 	return false;
1171 }
1172 
1173 TEE_Result vm_buf_to_mboj_offs(const struct user_mode_ctx *uctx,
1174 			       const void *va, size_t size,
1175 			       struct mobj **mobj, size_t *offs)
1176 {
1177 	struct vm_region *r = NULL;
1178 
1179 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1180 		if (!r->mobj)
1181 			continue;
1182 		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size)) {
1183 			size_t poffs;
1184 
1185 			poffs = mobj_get_phys_offs(r->mobj,
1186 						   CORE_MMU_USER_PARAM_SIZE);
1187 			*mobj = r->mobj;
1188 			*offs = (vaddr_t)va - r->va + r->offset - poffs;
1189 			return TEE_SUCCESS;
1190 		}
1191 	}
1192 
1193 	return TEE_ERROR_BAD_PARAMETERS;
1194 }
1195 
1196 static TEE_Result tee_mmu_user_va2pa_attr(const struct user_mode_ctx *uctx,
1197 					  void *ua, paddr_t *pa, uint32_t *attr)
1198 {
1199 	struct vm_region *region = NULL;
1200 
1201 	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
1202 		if (!core_is_buffer_inside((vaddr_t)ua, 1, region->va,
1203 					   region->size))
1204 			continue;
1205 
1206 		if (pa) {
1207 			TEE_Result res;
1208 			paddr_t p;
1209 			size_t offset;
1210 			size_t granule;
1211 
1212 			/*
1213 			 * The mobj and the input user address may each
1214 			 * include a specific offset-in-granule position.
1215 			 * Drop both to get the target physical page base
1216 			 * address, then apply only the user address
1217 			 * offset-in-granule.
1218 			 * The smallest mapping granule is the small page.
1219 			 */
1220 			granule = MAX(region->mobj->phys_granule,
1221 				      (size_t)SMALL_PAGE_SIZE);
1222 			assert(!granule || IS_POWER_OF_TWO(granule));
1223 
1224 			offset = region->offset +
1225 				 ROUNDDOWN((vaddr_t)ua - region->va, granule);
1226 
1227 			res = mobj_get_pa(region->mobj, offset, granule, &p);
1228 			if (res != TEE_SUCCESS)
1229 				return res;
1230 
1231 			*pa = p | ((vaddr_t)ua & (granule - 1));
1232 		}
1233 		if (attr)
1234 			*attr = region->attr;
1235 
1236 		return TEE_SUCCESS;
1237 	}
1238 
1239 	return TEE_ERROR_ACCESS_DENIED;
1240 }
1241 
1242 TEE_Result vm_va2pa(const struct user_mode_ctx *uctx, void *ua, paddr_t *pa)
1243 {
1244 	return tee_mmu_user_va2pa_attr(uctx, ua, pa, NULL);
1245 }
1246 
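/*
 * Translate the physical range [@pa, @pa + @pa_size) to a user virtual
 * address in @uctx. Returns NULL unless the range fits inside one
 * physically contiguous chunk of a mapped region.
 */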
1247 void *vm_pa2va(const struct user_mode_ctx *uctx, paddr_t pa, size_t pa_size)
1248 {
1249 	paddr_t p = 0;
1250 	struct vm_region *region = NULL;
1251 
1252 	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
1253 		size_t granule = 0;
1254 		size_t size = 0;
1255 		size_t ofs = 0;
1256 
1257 		/* pa2va is expected only for memory tracked through mobj */
1258 		if (!region->mobj)
1259 			continue;
1260 
1261 		/* Physically granulated memory objects must be scanned */
1262 		granule = region->mobj->phys_granule;
1263 		assert(!granule || IS_POWER_OF_TWO(granule));
1264 
1265 		for (ofs = region->offset; ofs < region->size; ofs += size) {
1266 
1267 			if (granule) {
1268 				/* From current offset to buffer/granule end */
1269 				size = granule - (ofs & (granule - 1));
1270 
1271 				if (size > (region->size - ofs))
1272 					size = region->size - ofs;
1273 			} else {
1274 				size = region->size;
1275 			}
1276 
1277 			if (mobj_get_pa(region->mobj, ofs, granule, &p))
1278 				continue;
1279 
1280 			if (core_is_buffer_inside(pa, pa_size, p, size)) {
1281 				/* Remove region offset (mobj phys offset) */
1282 				ofs -= region->offset;
1283 				/* Get offset-in-granule */
1284 				p = pa - p;
1285 
1286 				return (void *)(region->va + ofs + (vaddr_t)p);
1287 			}
1288 		}
1289 	}
1290 
1291 	return NULL;
1292 }
1293 
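/*
 * Check that [@uaddr, @uaddr + @len) is mapped in @uctx with the access
 * rights requested in @flags (TEE_MEMORY_ACCESS_* read/write and
 * secure/non-secure constraints). Unless TEE_MEMORY_ACCESS_ANY_OWNER is
 * given, the buffer must also lie entirely within TA private memory.
 *
 * Illustrative call, with hypothetical buf/sz values:
 *
 *	res = vm_check_access_rights(uctx, TEE_MEMORY_ACCESS_READ |
 *				     TEE_MEMORY_ACCESS_ANY_OWNER,
 *				     (uaddr_t)buf, sz);
 */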
1294 TEE_Result vm_check_access_rights(const struct user_mode_ctx *uctx,
1295 				  uint32_t flags, uaddr_t uaddr, size_t len)
1296 {
1297 	uaddr_t a = 0;
1298 	uaddr_t end_addr = 0;
1299 	size_t addr_incr = MIN(CORE_MMU_USER_CODE_SIZE,
1300 			       CORE_MMU_USER_PARAM_SIZE);
1301 
1302 	if (ADD_OVERFLOW(uaddr, len, &end_addr))
1303 		return TEE_ERROR_ACCESS_DENIED;
1304 
1305 	if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
1306 	    (flags & TEE_MEMORY_ACCESS_SECURE))
1307 		return TEE_ERROR_ACCESS_DENIED;
1308 
1309 	/*
1310 	 * Rely on the TA private memory test to check whether the address
1311 	 * range is private to the TA or not.
1312 	 */
1313 	if (!(flags & TEE_MEMORY_ACCESS_ANY_OWNER) &&
1314 	   !vm_buf_is_inside_um_private(uctx, (void *)uaddr, len))
1315 		return TEE_ERROR_ACCESS_DENIED;
1316 
1317 	for (a = ROUNDDOWN(uaddr, addr_incr); a < end_addr; a += addr_incr) {
1318 		uint32_t attr;
1319 		TEE_Result res;
1320 
1321 		res = tee_mmu_user_va2pa_attr(uctx, (void *)a, NULL, &attr);
1322 		if (res != TEE_SUCCESS)
1323 			return res;
1324 
1325 		if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
1326 		    (attr & TEE_MATTR_SECURE))
1327 			return TEE_ERROR_ACCESS_DENIED;
1328 
1329 		if ((flags & TEE_MEMORY_ACCESS_SECURE) &&
1330 		    !(attr & TEE_MATTR_SECURE))
1331 			return TEE_ERROR_ACCESS_DENIED;
1332 
1333 		if ((flags & TEE_MEMORY_ACCESS_WRITE) && !(attr & TEE_MATTR_UW))
1334 			return TEE_ERROR_ACCESS_DENIED;
1335 		if ((flags & TEE_MEMORY_ACCESS_READ) && !(attr & TEE_MATTR_UR))
1336 			return TEE_ERROR_ACCESS_DENIED;
1337 	}
1338 
1339 	return TEE_SUCCESS;
1340 }
1341 
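/*
 * Make @ctx the active thread context. The previous user mode mapping,
 * if any, is removed and its page tables are released for reuse; if @ctx
 * is a user mode context a new user map is created and installed and the
 * pager gets the chance to reassign its tables.
 */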
1342 void vm_set_ctx(struct ts_ctx *ctx)
1343 {
1344 	struct thread_specific_data *tsd = thread_get_tsd();
1345 	struct user_mode_ctx *uctx = NULL;
1346 
1347 	core_mmu_set_user_map(NULL);
1348 
1349 	if (is_user_mode_ctx(tsd->ctx)) {
1350 		/*
1351 		 * We're coming from a user mode context, so we must make
1352 		 * the pgts available for reuse.
1353 		 */
1354 		uctx = to_user_mode_ctx(tsd->ctx);
1355 		pgt_put_all(&uctx->pgt_cache);
1356 	}
1357 
1358 	if (is_user_mode_ctx(ctx)) {
1359 		struct core_mmu_user_map map = { };
1360 
1361 		uctx = to_user_mode_ctx(ctx);
1362 		core_mmu_create_user_map(uctx, &map);
1363 		core_mmu_set_user_map(&map);
1364 		tee_pager_assign_um_tables(uctx);
1365 	}
1366 	tsd->ctx = ctx;
1367 }
1368 
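/*
 * Return a reference to the mobj backing @va in @uctx, clamping *@len to
 * what remains of the region from @va and reporting the protection bits
 * in *@prot and the offset into the mobj in *@offs. The caller receives
 * its own reference and must call mobj_put() when done.
 */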
1369 struct mobj *vm_get_mobj(struct user_mode_ctx *uctx, vaddr_t va, size_t *len,
1370 			 uint16_t *prot, size_t *offs)
1371 {
1372 	struct vm_region *r = NULL;
1373 	size_t r_offs = 0;
1374 
1375 	if (!len || ((*len | va) & SMALL_PAGE_MASK))
1376 		return NULL;
1377 
1378 	r = find_vm_region(&uctx->vm_info, va);
1379 	if (!r)
1380 		return NULL;
1381 
1382 	r_offs = va - r->va;
1383 
1384 	*len = MIN(r->size - r_offs, *len);
1385 	*offs = r->offset + r_offs;
1386 	*prot = r->attr & TEE_MATTR_PROT_MASK;
1387 	return mobj_get(r->mobj);
1388 }
1389