xref: /optee_os/core/mm/vm.c (revision 23ef3871cb5814f45010171373add4d339285616)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2021, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  * Copyright (c) 2021, Arm Limited
6  */
7 
8 #include <assert.h>
9 #include <config.h>
10 #include <initcall.h>
11 #include <kernel/panic.h>
12 #include <kernel/spinlock.h>
13 #include <kernel/tee_common.h>
14 #include <kernel/tee_misc.h>
15 #include <kernel/tlb_helpers.h>
16 #include <kernel/user_mode_ctx.h>
17 #include <kernel/virtualization.h>
18 #include <mm/core_memprot.h>
19 #include <mm/core_mmu.h>
20 #include <mm/mobj.h>
21 #include <mm/pgt_cache.h>
22 #include <mm/tee_mm.h>
23 #include <mm/tee_mmu_types.h>
24 #include <mm/tee_pager.h>
25 #include <mm/vm.h>
26 #include <stdlib.h>
27 #include <tee_api_defines_extensions.h>
28 #include <tee_api_types.h>
29 #include <trace.h>
30 #include <types_ext.h>
31 #include <user_ta_header.h>
32 #include <util.h>
33 
34 #ifdef CFG_PL310
35 #include <kernel/tee_l2cc_mutex.h>
36 #endif
37 
38 #define TEE_MMU_UDATA_ATTR		(TEE_MATTR_VALID_BLOCK | \
39 					 TEE_MATTR_PRW | TEE_MATTR_URW | \
40 					 TEE_MATTR_SECURE)
41 #define TEE_MMU_UCODE_ATTR		(TEE_MATTR_VALID_BLOCK | \
42 					 TEE_MATTR_PRW | TEE_MATTR_URWX | \
43 					 TEE_MATTR_SECURE)
44 
45 #define TEE_MMU_UCACHE_DEFAULT_ATTR	(TEE_MATTR_MEM_TYPE_CACHED << \
46 					 TEE_MATTR_MEM_TYPE_SHIFT)
47 
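/*
 * Find a suitable virtual address for @reg in the gap between @prev_reg
 * and @next_reg, honouring @pad_begin, @pad_end and the granularity
 * @granul. A small page of unmapped guard space is inserted next to a
 * neighbour with differing EPHEMERAL/PERMANENT/SHAREABLE flags, and
 * without LPAE the granularity is raised to a whole translation table
 * when the secure attribute differs from a neighbour. Returns the
 * selected address, or 0 if the region cannot fit.
 */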
48 static vaddr_t select_va_in_range(const struct vm_region *prev_reg,
49 				  const struct vm_region *next_reg,
50 				  const struct vm_region *reg,
51 				  size_t pad_begin, size_t pad_end,
52 				  size_t granul)
53 {
54 	const uint32_t f = VM_FLAG_EPHEMERAL | VM_FLAG_PERMANENT |
55 			    VM_FLAG_SHAREABLE;
56 	vaddr_t begin_va = 0;
57 	vaddr_t end_va = 0;
58 	size_t pad = 0;
59 
60 	/*
61 	 * Insert an unmapped entry to separate regions with differing
62 	 * VM_FLAG_EPHEMERAL, VM_FLAG_PERMANENT or VM_FLAG_SHAREABLE
63 	 * bits, as such regions must never be contiguous with each other.
64 	 */
65 	if (prev_reg->flags && (prev_reg->flags & f) != (reg->flags & f))
66 		pad = SMALL_PAGE_SIZE;
67 	else
68 		pad = 0;
69 
70 #ifndef CFG_WITH_LPAE
71 	if ((prev_reg->attr & TEE_MATTR_SECURE) !=
72 	    (reg->attr & TEE_MATTR_SECURE))
73 		granul = CORE_MMU_PGDIR_SIZE;
74 #endif
75 
76 	if (ADD_OVERFLOW(prev_reg->va, prev_reg->size, &begin_va) ||
77 	    ADD_OVERFLOW(begin_va, pad_begin, &begin_va) ||
78 	    ADD_OVERFLOW(begin_va, pad, &begin_va) ||
79 	    ROUNDUP_OVERFLOW(begin_va, granul, &begin_va))
80 		return 0;
81 
82 	if (reg->va) {
83 		if (reg->va < begin_va)
84 			return 0;
85 		begin_va = reg->va;
86 	}
87 
88 	if (next_reg->flags && (next_reg->flags & f) != (reg->flags & f))
89 		pad = SMALL_PAGE_SIZE;
90 	else
91 		pad = 0;
92 
93 #ifndef CFG_WITH_LPAE
94 	if ((next_reg->attr & TEE_MATTR_SECURE) !=
95 	    (reg->attr & TEE_MATTR_SECURE))
96 		granul = CORE_MMU_PGDIR_SIZE;
97 #endif
98 	if (ADD_OVERFLOW(begin_va, reg->size, &end_va) ||
99 	    ADD_OVERFLOW(end_va, pad_end, &end_va) ||
100 	    ADD_OVERFLOW(end_va, pad, &end_va) ||
101 	    ROUNDUP_OVERFLOW(end_va, granul, &end_va))
102 		return 0;
103 
104 	if (end_va <= next_reg->va) {
105 		assert(!reg->va || reg->va == begin_va);
106 		return begin_va;
107 	}
108 
109 	return 0;
110 }
111 
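/*
 * Check that enough page tables are available to cover the regions of
 * @uctx. With CFG_PAGED_USER_TA the tables are also allocated right away
 * when @uctx belongs to the currently active context since the pager
 * will need them soon.
 */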
112 static TEE_Result alloc_pgt(struct user_mode_ctx *uctx)
113 {
114 	struct thread_specific_data *tsd __maybe_unused;
115 
116 	if (!pgt_check_avail(uctx)) {
117 		EMSG("Page tables are not available");
118 		return TEE_ERROR_OUT_OF_MEMORY;
119 	}
120 
121 #ifdef CFG_PAGED_USER_TA
122 	tsd = thread_get_tsd();
123 	if (uctx->ts_ctx == tsd->ctx) {
124 		/*
125 		 * The supplied uctx is the currently active one, so allocate
126 		 * the page tables too as the pager will need them soon.
127 		 */
128 		pgt_get_all(uctx);
129 	}
130 #endif
131 
132 	return TEE_SUCCESS;
133 }
134 
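/*
 * Remove the mapping of @r from the page tables of @uctx. Paged regions
 * are handed over to the pager, other regions have their page table
 * entries cleared and the matching TLB entries invalidated. Translation
 * tables left without any region covering them are released with
 * pgt_flush_range().
 */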
135 static void rem_um_region(struct user_mode_ctx *uctx, struct vm_region *r)
136 {
137 	vaddr_t begin = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE);
138 	vaddr_t last = ROUNDUP(r->va + r->size, CORE_MMU_PGDIR_SIZE);
139 	struct vm_region *r2 = NULL;
140 
141 	if (mobj_is_paged(r->mobj)) {
142 		tee_pager_rem_um_region(uctx, r->va, r->size);
143 	} else {
144 		pgt_clear_range(uctx, r->va, r->va + r->size);
145 		tlbi_mva_range_asid(r->va, r->size, SMALL_PAGE_SIZE,
146 				    uctx->vm_info.asid);
147 	}
148 
149 	r2 = TAILQ_NEXT(r, link);
150 	if (r2)
151 		last = MIN(last, ROUNDDOWN(r2->va, CORE_MMU_PGDIR_SIZE));
152 
153 	r2 = TAILQ_PREV(r, vm_region_head, link);
154 	if (r2)
155 		begin = MAX(begin,
156 			    ROUNDUP(r2->va + r2->size, CORE_MMU_PGDIR_SIZE));
157 
158 	/* If there are no unused page tables, there's nothing left to do */
159 	if (begin >= last)
160 		return;
161 	pgt_flush_range(uctx, r->va, r->va + r->size);
162 }
163 
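/*
 * Set the entries of @ti covering [@va, @va + @size) to map a contiguous
 * physical range starting at @pa with the attributes @attr.
 */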
164 static void set_pa_range(struct core_mmu_table_info *ti, vaddr_t va,
165 			 paddr_t pa, size_t size, uint32_t attr)
166 {
167 	unsigned int end = core_mmu_va2idx(ti, va + size);
168 	unsigned int idx = core_mmu_va2idx(ti, va);
169 
170 	while (idx < end) {
171 		core_mmu_set_entry(ti, idx, pa, attr);
172 		idx++;
173 		pa += BIT64(ti->shift);
174 	}
175 }
176 
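/*
 * Enter the part of @r that falls inside the translation table described
 * by @ti, resolving the physical address of the backing mobj one
 * physical granule at a time.
 */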
177 static void set_reg_in_table(struct core_mmu_table_info *ti,
178 			     struct vm_region *r)
179 {
180 	vaddr_t va = MAX(r->va, ti->va_base);
181 	vaddr_t end = MIN(r->va + r->size, ti->va_base + CORE_MMU_PGDIR_SIZE);
182 	size_t sz = MIN(end - va, mobj_get_phys_granule(r->mobj));
183 	size_t granule = BIT(ti->shift);
184 	size_t offset = 0;
185 	paddr_t pa = 0;
186 
187 	while (va < end) {
188 		offset = va - r->va + r->offset;
189 		if (mobj_get_pa(r->mobj, offset, granule, &pa))
190 			panic("Failed to get PA");
191 		set_pa_range(ti, va, pa, sz, r->attr);
192 		va += sz;
193 	}
194 }
195 
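/*
 * Update the page tables of @uctx with the mapping described by @r. If
 * all page tables are already assigned to the context they are updated
 * in place, otherwise only the tables currently found in the cache list
 * are updated. Must not be used for pager backed regions.
 */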
196 static void set_um_region(struct user_mode_ctx *uctx, struct vm_region *r)
197 {
198 	struct pgt *p = SLIST_FIRST(&uctx->pgt_cache);
199 	struct core_mmu_table_info ti = { };
200 
201 	assert(!mobj_is_paged(r->mobj));
202 
203 	core_mmu_set_info_table(&ti, CORE_MMU_PGDIR_LEVEL, 0, NULL);
204 
205 	if (p) {
206 		/* All the pgts are already allocated, update in place */
207 		do {
208 			ti.va_base = p->vabase;
209 			ti.table = p->tbl;
210 			set_reg_in_table(&ti, r);
211 			p = SLIST_NEXT(p, link);
212 		} while (p);
213 	} else {
214 		/*
215 		 * We may have a few pgts in the cache list, update the
216 		 * We may have a few pgts in the cache list; update the
217 		 * ones we find.
218 		for (ti.va_base = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE);
219 		     ti.va_base < r->va + r->size;
220 		     ti.va_base += CORE_MMU_PGDIR_SIZE) {
221 			p = pgt_pop_from_cache_list(ti.va_base, uctx->ts_ctx);
222 			if (!p)
223 				continue;
224 			ti.table = p->tbl;
225 			set_reg_in_table(&ti, r);
226 			pgt_push_to_cache_list(p);
227 		}
228 	}
229 }
230 
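/*
 * Insert @reg into the sorted region list of @vmi. A free virtual
 * address range honouring @pad_begin, @pad_end and @align is selected
 * unless @reg->va already holds a fixed address. Returns
 * TEE_ERROR_ACCESS_CONFLICT if no suitable spot is found.
 */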
231 static TEE_Result umap_add_region(struct vm_info *vmi, struct vm_region *reg,
232 				  size_t pad_begin, size_t pad_end,
233 				  size_t align)
234 {
235 	struct vm_region dummy_first_reg = { };
236 	struct vm_region dummy_last_reg = { };
237 	struct vm_region *r = NULL;
238 	struct vm_region *prev_r = NULL;
239 	vaddr_t va_range_base = 0;
240 	size_t va_range_size = 0;
241 	size_t granul;
242 	vaddr_t va = 0;
243 	size_t offs_plus_size = 0;
244 
245 	core_mmu_get_user_va_range(&va_range_base, &va_range_size);
246 	dummy_first_reg.va = va_range_base;
247 	dummy_last_reg.va = va_range_base + va_range_size;
248 
249 	/* Check alignment; it has to be at least SMALL_PAGE_SIZE aligned */
250 	if ((reg->va | reg->size | pad_begin | pad_end) & SMALL_PAGE_MASK)
251 		return TEE_ERROR_ACCESS_CONFLICT;
252 
253 	/* Check that the mobj is defined for the entire range */
254 	if (ADD_OVERFLOW(reg->offset, reg->size, &offs_plus_size))
255 		return TEE_ERROR_BAD_PARAMETERS;
256 	if (offs_plus_size > ROUNDUP(reg->mobj->size, SMALL_PAGE_SIZE))
257 		return TEE_ERROR_BAD_PARAMETERS;
258 
259 	granul = MAX(align, SMALL_PAGE_SIZE);
260 	if (!IS_POWER_OF_TWO(granul))
261 		return TEE_ERROR_BAD_PARAMETERS;
262 
263 	prev_r = &dummy_first_reg;
264 	TAILQ_FOREACH(r, &vmi->regions, link) {
265 		va = select_va_in_range(prev_r, r, reg, pad_begin, pad_end,
266 					granul);
267 		if (va) {
268 			reg->va = va;
269 			TAILQ_INSERT_BEFORE(r, reg, link);
270 			return TEE_SUCCESS;
271 		}
272 		prev_r = r;
273 	}
274 
275 	r = TAILQ_LAST(&vmi->regions, vm_region_head);
276 	if (!r)
277 		r = &dummy_first_reg;
278 	va = select_va_in_range(r, &dummy_last_reg, reg, pad_begin, pad_end,
279 				granul);
280 	if (va) {
281 		reg->va = va;
282 		TAILQ_INSERT_TAIL(&vmi->regions, reg, link);
283 		return TEE_SUCCESS;
284 	}
285 
286 	return TEE_ERROR_ACCESS_CONFLICT;
287 }
288 
289 TEE_Result vm_map_pad(struct user_mode_ctx *uctx, vaddr_t *va, size_t len,
290 		      uint32_t prot, uint32_t flags, struct mobj *mobj,
291 		      size_t offs, size_t pad_begin, size_t pad_end,
292 		      size_t align)
293 {
294 	TEE_Result res = TEE_SUCCESS;
295 	struct vm_region *reg = NULL;
296 	uint32_t attr = 0;
297 
298 	if (prot & ~TEE_MATTR_PROT_MASK)
299 		return TEE_ERROR_BAD_PARAMETERS;
300 
301 	reg = calloc(1, sizeof(*reg));
302 	if (!reg)
303 		return TEE_ERROR_OUT_OF_MEMORY;
304 
305 	if (!mobj_is_paged(mobj)) {
306 		uint32_t mem_type = 0;
307 
308 		res = mobj_get_mem_type(mobj, &mem_type);
309 		if (res)
310 			goto err_free_reg;
311 		attr |= mem_type << TEE_MATTR_MEM_TYPE_SHIFT;
312 	}
313 	attr |= TEE_MATTR_VALID_BLOCK;
314 	if (mobj_is_secure(mobj))
315 		attr |= TEE_MATTR_SECURE;
316 
317 	reg->mobj = mobj_get(mobj);
318 	reg->offset = offs;
319 	reg->va = *va;
320 	reg->size = ROUNDUP(len, SMALL_PAGE_SIZE);
321 	reg->attr = attr | prot;
322 	reg->flags = flags;
323 
324 	res = umap_add_region(&uctx->vm_info, reg, pad_begin, pad_end, align);
325 	if (res)
326 		goto err_put_mobj;
327 
328 	res = alloc_pgt(uctx);
329 	if (res)
330 		goto err_rem_reg;
331 
332 	if (mobj_is_paged(mobj)) {
333 		struct fobj *fobj = mobj_get_fobj(mobj);
334 
335 		if (!fobj) {
336 			res = TEE_ERROR_GENERIC;
337 			goto err_rem_reg;
338 		}
339 
340 		res = tee_pager_add_um_region(uctx, reg->va, fobj, prot);
341 		fobj_put(fobj);
342 		if (res)
343 			goto err_rem_reg;
344 	} else {
345 		set_um_region(uctx, reg);
346 	}
347 
348 	/*
349 	 * If the context is currently active, set it again to update
350 	 * the mapping.
351 	 */
352 	if (thread_get_tsd()->ctx == uctx->ts_ctx)
353 		vm_set_ctx(uctx->ts_ctx);
354 
355 	*va = reg->va;
356 
357 	return TEE_SUCCESS;
358 
359 err_rem_reg:
360 	TAILQ_REMOVE(&uctx->vm_info.regions, reg, link);
361 err_put_mobj:
362 	mobj_put(reg->mobj);
363 err_free_reg:
364 	free(reg);
365 	return res;
366 }
367 
368 static struct vm_region *find_vm_region(struct vm_info *vm_info, vaddr_t va)
369 {
370 	struct vm_region *r = NULL;
371 
372 	TAILQ_FOREACH(r, &vm_info->regions, link)
373 		if (va >= r->va && va < r->va + r->size)
374 			return r;
375 
376 	return NULL;
377 }
378 
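/*
 * Check that [@va, @va + @len) is covered by a contiguous chain of
 * regions starting at @r0. The optional @cmp_regs callback can reject
 * two adjacent regions as incompatible for the caller's purpose.
 */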
379 static bool va_range_is_contiguous(struct vm_region *r0, vaddr_t va,
380 				   size_t len,
381 				   bool (*cmp_regs)(const struct vm_region *r0,
382 						    const struct vm_region *r,
383 						    const struct vm_region *rn))
384 {
385 	struct vm_region *r = r0;
386 	vaddr_t end_va = 0;
387 
388 	if (ADD_OVERFLOW(va, len, &end_va))
389 		return false;
390 
391 	while (true) {
392 		struct vm_region *r_next = TAILQ_NEXT(r, link);
393 		vaddr_t r_end_va = r->va + r->size;
394 
395 		if (r_end_va >= end_va)
396 			return true;
397 		if (!r_next)
398 			return false;
399 		if (r_end_va != r_next->va)
400 			return false;
401 		if (cmp_regs && !cmp_regs(r0, r, r_next))
402 			return false;
403 		r = r_next;
404 	}
405 }
406 
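/*
 * Split @r at @va into two regions backed by the same mobj, inserting
 * the new upper part right after @r. @va must lie strictly inside @r.
 */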
407 static TEE_Result split_vm_region(struct user_mode_ctx *uctx,
408 				  struct vm_region *r, vaddr_t va)
409 {
410 	struct vm_region *r2 = NULL;
411 	size_t diff = va - r->va;
412 
413 	assert(diff && diff < r->size);
414 
415 	r2 = calloc(1, sizeof(*r2));
416 	if (!r2)
417 		return TEE_ERROR_OUT_OF_MEMORY;
418 
419 	if (mobj_is_paged(r->mobj)) {
420 		TEE_Result res = tee_pager_split_um_region(uctx, va);
421 
422 		if (res) {
423 			free(r2);
424 			return res;
425 		}
426 	}
427 
428 	r2->mobj = mobj_get(r->mobj);
429 	r2->offset = r->offset + diff;
430 	r2->va = va;
431 	r2->size = r->size - diff;
432 	r2->attr = r->attr;
433 	r2->flags = r->flags;
434 
435 	r->size = diff;
436 
437 	TAILQ_INSERT_AFTER(&uctx->vm_info.regions, r, r2, link);
438 
439 	return TEE_SUCCESS;
440 }
441 
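/*
 * Make sure that [@va, @va + @len) begins and ends on region boundaries
 * by splitting regions where needed, returning the first region of the
 * range in @r0_ret. The range must be contiguous and, if @cmp_regs is
 * supplied, pass its compatibility check.
 */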
442 static TEE_Result split_vm_range(struct user_mode_ctx *uctx, vaddr_t va,
443 				 size_t len,
444 				 bool (*cmp_regs)(const struct vm_region *r0,
445 						  const struct vm_region *r,
446 						  const struct vm_region *rn),
447 				 struct vm_region **r0_ret)
448 {
449 	TEE_Result res = TEE_SUCCESS;
450 	struct vm_region *r = NULL;
451 	vaddr_t end_va = 0;
452 
453 	if ((va | len) & SMALL_PAGE_MASK)
454 		return TEE_ERROR_BAD_PARAMETERS;
455 
456 	if (ADD_OVERFLOW(va, len, &end_va))
457 		return TEE_ERROR_BAD_PARAMETERS;
458 
459 	/*
460 	 * Find first vm_region in range and check that the entire range is
461 	 * contiguous.
462 	 */
463 	r = find_vm_region(&uctx->vm_info, va);
464 	if (!r || !va_range_is_contiguous(r, va, len, cmp_regs))
465 		return TEE_ERROR_BAD_PARAMETERS;
466 
467 	/*
468 	 * If needed, split regions so that va and len cover only complete
469 	 * regions.
470 	 */
471 	if (va != r->va) {
472 		res = split_vm_region(uctx, r, va);
473 		if (res)
474 			return res;
475 		r = TAILQ_NEXT(r, link);
476 	}
477 
478 	*r0_ret = r;
479 	r = find_vm_region(&uctx->vm_info, va + len - 1);
480 	if (!r)
481 		return TEE_ERROR_BAD_PARAMETERS;
482 	if (end_va != r->va + r->size) {
483 		res = split_vm_region(uctx, r, end_va);
484 		if (res)
485 			return res;
486 	}
487 
488 	return TEE_SUCCESS;
489 }
490 
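/*
 * Merge adjacent regions intersecting [@va, @va + @len) when they are
 * backed by the same mobj with contiguous offsets and have identical
 * attributes and flags, undoing earlier splits where possible.
 */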
491 static void merge_vm_range(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
492 {
493 	struct vm_region *r_next = NULL;
494 	struct vm_region *r = NULL;
495 	vaddr_t end_va = 0;
496 
497 	if (ADD_OVERFLOW(va, len, &end_va))
498 		return;
499 
500 	tee_pager_merge_um_region(uctx, va, len);
501 
502 	for (r = TAILQ_FIRST(&uctx->vm_info.regions);; r = r_next) {
503 		r_next = TAILQ_NEXT(r, link);
504 		if (!r_next)
505 			return;
506 
507 		/* Try merging with the region just before va */
508 		if (r->va + r->size < va)
509 			continue;
510 
511 		/*
512 		 * If r->va is well past our range, we're done.
513 		 * Note that if it's just the page after our range, we'll
514 		 * still try to merge.
515 		 */
516 		if (r->va > end_va)
517 			return;
518 
519 		if (r->va + r->size != r_next->va)
520 			continue;
521 		if (r->mobj != r_next->mobj ||
522 		    r->flags != r_next->flags ||
523 		    r->attr != r_next->attr)
524 			continue;
525 		if (r->offset + r->size != r_next->offset)
526 			continue;
527 
528 		TAILQ_REMOVE(&uctx->vm_info.regions, r_next, link);
529 		r->size += r_next->size;
530 		mobj_put(r_next->mobj);
531 		free(r_next);
532 		r_next = r;
533 	}
534 }
535 
536 static bool cmp_region_for_remap(const struct vm_region *r0,
537 				 const struct vm_region *r,
538 				 const struct vm_region *rn)
539 {
540 	/*
541 	 * All the essentials have to match for remap to make sense: the
542 	 * mobj/fobj, attr and flags must be equal and the offsets must be
543 	 * contiguous.
544 	 *
545 	 * Note that vm_remap() depends on mobj/fobj to be the same.
546 	 */
547 	return r0->flags == r->flags && r0->attr == r->attr &&
548 	       r0->mobj == r->mobj && rn->offset == r->offset + r->size;
549 }
550 
551 TEE_Result vm_remap(struct user_mode_ctx *uctx, vaddr_t *new_va, vaddr_t old_va,
552 		    size_t len, size_t pad_begin, size_t pad_end)
553 {
554 	struct vm_region_head regs = TAILQ_HEAD_INITIALIZER(regs);
555 	TEE_Result res = TEE_SUCCESS;
556 	struct vm_region *r0 = NULL;
557 	struct vm_region *r = NULL;
558 	struct vm_region *r_next = NULL;
559 	struct vm_region *r_last = NULL;
560 	struct vm_region *r_first = NULL;
561 	struct fobj *fobj = NULL;
562 	vaddr_t next_va = 0;
563 
564 	assert(thread_get_tsd()->ctx == uctx->ts_ctx);
565 
566 	if (!len || ((len | old_va) & SMALL_PAGE_MASK))
567 		return TEE_ERROR_BAD_PARAMETERS;
568 
569 	res = split_vm_range(uctx, old_va, len, cmp_region_for_remap, &r0);
570 	if (res)
571 		return res;
572 
573 	if (mobj_is_paged(r0->mobj)) {
574 		fobj = mobj_get_fobj(r0->mobj);
575 		if (!fobj)
576 			panic();
577 	}
578 
579 	for (r = r0; r; r = r_next) {
580 		if (r->va + r->size > old_va + len)
581 			break;
582 		r_next = TAILQ_NEXT(r, link);
583 		rem_um_region(uctx, r);
584 		TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
585 		TAILQ_INSERT_TAIL(&regs, r, link);
586 	}
587 
588 	/*
589 	 * Synchronize the change to the translation tables. Even though the
590 	 * pager case unmaps immediately, we may still free a translation table.
591 	 */
592 	vm_set_ctx(uctx->ts_ctx);
593 
594 	r_first = TAILQ_FIRST(&regs);
595 	while (!TAILQ_EMPTY(&regs)) {
596 		r = TAILQ_FIRST(&regs);
597 		TAILQ_REMOVE(&regs, r, link);
598 		if (r_last) {
599 			r->va = r_last->va + r_last->size;
600 			res = umap_add_region(&uctx->vm_info, r, 0, 0, 0);
601 		} else {
602 			r->va = *new_va;
603 			res = umap_add_region(&uctx->vm_info, r, pad_begin,
604 					      pad_end + len - r->size, 0);
605 		}
606 		if (!res) {
607 			r_last = r;
608 			res = alloc_pgt(uctx);
609 		}
610 		if (!res) {
611 			if (!fobj)
612 				set_um_region(uctx, r);
613 			else
614 				res = tee_pager_add_um_region(uctx, r->va, fobj,
615 							      r->attr);
616 		}
617 
618 		if (res) {
619 			/*
620 			 * Something went wrong, move all the recently added
621 			 * regions back to regs for later reinsertion at
622 			 * the original spot.
623 			 */
624 			struct vm_region *r_tmp = NULL;
625 			struct vm_region *r_stop = NULL;
626 
627 			if (r != r_last) {
628 				/*
629 				 * umap_add_region() failed, move r back to
630 				 * regs before all the rest are moved back.
631 				 */
632 				TAILQ_INSERT_HEAD(&regs, r, link);
633 			}
634 			if (r_last)
635 				r_stop = TAILQ_NEXT(r_last, link);
636 			for (r = r_first; r != r_stop; r = r_next) {
637 				r_next = TAILQ_NEXT(r, link);
638 				TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
639 				if (r_tmp)
640 					TAILQ_INSERT_AFTER(&regs, r_tmp, r,
641 							   link);
642 				else
643 					TAILQ_INSERT_HEAD(&regs, r, link);
644 				r_tmp = r;
645 			}
646 
647 			goto err_restore_map;
648 		}
649 	}
650 
651 	fobj_put(fobj);
652 
653 	vm_set_ctx(uctx->ts_ctx);
654 	*new_va = r_first->va;
655 
656 	return TEE_SUCCESS;
657 
658 err_restore_map:
659 	next_va = old_va;
660 	while (!TAILQ_EMPTY(&regs)) {
661 		r = TAILQ_FIRST(&regs);
662 		TAILQ_REMOVE(&regs, r, link);
663 		r->va = next_va;
664 		next_va += r->size;
665 		if (umap_add_region(&uctx->vm_info, r, 0, 0, 0))
666 			panic("Cannot restore mapping");
667 		if (alloc_pgt(uctx))
668 			panic("Cannot restore mapping");
669 		if (fobj) {
670 			if (tee_pager_add_um_region(uctx, r->va, fobj, r->attr))
671 				panic("Cannot restore mapping");
672 		} else {
673 			set_um_region(uctx, r);
674 		}
675 	}
676 	fobj_put(fobj);
677 	vm_set_ctx(uctx->ts_ctx);
678 
679 	return res;
680 }
681 
682 static bool cmp_region_for_get_flags(const struct vm_region *r0,
683 				     const struct vm_region *r,
684 				     const struct vm_region *rn __unused)
685 {
686 	return r0->flags == r->flags;
687 }
688 
689 TEE_Result vm_get_flags(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
690 			uint32_t *flags)
691 {
692 	struct vm_region *r = NULL;
693 
694 	if (!len || ((len | va) & SMALL_PAGE_MASK))
695 		return TEE_ERROR_BAD_PARAMETERS;
696 
697 	r = find_vm_region(&uctx->vm_info, va);
698 	if (!r)
699 		return TEE_ERROR_BAD_PARAMETERS;
700 
701 	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_flags))
702 		return TEE_ERROR_BAD_PARAMETERS;
703 
704 	*flags = r->flags;
705 
706 	return TEE_SUCCESS;
707 }
708 
709 static bool cmp_region_for_get_prot(const struct vm_region *r0,
710 				    const struct vm_region *r,
711 				    const struct vm_region *rn __unused)
712 {
713 	return (r0->attr & TEE_MATTR_PROT_MASK) ==
714 	       (r->attr & TEE_MATTR_PROT_MASK);
715 }
716 
717 TEE_Result vm_get_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
718 		       uint16_t *prot)
719 {
720 	struct vm_region *r = NULL;
721 
722 	if (!len || ((len | va) & SMALL_PAGE_MASK))
723 		return TEE_ERROR_BAD_PARAMETERS;
724 
725 	r = find_vm_region(&uctx->vm_info, va);
726 	if (!r)
727 		return TEE_ERROR_BAD_PARAMETERS;
728 
729 	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_prot))
730 		return TEE_ERROR_BAD_PARAMETERS;
731 
732 	*prot = r->attr & TEE_MATTR_PROT_MASK;
733 
734 	return TEE_SUCCESS;
735 }
736 
737 TEE_Result vm_set_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
738 		       uint32_t prot)
739 {
740 	TEE_Result res = TEE_SUCCESS;
741 	struct vm_region *r0 = NULL;
742 	struct vm_region *r = NULL;
743 	bool was_writeable = false;
744 	bool need_sync = false;
745 
746 	assert(thread_get_tsd()->ctx == uctx->ts_ctx);
747 
748 	if (prot & ~TEE_MATTR_PROT_MASK || !len)
749 		return TEE_ERROR_BAD_PARAMETERS;
750 
751 	res = split_vm_range(uctx, va, len, NULL, &r0);
752 	if (res)
753 		return res;
754 
755 	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
756 		if (r->va + r->size > va + len)
757 			break;
758 		if (r->attr & (TEE_MATTR_UW | TEE_MATTR_PW))
759 			was_writeable = true;
760 
761 		r->attr &= ~TEE_MATTR_PROT_MASK;
762 		r->attr |= prot;
763 
764 		if (!mobj_is_paged(r->mobj)) {
765 			need_sync = true;
766 			set_um_region(uctx, r);
767 			/*
768 			 * Normally when set_um_region() is called we
769 			 * change from no mapping to some mapping, but in
770 			 * this case we change the permissions on an
771 			 * already present mapping so some TLB invalidation
772 			 * is needed. We also depend on the dsb() performed
773 			 * as part of the TLB invalidation.
774 			 */
775 			tlbi_mva_range_asid(r->va, r->size, SMALL_PAGE_SIZE,
776 					    uctx->vm_info.asid);
777 		}
778 	}
779 
780 	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
781 		if (r->va + r->size > va + len)
782 			break;
783 		if (mobj_is_paged(r->mobj)) {
784 			if (!tee_pager_set_um_region_attr(uctx, r->va, r->size,
785 							  prot))
786 				panic();
787 		} else if (was_writeable) {
788 			cache_op_inner(DCACHE_AREA_CLEAN, (void *)r->va,
789 				       r->size);
790 		}
791 
792 	}
793 	if (need_sync && was_writeable)
794 		cache_op_inner(ICACHE_INVALIDATE, NULL, 0);
795 
796 	merge_vm_range(uctx, va, len);
797 
798 	return TEE_SUCCESS;
799 }
800 
801 static void umap_remove_region(struct vm_info *vmi, struct vm_region *reg)
802 {
803 	TAILQ_REMOVE(&vmi->regions, reg, link);
804 	mobj_put(reg->mobj);
805 	free(reg);
806 }
807 
808 TEE_Result vm_unmap(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
809 {
810 	TEE_Result res = TEE_SUCCESS;
811 	struct vm_region *r = NULL;
812 	struct vm_region *r_next = NULL;
813 	size_t end_va = 0;
814 	size_t unmap_end_va = 0;
815 	size_t l = 0;
816 
817 	assert(thread_get_tsd()->ctx == uctx->ts_ctx);
818 
819 	if (ROUNDUP_OVERFLOW(len, SMALL_PAGE_SIZE, &l))
820 		return TEE_ERROR_BAD_PARAMETERS;
821 
822 	if (!l || (va & SMALL_PAGE_MASK))
823 		return TEE_ERROR_BAD_PARAMETERS;
824 
825 	if (ADD_OVERFLOW(va, l, &end_va))
826 		return TEE_ERROR_BAD_PARAMETERS;
827 
828 	res = split_vm_range(uctx, va, l, NULL, &r);
829 	if (res)
830 		return res;
831 
832 	while (true) {
833 		r_next = TAILQ_NEXT(r, link);
834 		unmap_end_va = r->va + r->size;
835 		rem_um_region(uctx, r);
836 		umap_remove_region(&uctx->vm_info, r);
837 		if (!r_next || unmap_end_va == end_va)
838 			break;
839 		r = r_next;
840 	}
841 
842 	return TEE_SUCCESS;
843 }
844 
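/*
 * Map the kernel code and data that must remain mapped while executing
 * in user mode, as permanent privileged-only regions.
 */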
845 static TEE_Result map_kinit(struct user_mode_ctx *uctx)
846 {
847 	TEE_Result res = TEE_SUCCESS;
848 	struct mobj *mobj = NULL;
849 	size_t offs = 0;
850 	vaddr_t va = 0;
851 	size_t sz = 0;
852 	uint32_t prot = 0;
853 
854 	thread_get_user_kcode(&mobj, &offs, &va, &sz);
855 	if (sz) {
856 		prot = TEE_MATTR_PRX;
857 		if (IS_ENABLED(CFG_CORE_BTI))
858 			prot |= TEE_MATTR_GUARDED;
859 		res = vm_map(uctx, &va, sz, prot, VM_FLAG_PERMANENT,
860 			     mobj, offs);
861 		if (res)
862 			return res;
863 	}
864 
865 	thread_get_user_kdata(&mobj, &offs, &va, &sz);
866 	if (sz)
867 		return vm_map(uctx, &va, sz, TEE_MATTR_PRW, VM_FLAG_PERMANENT,
868 			      mobj, offs);
869 
870 	return TEE_SUCCESS;
871 }
872 
873 TEE_Result vm_info_init(struct user_mode_ctx *uctx, struct ts_ctx *ts_ctx)
874 {
875 	TEE_Result res;
876 	uint32_t asid = asid_alloc();
877 
878 	if (!asid) {
879 		DMSG("Failed to allocate ASID");
880 		return TEE_ERROR_GENERIC;
881 	}
882 
883 	memset(uctx, 0, sizeof(*uctx));
884 	TAILQ_INIT(&uctx->vm_info.regions);
885 	SLIST_INIT(&uctx->pgt_cache);
886 	uctx->vm_info.asid = asid;
887 	uctx->ts_ctx = ts_ctx;
888 
889 	res = map_kinit(uctx);
890 	if (res)
891 		vm_info_final(uctx);
892 	return res;
893 }
894 
895 void vm_clean_param(struct user_mode_ctx *uctx)
896 {
897 	struct vm_region *next_r;
898 	struct vm_region *r;
899 
900 	TAILQ_FOREACH_SAFE(r, &uctx->vm_info.regions, link, next_r) {
901 		if (r->flags & VM_FLAG_EPHEMERAL) {
902 			rem_um_region(uctx, r);
903 			umap_remove_region(&uctx->vm_info, r);
904 		}
905 	}
906 }
907 
908 static void check_param_map_empty(struct user_mode_ctx *uctx __maybe_unused)
909 {
910 	struct vm_region *r = NULL;
911 
912 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
913 		assert(!(r->flags & VM_FLAG_EPHEMERAL));
914 }
915 
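/*
 * Find the user virtual address at which the parameter memory reference
 * @mem was mapped, by searching the ephemeral regions for the one backed
 * by the same mobj and covering its offset.
 */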
916 static TEE_Result param_mem_to_user_va(struct user_mode_ctx *uctx,
917 				       struct param_mem *mem, void **user_va)
918 {
919 	struct vm_region *region = NULL;
920 
921 	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
922 		vaddr_t va = 0;
923 		size_t phys_offs = 0;
924 
925 		if (!(region->flags & VM_FLAG_EPHEMERAL))
926 			continue;
927 		if (mem->mobj != region->mobj)
928 			continue;
929 
930 		phys_offs = mobj_get_phys_offs(mem->mobj,
931 					       CORE_MMU_USER_PARAM_SIZE);
932 		phys_offs += mem->offs;
933 		if (phys_offs < region->offset)
934 			continue;
935 		if (phys_offs >= (region->offset + region->size))
936 			continue;
937 		va = region->va + phys_offs - region->offset;
938 		*user_va = (void *)va;
939 		return TEE_SUCCESS;
940 	}
941 	return TEE_ERROR_GENERIC;
942 }
943 
944 static int cmp_param_mem(const void *a0, const void *a1)
945 {
946 	const struct param_mem *m1 = a1;
947 	const struct param_mem *m0 = a0;
948 	int ret;
949 
950 	/* Make sure that invalid param_mem entries are placed last in the array */
951 	if (!m0->mobj && !m1->mobj)
952 		return 0;
953 	if (!m0->mobj)
954 		return 1;
955 	if (!m1->mobj)
956 		return -1;
957 
958 	ret = CMP_TRILEAN(mobj_is_secure(m0->mobj), mobj_is_secure(m1->mobj));
959 	if (ret)
960 		return ret;
961 
962 	ret = CMP_TRILEAN((vaddr_t)m0->mobj, (vaddr_t)m1->mobj);
963 	if (ret)
964 		return ret;
965 
966 	ret = CMP_TRILEAN(m0->offs, m1->offs);
967 	if (ret)
968 		return ret;
969 
970 	return CMP_TRILEAN(m0->size, m1->size);
971 }
972 
973 TEE_Result vm_map_param(struct user_mode_ctx *uctx, struct tee_ta_param *param,
974 			void *param_va[TEE_NUM_PARAMS])
975 {
976 	TEE_Result res = TEE_SUCCESS;
977 	size_t n;
978 	size_t m;
979 	struct param_mem mem[TEE_NUM_PARAMS];
980 
981 	memset(mem, 0, sizeof(mem));
982 	for (n = 0; n < TEE_NUM_PARAMS; n++) {
983 		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
984 		size_t phys_offs;
985 
986 		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
987 		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
988 		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
989 			continue;
990 		phys_offs = mobj_get_phys_offs(param->u[n].mem.mobj,
991 					       CORE_MMU_USER_PARAM_SIZE);
992 		mem[n].mobj = param->u[n].mem.mobj;
993 		mem[n].offs = ROUNDDOWN(phys_offs + param->u[n].mem.offs,
994 					CORE_MMU_USER_PARAM_SIZE);
995 		mem[n].size = ROUNDUP(phys_offs + param->u[n].mem.offs -
996 				      mem[n].offs + param->u[n].mem.size,
997 				      CORE_MMU_USER_PARAM_SIZE);
998 		/*
999 		 * For size 0 (raw pointer parameter), use the minimum size
1000 		 * to allow the address to be mapped.
1001 		 */
1002 		if (!mem[n].size)
1003 			mem[n].size = CORE_MMU_USER_PARAM_SIZE;
1004 	}
1005 
1006 	/*
1007 	 * Sort arguments so NULL mobj is last, secure mobjs first, then by
1008 	 * mobj pointer value since those entries can't be merged either,
1009 	 * finally by offset.
1010 	 *
1011 	 * This should result in a list where all mergeable entries are
1012 	 * next to each other and unused/invalid entries are at the end.
1013 	 */
1014 	qsort(mem, TEE_NUM_PARAMS, sizeof(struct param_mem), cmp_param_mem);
1015 
1016 	for (n = 1, m = 0; n < TEE_NUM_PARAMS && mem[n].mobj; n++) {
1017 		if (mem[n].mobj == mem[m].mobj &&
1018 		    (mem[n].offs == (mem[m].offs + mem[m].size) ||
1019 		     core_is_buffer_intersect(mem[m].offs, mem[m].size,
1020 					      mem[n].offs, mem[n].size))) {
1021 			mem[m].size = mem[n].offs + mem[n].size - mem[m].offs;
1022 			continue;
1023 		}
1024 		m++;
1025 		if (n != m)
1026 			mem[m] = mem[n];
1027 	}
1028 	/*
1029 	 * We'd like 'm' to be the number of valid entries. Here 'm' is the
1030 	 * index of the last valid entry if the first entry is valid, else
1031 	 * 0.
1032 	 */
1033 	if (mem[0].mobj)
1034 		m++;
1035 
1036 	check_param_map_empty(uctx);
1037 
1038 	for (n = 0; n < m; n++) {
1039 		vaddr_t va = 0;
1040 
1041 		res = vm_map(uctx, &va, mem[n].size,
1042 			     TEE_MATTR_PRW | TEE_MATTR_URW,
1043 			     VM_FLAG_EPHEMERAL | VM_FLAG_SHAREABLE,
1044 			     mem[n].mobj, mem[n].offs);
1045 		if (res)
1046 			goto out;
1047 	}
1048 
1049 	for (n = 0; n < TEE_NUM_PARAMS; n++) {
1050 		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
1051 
1052 		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
1053 		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
1054 		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
1055 			continue;
1056 		if (!param->u[n].mem.mobj)
1057 			continue;
1058 
1059 		res = param_mem_to_user_va(uctx, &param->u[n].mem,
1060 					   param_va + n);
1061 		if (res != TEE_SUCCESS)
1062 			goto out;
1063 	}
1064 
1065 	res = alloc_pgt(uctx);
1066 out:
1067 	if (res)
1068 		vm_clean_param(uctx);
1069 
1070 	return res;
1071 }
1072 
1073 TEE_Result vm_add_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj,
1074 			vaddr_t *va)
1075 {
1076 	TEE_Result res = TEE_SUCCESS;
1077 	struct vm_region *reg = NULL;
1078 
1079 	if (!mobj_is_secure(mobj) || !mobj_is_paged(mobj))
1080 		return TEE_ERROR_BAD_PARAMETERS;
1081 
1082 	reg = calloc(1, sizeof(*reg));
1083 	if (!reg)
1084 		return TEE_ERROR_OUT_OF_MEMORY;
1085 
1086 	reg->mobj = mobj;
1087 	reg->offset = 0;
1088 	reg->va = 0;
1089 	reg->size = ROUNDUP(mobj->size, SMALL_PAGE_SIZE);
1090 	reg->attr = TEE_MATTR_SECURE;
1091 
1092 	res = umap_add_region(&uctx->vm_info, reg, 0, 0, 0);
1093 	if (res) {
1094 		free(reg);
1095 		return res;
1096 	}
1097 
1098 	res = alloc_pgt(uctx);
1099 	if (res)
1100 		umap_remove_region(&uctx->vm_info, reg);
1101 	else
1102 		*va = reg->va;
1103 
1104 	return res;
1105 }
1106 
1107 void vm_rem_rwmem(struct user_mode_ctx *uctx, struct mobj *mobj, vaddr_t va)
1108 {
1109 	struct vm_region *r = NULL;
1110 
1111 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1112 		if (r->mobj == mobj && r->va == va) {
1113 			rem_um_region(uctx, r);
1114 			umap_remove_region(&uctx->vm_info, r);
1115 			return;
1116 		}
1117 	}
1118 }
1119 
1120 void vm_info_final(struct user_mode_ctx *uctx)
1121 {
1122 	if (!uctx->vm_info.asid)
1123 		return;
1124 
1125 	pgt_flush(uctx);
1126 	tee_pager_rem_um_regions(uctx);
1127 
1128 	/* Clear MMU entries to avoid a clash when the ASID is reused */
1129 	tlbi_asid(uctx->vm_info.asid);
1130 
1131 	asid_free(uctx->vm_info.asid);
1132 	while (!TAILQ_EMPTY(&uctx->vm_info.regions))
1133 		umap_remove_region(&uctx->vm_info,
1134 				   TAILQ_FIRST(&uctx->vm_info.regions));
1135 	memset(&uctx->vm_info, 0, sizeof(uctx->vm_info));
1136 }
1137 
1138 /* return true only if buffer fits inside TA private memory */
1139 bool vm_buf_is_inside_um_private(const struct user_mode_ctx *uctx,
1140 				 const void *va, size_t size)
1141 {
1142 	struct vm_region *r = NULL;
1143 
1144 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1145 		if (r->flags & VM_FLAGS_NONPRIV)
1146 			continue;
1147 		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size))
1148 			return true;
1149 	}
1150 
1151 	return false;
1152 }
1153 
1154 /* return true only if buffer intersects TA private memory */
1155 bool vm_buf_intersects_um_private(const struct user_mode_ctx *uctx,
1156 				  const void *va, size_t size)
1157 {
1158 	struct vm_region *r = NULL;
1159 
1160 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1161 		if (r->flags & VM_FLAGS_NONPRIV)
1162 			continue;
1163 		if (core_is_buffer_intersect((vaddr_t)va, size, r->va, r->size))
1164 			return true;
1165 	}
1166 
1167 	return false;
1168 }
1169 
1170 TEE_Result vm_buf_to_mboj_offs(const struct user_mode_ctx *uctx,
1171 			       const void *va, size_t size,
1172 			       struct mobj **mobj, size_t *offs)
1173 {
1174 	struct vm_region *r = NULL;
1175 
1176 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1177 		if (!r->mobj)
1178 			continue;
1179 		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size)) {
1180 			size_t poffs;
1181 
1182 			poffs = mobj_get_phys_offs(r->mobj,
1183 						   CORE_MMU_USER_PARAM_SIZE);
1184 			*mobj = r->mobj;
1185 			*offs = (vaddr_t)va - r->va + r->offset - poffs;
1186 			return TEE_SUCCESS;
1187 		}
1188 	}
1189 
1190 	return TEE_ERROR_BAD_PARAMETERS;
1191 }
1192 
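/*
 * Translate the user virtual address @ua of @uctx into a physical
 * address and/or the attributes of the region mapping it. Either output
 * pointer may be NULL. Returns TEE_ERROR_ACCESS_DENIED if @ua isn't
 * mapped.
 */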
1193 static TEE_Result tee_mmu_user_va2pa_attr(const struct user_mode_ctx *uctx,
1194 					  void *ua, paddr_t *pa, uint32_t *attr)
1195 {
1196 	struct vm_region *region = NULL;
1197 
1198 	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
1199 		if (!core_is_buffer_inside((vaddr_t)ua, 1, region->va,
1200 					   region->size))
1201 			continue;
1202 
1203 		if (pa) {
1204 			TEE_Result res;
1205 			paddr_t p;
1206 			size_t offset;
1207 			size_t granule;
1208 
1209 			/*
1210 			 * The mobj and the input user address may each
1211 			 * include a specific offset-in-granule position.
1212 			 * Drop both to get the target physical page base
1213 			 * address, then apply only the user address
1214 			 * offset-in-granule. The smallest mapping granule
1215 			 * is the small page.
1216 			 */
1217 			granule = MAX(region->mobj->phys_granule,
1218 				      (size_t)SMALL_PAGE_SIZE);
1219 			assert(!granule || IS_POWER_OF_TWO(granule));
1220 
1221 			offset = region->offset +
1222 				 ROUNDDOWN((vaddr_t)ua - region->va, granule);
1223 
1224 			res = mobj_get_pa(region->mobj, offset, granule, &p);
1225 			if (res != TEE_SUCCESS)
1226 				return res;
1227 
1228 			*pa = p | ((vaddr_t)ua & (granule - 1));
1229 		}
1230 		if (attr)
1231 			*attr = region->attr;
1232 
1233 		return TEE_SUCCESS;
1234 	}
1235 
1236 	return TEE_ERROR_ACCESS_DENIED;
1237 }
1238 
1239 TEE_Result vm_va2pa(const struct user_mode_ctx *uctx, void *ua, paddr_t *pa)
1240 {
1241 	return tee_mmu_user_va2pa_attr(uctx, ua, pa, NULL);
1242 }
1243 
1244 void *vm_pa2va(const struct user_mode_ctx *uctx, paddr_t pa, size_t pa_size)
1245 {
1246 	paddr_t p = 0;
1247 	struct vm_region *region = NULL;
1248 
1249 	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
1250 		size_t granule = 0;
1251 		size_t size = 0;
1252 		size_t ofs = 0;
1253 
1254 		/* pa2va is expected only for memory tracked through mobj */
1255 		if (!region->mobj)
1256 			continue;
1257 
1258 		/* Physically granulated memory object must be scanned */
1259 		granule = region->mobj->phys_granule;
1260 		assert(!granule || IS_POWER_OF_TWO(granule));
1261 
1262 		for (ofs = region->offset; ofs < region->size; ofs += size) {
1263 
1264 			if (granule) {
1265 				/* From current offset to buffer/granule end */
1266 				size = granule - (ofs & (granule - 1));
1267 
1268 				if (size > (region->size - ofs))
1269 					size = region->size - ofs;
1270 			} else {
1271 				size = region->size;
1272 			}
1273 
1274 			if (mobj_get_pa(region->mobj, ofs, granule, &p))
1275 				continue;
1276 
1277 			if (core_is_buffer_inside(pa, pa_size, p, size)) {
1278 				/* Remove region offset (mobj phys offset) */
1279 				ofs -= region->offset;
1280 				/* Get offset-in-granule */
1281 				p = pa - p;
1282 
1283 				return (void *)(region->va + ofs + (vaddr_t)p);
1284 			}
1285 		}
1286 	}
1287 
1288 	return NULL;
1289 }
1290 
1291 TEE_Result vm_check_access_rights(const struct user_mode_ctx *uctx,
1292 				  uint32_t flags, uaddr_t uaddr, size_t len)
1293 {
1294 	uaddr_t a = 0;
1295 	uaddr_t end_addr = 0;
1296 	size_t addr_incr = MIN(CORE_MMU_USER_CODE_SIZE,
1297 			       CORE_MMU_USER_PARAM_SIZE);
1298 
1299 	if (ADD_OVERFLOW(uaddr, len, &end_addr))
1300 		return TEE_ERROR_ACCESS_DENIED;
1301 
1302 	if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
1303 	    (flags & TEE_MEMORY_ACCESS_SECURE))
1304 		return TEE_ERROR_ACCESS_DENIED;
1305 
1306 	/*
1307 	 * Rely on the TA private memory test to check whether the address
1308 	 * range is private to the TA or not.
1309 	 */
1310 	if (!(flags & TEE_MEMORY_ACCESS_ANY_OWNER) &&
1311 	   !vm_buf_is_inside_um_private(uctx, (void *)uaddr, len))
1312 		return TEE_ERROR_ACCESS_DENIED;
1313 
1314 	for (a = ROUNDDOWN(uaddr, addr_incr); a < end_addr; a += addr_incr) {
1315 		uint32_t attr;
1316 		TEE_Result res;
1317 
1318 		res = tee_mmu_user_va2pa_attr(uctx, (void *)a, NULL, &attr);
1319 		if (res != TEE_SUCCESS)
1320 			return res;
1321 
1322 		if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
1323 		    (attr & TEE_MATTR_SECURE))
1324 			return TEE_ERROR_ACCESS_DENIED;
1325 
1326 		if ((flags & TEE_MEMORY_ACCESS_SECURE) &&
1327 		    !(attr & TEE_MATTR_SECURE))
1328 			return TEE_ERROR_ACCESS_DENIED;
1329 
1330 		if ((flags & TEE_MEMORY_ACCESS_WRITE) && !(attr & TEE_MATTR_UW))
1331 			return TEE_ERROR_ACCESS_DENIED;
1332 		if ((flags & TEE_MEMORY_ACCESS_READ) && !(attr & TEE_MATTR_UR))
1333 			return TEE_ERROR_ACCESS_DENIED;
1334 	}
1335 
1336 	return TEE_SUCCESS;
1337 }
1338 
1339 void vm_set_ctx(struct ts_ctx *ctx)
1340 {
1341 	struct thread_specific_data *tsd = thread_get_tsd();
1342 	struct user_mode_ctx *uctx = NULL;
1343 
1344 	core_mmu_set_user_map(NULL);
1345 
1346 	if (is_user_mode_ctx(tsd->ctx)) {
1347 		/*
1348 		 * We're coming from a user mode context so we must make
1349 		 * the pgts available for reuse.
1350 		 */
1351 		uctx = to_user_mode_ctx(tsd->ctx);
1352 		pgt_put_all(uctx);
1353 	}
1354 
1355 	if (is_user_mode_ctx(ctx)) {
1356 		struct core_mmu_user_map map = { };
1357 
1358 		uctx = to_user_mode_ctx(ctx);
1359 		core_mmu_create_user_map(uctx, &map);
1360 		core_mmu_set_user_map(&map);
1361 		tee_pager_assign_um_tables(uctx);
1362 	}
1363 	tsd->ctx = ctx;
1364 }
1365 
1366 struct mobj *vm_get_mobj(struct user_mode_ctx *uctx, vaddr_t va, size_t *len,
1367 			 uint16_t *prot, size_t *offs)
1368 {
1369 	struct vm_region *r = NULL;
1370 	size_t r_offs = 0;
1371 
1372 	if (!len || ((*len | va) & SMALL_PAGE_MASK))
1373 		return NULL;
1374 
1375 	r = find_vm_region(&uctx->vm_info, va);
1376 	if (!r)
1377 		return NULL;
1378 
1379 	r_offs = va - r->va;
1380 
1381 	*len = MIN(r->size - r_offs, *len);
1382 	*offs = r->offset + r_offs;
1383 	*prot = r->attr & TEE_MATTR_PROT_MASK;
1384 	return mobj_get(r->mobj);
1385 }
1386