xref: /optee_os/core/mm/vm.c (revision c0b4fb69b55b2d16d992d8387ffc69b4c810e0d6)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2021, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  * Copyright (c) 2021, Arm Limited
6  */
7 
8 #include <assert.h>
9 #include <config.h>
10 #include <initcall.h>
11 #include <kernel/panic.h>
12 #include <kernel/spinlock.h>
13 #include <kernel/tee_common.h>
14 #include <kernel/tee_misc.h>
15 #include <kernel/tlb_helpers.h>
16 #include <kernel/user_mode_ctx.h>
17 #include <mm/core_memprot.h>
18 #include <mm/core_mmu.h>
19 #include <mm/mobj.h>
20 #include <mm/pgt_cache.h>
21 #include <mm/tee_mm.h>
22 #include <mm/tee_mmu_types.h>
23 #include <mm/tee_pager.h>
24 #include <mm/vm.h>
25 #include <stdlib.h>
26 #include <tee_api_defines_extensions.h>
27 #include <tee_api_types.h>
28 #include <trace.h>
29 #include <types_ext.h>
30 #include <user_ta_header.h>
31 #include <util.h>
32 
33 #ifdef CFG_PL310
34 #include <kernel/tee_l2cc_mutex.h>
35 #endif
36 
37 #define TEE_MMU_UDATA_ATTR		(TEE_MATTR_VALID_BLOCK | \
38 					 TEE_MATTR_PRW | TEE_MATTR_URW | \
39 					 TEE_MATTR_SECURE)
40 #define TEE_MMU_UCODE_ATTR		(TEE_MATTR_VALID_BLOCK | \
41 					 TEE_MATTR_PRW | TEE_MATTR_URWX | \
42 					 TEE_MATTR_SECURE)
43 
44 #define TEE_MMU_UCACHE_DEFAULT_ATTR	(TEE_MATTR_MEM_TYPE_CACHED << \
45 					 TEE_MATTR_MEM_TYPE_SHIFT)
46 
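/*
 * Pick a virtual address for @reg in the gap between @prev_reg and
 * @next_reg, leaving @pad_begin/@pad_end of extra space and aligning the
 * start to @granul. An extra unmapped page is inserted when the
 * EPHEMERAL/PERMANENT/SHAREABLE flags differ from a neighbour, and
 * without LPAE a secure/non-secure boundary forces pgdir granularity.
 * Returns the selected address, or 0 if @reg cannot be placed here.
 */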
47 static vaddr_t select_va_in_range(const struct vm_region *prev_reg,
48 				  const struct vm_region *next_reg,
49 				  const struct vm_region *reg,
50 				  size_t pad_begin, size_t pad_end,
51 				  size_t granul)
52 {
53 	const uint32_t f = VM_FLAG_EPHEMERAL | VM_FLAG_PERMANENT |
54 			    VM_FLAG_SHAREABLE;
55 	vaddr_t begin_va = 0;
56 	vaddr_t end_va = 0;
57 	size_t pad = 0;
58 
59 	/*
60 	 * Insert an unmapped entry to separate regions with differing
61 	 * VM_FLAG_EPHEMERAL, VM_FLAG_PERMANENT or VM_FLAG_SHAREABLE
62 	 * bits, as such regions must never be contiguous with each other.
63 	 */
64 	if (prev_reg->flags && (prev_reg->flags & f) != (reg->flags & f))
65 		pad = SMALL_PAGE_SIZE;
66 	else
67 		pad = 0;
68 
69 #ifndef CFG_WITH_LPAE
70 	if ((prev_reg->attr & TEE_MATTR_SECURE) !=
71 	    (reg->attr & TEE_MATTR_SECURE))
72 		granul = CORE_MMU_PGDIR_SIZE;
73 #endif
74 
75 	if (ADD_OVERFLOW(prev_reg->va, prev_reg->size, &begin_va) ||
76 	    ADD_OVERFLOW(begin_va, pad_begin, &begin_va) ||
77 	    ADD_OVERFLOW(begin_va, pad, &begin_va) ||
78 	    ROUNDUP2_OVERFLOW(begin_va, granul, &begin_va))
79 		return 0;
80 
81 	if (reg->va) {
82 		if (reg->va < begin_va)
83 			return 0;
84 		begin_va = reg->va;
85 	}
86 
87 	if (next_reg->flags && (next_reg->flags & f) != (reg->flags & f))
88 		pad = SMALL_PAGE_SIZE;
89 	else
90 		pad = 0;
91 
92 #ifndef CFG_WITH_LPAE
93 	if ((next_reg->attr & TEE_MATTR_SECURE) !=
94 	    (reg->attr & TEE_MATTR_SECURE))
95 		granul = CORE_MMU_PGDIR_SIZE;
96 #endif
97 	if (ADD_OVERFLOW(begin_va, reg->size, &end_va) ||
98 	    ADD_OVERFLOW(end_va, pad_end, &end_va) ||
99 	    ADD_OVERFLOW(end_va, pad, &end_va) ||
100 	    ROUNDUP2_OVERFLOW(end_va, granul, &end_va))
101 		return 0;
102 
103 	if (end_va <= next_reg->va) {
104 		assert(!reg->va || reg->va == begin_va);
105 		return begin_va;
106 	}
107 
108 	return 0;
109 }
110 
111 static TEE_Result alloc_pgt(struct user_mode_ctx *uctx)
112 {
113 	struct thread_specific_data *tsd __maybe_unused;
114 
115 	if (!pgt_check_avail(uctx)) {
116 		EMSG("Page tables are not available");
117 		return TEE_ERROR_OUT_OF_MEMORY;
118 	}
119 
120 #ifdef CFG_PAGED_USER_TA
121 	tsd = thread_get_tsd();
122 	if (uctx->ts_ctx == tsd->ctx) {
123 		/*
124 		 * The supplied uctx is the currently active context, so
125 		 * allocate the page tables too as the pager needs to use
126 		 * them soon.
126 		 */
127 		pgt_get_all(uctx);
128 	}
129 #endif
130 
131 	return TEE_SUCCESS;
132 }
133 
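/*
 * Unmap @r from the user mode context: paged regions are handed back to
 * the pager, others get their page table entries cleared and their TLB
 * entries invalidated. Translation tables that are no longer used by
 * this or any neighbouring region are then flushed.
 */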
134 static void rem_um_region(struct user_mode_ctx *uctx, struct vm_region *r)
135 {
136 	vaddr_t begin = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE);
137 	vaddr_t last = ROUNDUP(r->va + r->size, CORE_MMU_PGDIR_SIZE);
138 	struct vm_region *r2 = NULL;
139 
140 	if (mobj_is_paged(r->mobj)) {
141 		tee_pager_rem_um_region(uctx, r->va, r->size);
142 	} else {
143 		pgt_clear_range(uctx, r->va, r->va + r->size);
144 		tlbi_va_range_asid(r->va, r->size, SMALL_PAGE_SIZE,
145 				   uctx->vm_info.asid);
146 	}
147 
148 	/*
149 	 * Figure out how much virtual memory on a CORE_MMU_PGDIR_SIZE
150 	 * granularity can be freed. Only completely unused
151 	 * CORE_MMU_PGDIR_SIZE ranges can be supplied to pgt_flush_range().
152 	 *
153 	 * Note that there is no margin for error here, flushing either too
154 	 * many or too few translation tables can be fatal.
155 	 */
156 	r2 = TAILQ_NEXT(r, link);
157 	if (r2)
158 		last = MIN(last, ROUNDDOWN(r2->va, CORE_MMU_PGDIR_SIZE));
159 
160 	r2 = TAILQ_PREV(r, vm_region_head, link);
161 	if (r2)
162 		begin = MAX(begin,
163 			    ROUNDUP(r2->va + r2->size, CORE_MMU_PGDIR_SIZE));
164 
165 	if (begin < last)
166 		pgt_flush_range(uctx, begin, last);
167 }
168 
169 static void set_pa_range(struct core_mmu_table_info *ti, vaddr_t va,
170 			 paddr_t pa, size_t size, uint32_t attr)
171 {
172 	unsigned int end = core_mmu_va2idx(ti, va + size);
173 	unsigned int idx = core_mmu_va2idx(ti, va);
174 
175 	while (idx < end) {
176 		core_mmu_set_entry(ti, idx, pa, attr);
177 		idx++;
178 		pa += BIT64(ti->shift);
179 	}
180 }
181 
182 static void set_reg_in_table(struct core_mmu_table_info *ti,
183 			     struct vm_region *r)
184 {
185 	vaddr_t va = MAX(r->va, ti->va_base);
186 	vaddr_t end = MIN(r->va + r->size, ti->va_base + CORE_MMU_PGDIR_SIZE);
187 	size_t sz = MIN(end - va, mobj_get_phys_granule(r->mobj));
188 	size_t granule = BIT(ti->shift);
189 	size_t offset = 0;
190 	paddr_t pa = 0;
191 
192 	while (va < end) {
193 		offset = va - r->va + r->offset;
194 		if (mobj_get_pa(r->mobj, offset, granule, &pa))
195 			panic("Failed to get PA");
196 		set_pa_range(ti, va, pa, sz, r->attr);
197 		va += sz;
198 	}
199 }
200 
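/*
 * Write the mapping described by @r into the user page tables, either by
 * walking the context's already allocated pgts or by updating any
 * matching pgts found in the cache list. Paged mobjs are handled by the
 * pager and must never be passed here.
 */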
201 static void set_um_region(struct user_mode_ctx *uctx, struct vm_region *r)
202 {
203 	struct pgt *p = SLIST_FIRST(&uctx->pgt_cache);
204 	struct core_mmu_table_info ti = { };
205 
206 	assert(!mobj_is_paged(r->mobj));
207 
208 	core_mmu_set_info_table(&ti, CORE_MMU_PGDIR_LEVEL, 0, NULL);
209 
210 	if (p) {
211 		/* All the pgts are already allocated, update in place */
212 		do {
213 			ti.va_base = p->vabase;
214 			ti.table = p->tbl;
215 			set_reg_in_table(&ti, r);
216 			p = SLIST_NEXT(p, link);
217 		} while (p);
218 	} else {
219 		/*
220 		 * We may have a few pgts in the cache list, update the
221 		 * ones found.
222 		 */
223 		for (ti.va_base = ROUNDDOWN(r->va, CORE_MMU_PGDIR_SIZE);
224 		     ti.va_base < r->va + r->size;
225 		     ti.va_base += CORE_MMU_PGDIR_SIZE) {
226 			p = pgt_pop_from_cache_list(ti.va_base, uctx->ts_ctx);
227 			if (!p)
228 				continue;
229 			ti.table = p->tbl;
230 			set_reg_in_table(&ti, r);
231 			pgt_push_to_cache_list(p);
232 		}
233 	}
234 }
235 
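/*
 * Insert @reg into @vmi at a free spot found by select_va_in_range(),
 * honouring a fixed reg->va when one is requested. reg->va, reg->size
 * and the padding must be small-page aligned and the backing mobj must
 * cover the whole mapped range.
 */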
236 static TEE_Result umap_add_region(struct vm_info *vmi, struct vm_region *reg,
237 				  size_t pad_begin, size_t pad_end,
238 				  size_t align)
239 {
240 	struct vm_region dummy_first_reg = { };
241 	struct vm_region dummy_last_reg = { };
242 	struct vm_region *r = NULL;
243 	struct vm_region *prev_r = NULL;
244 	vaddr_t va_range_base = 0;
245 	size_t va_range_size = 0;
246 	size_t granul;
247 	vaddr_t va = 0;
248 	size_t offs_plus_size = 0;
249 
250 	core_mmu_get_user_va_range(&va_range_base, &va_range_size);
251 	dummy_first_reg.va = va_range_base;
252 	dummy_last_reg.va = va_range_base + va_range_size;
253 
254 	/* Check alignment, it has to be at least SMALL_PAGE based */
255 	if ((reg->va | reg->size | pad_begin | pad_end) & SMALL_PAGE_MASK)
256 		return TEE_ERROR_ACCESS_CONFLICT;
257 
258 	/* Check that the mobj is defined for the entire range */
259 	if (ADD_OVERFLOW(reg->offset, reg->size, &offs_plus_size))
260 		return TEE_ERROR_BAD_PARAMETERS;
261 	if (offs_plus_size > ROUNDUP(reg->mobj->size, SMALL_PAGE_SIZE))
262 		return TEE_ERROR_BAD_PARAMETERS;
263 
264 	granul = MAX(align, SMALL_PAGE_SIZE);
265 	if (!IS_POWER_OF_TWO(granul))
266 		return TEE_ERROR_BAD_PARAMETERS;
267 
268 	prev_r = &dummy_first_reg;
269 	TAILQ_FOREACH(r, &vmi->regions, link) {
270 		va = select_va_in_range(prev_r, r, reg, pad_begin, pad_end,
271 					granul);
272 		if (va) {
273 			reg->va = va;
274 			TAILQ_INSERT_BEFORE(r, reg, link);
275 			return TEE_SUCCESS;
276 		}
277 		prev_r = r;
278 	}
279 
280 	r = TAILQ_LAST(&vmi->regions, vm_region_head);
281 	if (!r)
282 		r = &dummy_first_reg;
283 	va = select_va_in_range(r, &dummy_last_reg, reg, pad_begin, pad_end,
284 				granul);
285 	if (va) {
286 		reg->va = va;
287 		TAILQ_INSERT_TAIL(&vmi->regions, reg, link);
288 		return TEE_SUCCESS;
289 	}
290 
291 	return TEE_ERROR_ACCESS_CONFLICT;
292 }
293 
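/*
 * Map @len bytes of @mobj starting at offset @offs into the user mode
 * context with protection @prot, optionally reserving @pad_begin/@pad_end
 * bytes around the region and aligning it to @align. On success *va holds
 * the selected virtual address and, if the context is currently active,
 * the new mapping is installed immediately.
 *
 * Illustrative call sequence (a sketch only; the mobj "m" and size "sz"
 * are hypothetical and not taken from this file), using the zero-padding
 * vm_map() wrapper:
 *
 *	vaddr_t va = 0;
 *	TEE_Result res = vm_map(uctx, &va, sz,
 *				TEE_MATTR_URW | TEE_MATTR_PRW, 0, m, 0);
 *
 *	if (!res)
 *		res = vm_unmap(uctx, va, sz);
 */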
294 TEE_Result vm_map_pad(struct user_mode_ctx *uctx, vaddr_t *va, size_t len,
295 		      uint32_t prot, uint32_t flags, struct mobj *mobj,
296 		      size_t offs, size_t pad_begin, size_t pad_end,
297 		      size_t align)
298 {
299 	TEE_Result res = TEE_SUCCESS;
300 	struct vm_region *reg = NULL;
301 	uint32_t attr = 0;
302 
303 	if (prot & ~TEE_MATTR_PROT_MASK)
304 		return TEE_ERROR_BAD_PARAMETERS;
305 
306 	reg = calloc(1, sizeof(*reg));
307 	if (!reg)
308 		return TEE_ERROR_OUT_OF_MEMORY;
309 
310 	if (!mobj_is_paged(mobj)) {
311 		uint32_t mem_type = 0;
312 
313 		res = mobj_get_mem_type(mobj, &mem_type);
314 		if (res)
315 			goto err_free_reg;
316 		attr |= mem_type << TEE_MATTR_MEM_TYPE_SHIFT;
317 	}
318 	attr |= TEE_MATTR_VALID_BLOCK;
319 	if (mobj_is_secure(mobj))
320 		attr |= TEE_MATTR_SECURE;
321 
322 	reg->mobj = mobj_get(mobj);
323 	reg->offset = offs;
324 	reg->va = *va;
325 	reg->size = ROUNDUP(len, SMALL_PAGE_SIZE);
326 	reg->attr = attr | prot;
327 	reg->flags = flags;
328 
329 	res = umap_add_region(&uctx->vm_info, reg, pad_begin, pad_end, align);
330 	if (res)
331 		goto err_put_mobj;
332 
333 	res = alloc_pgt(uctx);
334 	if (res)
335 		goto err_rem_reg;
336 
337 	if (mobj_is_paged(mobj)) {
338 		struct fobj *fobj = mobj_get_fobj(mobj);
339 
340 		if (!fobj) {
341 			res = TEE_ERROR_GENERIC;
342 			goto err_rem_reg;
343 		}
344 
345 		res = tee_pager_add_um_region(uctx, reg->va, fobj, prot);
346 		fobj_put(fobj);
347 		if (res)
348 			goto err_rem_reg;
349 	} else {
350 		set_um_region(uctx, reg);
351 	}
352 
353 	/*
354 	 * If the context is currently active, set it again to update
355 	 * the mapping.
356 	 */
357 	if (thread_get_tsd()->ctx == uctx->ts_ctx)
358 		vm_set_ctx(uctx->ts_ctx);
359 
360 	*va = reg->va;
361 
362 	return TEE_SUCCESS;
363 
364 err_rem_reg:
365 	TAILQ_REMOVE(&uctx->vm_info.regions, reg, link);
366 err_put_mobj:
367 	mobj_put(reg->mobj);
368 err_free_reg:
369 	free(reg);
370 	return res;
371 }
372 
373 static struct vm_region *find_vm_region(struct vm_info *vm_info, vaddr_t va)
374 {
375 	struct vm_region *r = NULL;
376 
377 	TAILQ_FOREACH(r, &vm_info->regions, link)
378 		if (va >= r->va && va < r->va + r->size)
379 			return r;
380 
381 	return NULL;
382 }
383 
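/*
 * Check that [va, va + len) is fully covered by regions that follow each
 * other without holes, starting at @r0. The optional @cmp_regs callback
 * lets callers impose extra constraints, for instance identical flags or
 * protection bits across the regions.
 */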
384 static bool va_range_is_contiguous(struct vm_region *r0, vaddr_t va,
385 				   size_t len,
386 				   bool (*cmp_regs)(const struct vm_region *r0,
387 						    const struct vm_region *r,
388 						    const struct vm_region *rn))
389 {
390 	struct vm_region *r = r0;
391 	vaddr_t end_va = 0;
392 
393 	if (ADD_OVERFLOW(va, len, &end_va))
394 		return false;
395 
396 	while (true) {
397 		struct vm_region *r_next = TAILQ_NEXT(r, link);
398 		vaddr_t r_end_va = r->va + r->size;
399 
400 		if (r_end_va >= end_va)
401 			return true;
402 		if (!r_next)
403 			return false;
404 		if (r_end_va != r_next->va)
405 			return false;
406 		if (cmp_regs && !cmp_regs(r0, r, r_next))
407 			return false;
408 		r = r_next;
409 	}
410 }
411 
412 static TEE_Result split_vm_region(struct user_mode_ctx *uctx,
413 				  struct vm_region *r, vaddr_t va)
414 {
415 	struct vm_region *r2 = NULL;
416 	size_t diff = va - r->va;
417 
418 	assert(diff && diff < r->size);
419 
420 	r2 = calloc(1, sizeof(*r2));
421 	if (!r2)
422 		return TEE_ERROR_OUT_OF_MEMORY;
423 
424 	if (mobj_is_paged(r->mobj)) {
425 		TEE_Result res = tee_pager_split_um_region(uctx, va);
426 
427 		if (res) {
428 			free(r2);
429 			return res;
430 		}
431 	}
432 
433 	r2->mobj = mobj_get(r->mobj);
434 	r2->offset = r->offset + diff;
435 	r2->va = va;
436 	r2->size = r->size - diff;
437 	r2->attr = r->attr;
438 	r2->flags = r->flags;
439 
440 	r->size = diff;
441 
442 	TAILQ_INSERT_AFTER(&uctx->vm_info.regions, r, r2, link);
443 
444 	return TEE_SUCCESS;
445 }
446 
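/*
 * Make the range [va, va + len) start and end on region boundaries by
 * splitting the first and last region if needed, so callers can operate
 * on whole regions only. @cmp_regs is forwarded to
 * va_range_is_contiguous() and *r0_ret receives the first region of the
 * range.
 */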
447 static TEE_Result split_vm_range(struct user_mode_ctx *uctx, vaddr_t va,
448 				 size_t len,
449 				 bool (*cmp_regs)(const struct vm_region *r0,
450 						  const struct vm_region *r,
451 						  const struct vm_region *rn),
452 				 struct vm_region **r0_ret)
453 {
454 	TEE_Result res = TEE_SUCCESS;
455 	struct vm_region *r = NULL;
456 	vaddr_t end_va = 0;
457 
458 	if ((va | len) & SMALL_PAGE_MASK)
459 		return TEE_ERROR_BAD_PARAMETERS;
460 
461 	if (ADD_OVERFLOW(va, len, &end_va))
462 		return TEE_ERROR_BAD_PARAMETERS;
463 
464 	/*
465 	 * Find first vm_region in range and check that the entire range is
466 	 * contiguous.
467 	 */
468 	r = find_vm_region(&uctx->vm_info, va);
469 	if (!r || !va_range_is_contiguous(r, va, len, cmp_regs))
470 		return TEE_ERROR_BAD_PARAMETERS;
471 
472 	/*
473 	 * If needed, split regions so that va and len cover only complete
474 	 * regions.
475 	 */
476 	if (va != r->va) {
477 		res = split_vm_region(uctx, r, va);
478 		if (res)
479 			return res;
480 		r = TAILQ_NEXT(r, link);
481 	}
482 
483 	*r0_ret = r;
484 	r = find_vm_region(&uctx->vm_info, va + len - 1);
485 	if (!r)
486 		return TEE_ERROR_BAD_PARAMETERS;
487 	if (end_va != r->va + r->size) {
488 		res = split_vm_region(uctx, r, end_va);
489 		if (res)
490 			return res;
491 	}
492 
493 	return TEE_SUCCESS;
494 }
495 
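/*
 * Undo earlier splits in [va, va + len): the pager is asked to merge its
 * regions and adjacent vm_regions backed by the same mobj with identical
 * flags, attributes and contiguous offsets are folded back into one.
 */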
496 static void merge_vm_range(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
497 {
498 	struct vm_region *r_next = NULL;
499 	struct vm_region *r = NULL;
500 	vaddr_t end_va = 0;
501 
502 	if (ADD_OVERFLOW(va, len, &end_va))
503 		return;
504 
505 	tee_pager_merge_um_region(uctx, va, len);
506 
507 	for (r = TAILQ_FIRST(&uctx->vm_info.regions);; r = r_next) {
508 		r_next = TAILQ_NEXT(r, link);
509 		if (!r_next)
510 			return;
511 
512 		/* Try merging with the region just before va */
513 		if (r->va + r->size < va)
514 			continue;
515 
516 		/*
517 		 * If r->va is well past our range we're done.
518 		 * Note that if it's just the page after our range we'll
519 		 * try to merge.
520 		 */
521 		if (r->va > end_va)
522 			return;
523 
524 		if (r->va + r->size != r_next->va)
525 			continue;
526 		if (r->mobj != r_next->mobj ||
527 		    r->flags != r_next->flags ||
528 		    r->attr != r_next->attr)
529 			continue;
530 		if (r->offset + r->size != r_next->offset)
531 			continue;
532 
533 		TAILQ_REMOVE(&uctx->vm_info.regions, r_next, link);
534 		r->size += r_next->size;
535 		mobj_put(r_next->mobj);
536 		free(r_next);
537 		r_next = r;
538 	}
539 }
540 
541 static bool cmp_region_for_remap(const struct vm_region *r0,
542 				 const struct vm_region *r,
543 				 const struct vm_region *rn)
544 {
545 	/*
546 	 * All the essentials have to match for remap to make sense. The
547 	 * essentials are mobj/fobj, attr and flags, and the offset must be
548 	 * contiguous.
549 	 *
550 	 * Note that vm_remap() depends on mobj/fobj to be the same.
551 	 */
552 	return r0->flags == r->flags && r0->attr == r->attr &&
553 	       r0->mobj == r->mobj && rn->offset == r->offset + r->size;
554 }
555 
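/*
 * Move the mapping of [old_va, old_va + len) to a new virtual address,
 * keeping the backing mobj, attributes and flags. *new_va may suggest an
 * address (0 lets the allocator pick one) and receives the final address.
 * On failure the original mapping is restored.
 *
 * Illustrative sketch (addresses and length are hypothetical):
 *
 *	vaddr_t new_va = 0;
 *	TEE_Result res = vm_remap(uctx, &new_va, old_va,
 *				  4 * SMALL_PAGE_SIZE, 0, 0);
 */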
556 TEE_Result vm_remap(struct user_mode_ctx *uctx, vaddr_t *new_va, vaddr_t old_va,
557 		    size_t len, size_t pad_begin, size_t pad_end)
558 {
559 	struct vm_region_head regs = TAILQ_HEAD_INITIALIZER(regs);
560 	TEE_Result res = TEE_SUCCESS;
561 	struct vm_region *r0 = NULL;
562 	struct vm_region *r = NULL;
563 	struct vm_region *r_next = NULL;
564 	struct vm_region *r_last = NULL;
565 	struct vm_region *r_first = NULL;
566 	struct fobj *fobj = NULL;
567 	vaddr_t next_va = 0;
568 
569 	assert(thread_get_tsd()->ctx == uctx->ts_ctx);
570 
571 	if (!len || ((len | old_va) & SMALL_PAGE_MASK))
572 		return TEE_ERROR_BAD_PARAMETERS;
573 
574 	res = split_vm_range(uctx, old_va, len, cmp_region_for_remap, &r0);
575 	if (res)
576 		return res;
577 
578 	if (mobj_is_paged(r0->mobj)) {
579 		fobj = mobj_get_fobj(r0->mobj);
580 		if (!fobj)
581 			panic();
582 	}
583 
584 	for (r = r0; r; r = r_next) {
585 		if (r->va + r->size > old_va + len)
586 			break;
587 		r_next = TAILQ_NEXT(r, link);
588 		rem_um_region(uctx, r);
589 		TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
590 		TAILQ_INSERT_TAIL(&regs, r, link);
591 	}
592 
593 	/*
594 	 * Synchronize the change to the translation tables. Even though the
595 	 * pager case unmaps immediately, we may still free a translation table.
596 	 */
597 	vm_set_ctx(uctx->ts_ctx);
598 
599 	r_first = TAILQ_FIRST(&regs);
600 	while (!TAILQ_EMPTY(&regs)) {
601 		r = TAILQ_FIRST(&regs);
602 		TAILQ_REMOVE(&regs, r, link);
603 		if (r_last) {
604 			r->va = r_last->va + r_last->size;
605 			res = umap_add_region(&uctx->vm_info, r, 0, 0, 0);
606 		} else {
607 			r->va = *new_va;
608 			res = umap_add_region(&uctx->vm_info, r, pad_begin,
609 					      pad_end + len - r->size, 0);
610 		}
611 		if (!res) {
612 			r_last = r;
613 			res = alloc_pgt(uctx);
614 		}
615 		if (!res) {
616 			if (!fobj)
617 				set_um_region(uctx, r);
618 			else
619 				res = tee_pager_add_um_region(uctx, r->va, fobj,
620 							      r->attr);
621 		}
622 
623 		if (res) {
624 			/*
625 			 * Something went wrong, move all the recently added
626 			 * regions back to regs for later reinsertion at
627 			 * the original spot.
628 			 */
629 			struct vm_region *r_tmp = NULL;
630 			struct vm_region *r_stop = NULL;
631 
632 			if (r != r_last) {
633 				/*
634 				 * umap_add_region() failed, move r back to
635 				 * regs before all the rest are moved back.
636 				 */
637 				TAILQ_INSERT_HEAD(&regs, r, link);
638 			}
639 			if (r_last)
640 				r_stop = TAILQ_NEXT(r_last, link);
641 			for (r = r_first; r != r_stop; r = r_next) {
642 				r_next = TAILQ_NEXT(r, link);
643 				TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
644 				if (r_tmp)
645 					TAILQ_INSERT_AFTER(&regs, r_tmp, r,
646 							   link);
647 				else
648 					TAILQ_INSERT_HEAD(&regs, r, link);
649 				r_tmp = r;
650 			}
651 
652 			goto err_restore_map;
653 		}
654 	}
655 
656 	fobj_put(fobj);
657 
658 	vm_set_ctx(uctx->ts_ctx);
659 	*new_va = r_first->va;
660 
661 	return TEE_SUCCESS;
662 
663 err_restore_map:
664 	next_va = old_va;
665 	while (!TAILQ_EMPTY(&regs)) {
666 		r = TAILQ_FIRST(&regs);
667 		TAILQ_REMOVE(&regs, r, link);
668 		r->va = next_va;
669 		next_va += r->size;
670 		if (umap_add_region(&uctx->vm_info, r, 0, 0, 0))
671 			panic("Cannot restore mapping");
672 		if (alloc_pgt(uctx))
673 			panic("Cannot restore mapping");
674 		if (fobj) {
675 			if (tee_pager_add_um_region(uctx, r->va, fobj, r->attr))
676 				panic("Cannot restore mapping");
677 		} else {
678 			set_um_region(uctx, r);
679 		}
680 	}
681 	fobj_put(fobj);
682 	vm_set_ctx(uctx->ts_ctx);
683 
684 	return res;
685 }
686 
687 static bool cmp_region_for_get_flags(const struct vm_region *r0,
688 				     const struct vm_region *r,
689 				     const struct vm_region *rn __unused)
690 {
691 	return r0->flags == r->flags;
692 }
693 
694 TEE_Result vm_get_flags(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
695 			uint32_t *flags)
696 {
697 	struct vm_region *r = NULL;
698 
699 	if (!len || ((len | va) & SMALL_PAGE_MASK))
700 		return TEE_ERROR_BAD_PARAMETERS;
701 
702 	r = find_vm_region(&uctx->vm_info, va);
703 	if (!r)
704 		return TEE_ERROR_BAD_PARAMETERS;
705 
706 	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_flags))
707 		return TEE_ERROR_BAD_PARAMETERS;
708 
709 	*flags = r->flags;
710 
711 	return TEE_SUCCESS;
712 }
713 
714 static bool cmp_region_for_get_prot(const struct vm_region *r0,
715 				    const struct vm_region *r,
716 				    const struct vm_region *rn __unused)
717 {
718 	return (r0->attr & TEE_MATTR_PROT_MASK) ==
719 	       (r->attr & TEE_MATTR_PROT_MASK);
720 }
721 
722 TEE_Result vm_get_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
723 		       uint16_t *prot)
724 {
725 	struct vm_region *r = NULL;
726 
727 	if (!len || ((len | va) & SMALL_PAGE_MASK))
728 		return TEE_ERROR_BAD_PARAMETERS;
729 
730 	r = find_vm_region(&uctx->vm_info, va);
731 	if (!r)
732 		return TEE_ERROR_BAD_PARAMETERS;
733 
734 	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_prot))
735 		return TEE_ERROR_BAD_PARAMETERS;
736 
737 	*prot = r->attr & TEE_MATTR_PROT_MASK;
738 
739 	return TEE_SUCCESS;
740 }
741 
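/*
 * Change the protection bits of [va, va + len) to @prot. The range is
 * first split into whole regions, the page tables and TLBs are updated,
 * and if the range used to be writable the data cache is cleaned and the
 * instruction cache invalidated so stale data can't later be executed.
 * Regions that end up identical are merged again afterwards.
 */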
742 TEE_Result vm_set_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
743 		       uint32_t prot)
744 {
745 	TEE_Result res = TEE_SUCCESS;
746 	struct vm_region *r0 = NULL;
747 	struct vm_region *r = NULL;
748 	bool was_writeable = false;
749 	bool need_sync = false;
750 
751 	assert(thread_get_tsd()->ctx == uctx->ts_ctx);
752 
753 	if (prot & ~TEE_MATTR_PROT_MASK || !len)
754 		return TEE_ERROR_BAD_PARAMETERS;
755 
756 	res = split_vm_range(uctx, va, len, NULL, &r0);
757 	if (res)
758 		return res;
759 
760 	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
761 		if (r->va + r->size > va + len)
762 			break;
763 		if (r->attr & (TEE_MATTR_UW | TEE_MATTR_PW))
764 			was_writeable = true;
765 
766 		r->attr &= ~TEE_MATTR_PROT_MASK;
767 		r->attr |= prot;
768 
769 		if (!mobj_is_paged(r->mobj)) {
770 			need_sync = true;
771 			set_um_region(uctx, r);
772 			/*
773 			 * Normally when set_um_region() is called we
774 			 * change from no mapping to some mapping, but in
775 			 * this case we change the permissions on an
776 			 * already present mapping, so some TLB invalidation
777 			 * is needed. We also depend on the dsb() performed
778 			 * as part of the TLB invalidation.
779 			 */
780 			tlbi_va_range_asid(r->va, r->size, SMALL_PAGE_SIZE,
781 					   uctx->vm_info.asid);
782 		}
783 	}
784 
785 	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
786 		if (r->va + r->size > va + len)
787 			break;
788 		if (mobj_is_paged(r->mobj)) {
789 			if (!tee_pager_set_um_region_attr(uctx, r->va, r->size,
790 							  prot))
791 				panic();
792 		} else if (was_writeable) {
793 			cache_op_inner(DCACHE_AREA_CLEAN, (void *)r->va,
794 				       r->size);
795 		}
796 
797 	}
798 	if (need_sync && was_writeable)
799 		cache_op_inner(ICACHE_INVALIDATE, NULL, 0);
800 
801 	merge_vm_range(uctx, va, len);
802 
803 	return TEE_SUCCESS;
804 }
805 
806 static void umap_remove_region(struct vm_info *vmi, struct vm_region *reg)
807 {
808 	TAILQ_REMOVE(&vmi->regions, reg, link);
809 	mobj_put(reg->mobj);
810 	free(reg);
811 }
812 
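/*
 * Remove the mapping of @len bytes (rounded up to a whole number of
 * small pages) at @va. The range is split into complete regions and each
 * region is unmapped, removed from the context and freed.
 */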
813 TEE_Result vm_unmap(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
814 {
815 	TEE_Result res = TEE_SUCCESS;
816 	struct vm_region *r = NULL;
817 	struct vm_region *r_next = NULL;
818 	size_t end_va = 0;
819 	size_t unmap_end_va = 0;
820 	size_t l = 0;
821 
822 	assert(thread_get_tsd()->ctx == uctx->ts_ctx);
823 
824 	if (ROUNDUP_OVERFLOW(len, SMALL_PAGE_SIZE, &l))
825 		return TEE_ERROR_BAD_PARAMETERS;
826 
827 	if (!l || (va & SMALL_PAGE_MASK))
828 		return TEE_ERROR_BAD_PARAMETERS;
829 
830 	if (ADD_OVERFLOW(va, l, &end_va))
831 		return TEE_ERROR_BAD_PARAMETERS;
832 
833 	res = split_vm_range(uctx, va, l, NULL, &r);
834 	if (res)
835 		return res;
836 
837 	while (true) {
838 		r_next = TAILQ_NEXT(r, link);
839 		unmap_end_va = r->va + r->size;
840 		rem_um_region(uctx, r);
841 		umap_remove_region(&uctx->vm_info, r);
842 		if (!r_next || unmap_end_va == end_va)
843 			break;
844 		r = r_next;
845 	}
846 
847 	return TEE_SUCCESS;
848 }
849 
850 static TEE_Result map_kinit(struct user_mode_ctx *uctx)
851 {
852 	TEE_Result res = TEE_SUCCESS;
853 	struct mobj *mobj = NULL;
854 	size_t offs = 0;
855 	vaddr_t va = 0;
856 	size_t sz = 0;
857 	uint32_t prot = 0;
858 
859 	thread_get_user_kcode(&mobj, &offs, &va, &sz);
860 	if (sz) {
861 		prot = TEE_MATTR_PRX;
862 		if (IS_ENABLED(CFG_CORE_BTI))
863 			prot |= TEE_MATTR_GUARDED;
864 		res = vm_map(uctx, &va, sz, prot, VM_FLAG_PERMANENT,
865 			     mobj, offs);
866 		if (res)
867 			return res;
868 	}
869 
870 	thread_get_user_kdata(&mobj, &offs, &va, &sz);
871 	if (sz)
872 		return vm_map(uctx, &va, sz, TEE_MATTR_PRW, VM_FLAG_PERMANENT,
873 			      mobj, offs);
874 
875 	return TEE_SUCCESS;
876 }
877 
878 TEE_Result vm_info_init(struct user_mode_ctx *uctx, struct ts_ctx *ts_ctx)
879 {
880 	TEE_Result res;
881 	uint32_t asid = asid_alloc();
882 
883 	if (!asid) {
884 		DMSG("Failed to allocate ASID");
885 		return TEE_ERROR_GENERIC;
886 	}
887 
888 	memset(uctx, 0, sizeof(*uctx));
889 	TAILQ_INIT(&uctx->vm_info.regions);
890 	SLIST_INIT(&uctx->pgt_cache);
891 	uctx->vm_info.asid = asid;
892 	uctx->ts_ctx = ts_ctx;
893 
894 	res = map_kinit(uctx);
895 	if (res)
896 		vm_info_final(uctx);
897 	return res;
898 }
899 
900 void vm_clean_param(struct user_mode_ctx *uctx)
901 {
902 	struct vm_region *next_r;
903 	struct vm_region *r;
904 
905 	TAILQ_FOREACH_SAFE(r, &uctx->vm_info.regions, link, next_r) {
906 		if (r->flags & VM_FLAG_EPHEMERAL) {
907 			rem_um_region(uctx, r);
908 			umap_remove_region(&uctx->vm_info, r);
909 		}
910 	}
911 }
912 
913 static void check_param_map_empty(struct user_mode_ctx *uctx __maybe_unused)
914 {
915 	struct vm_region *r = NULL;
916 
917 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link)
918 		assert(!(r->flags & VM_FLAG_EPHEMERAL));
919 }
920 
921 static TEE_Result param_mem_to_user_va(struct user_mode_ctx *uctx,
922 				       struct param_mem *mem, void **user_va)
923 {
924 	struct vm_region *region = NULL;
925 
926 	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
927 		vaddr_t va = 0;
928 		size_t phys_offs = 0;
929 
930 		if (!(region->flags & VM_FLAG_EPHEMERAL))
931 			continue;
932 		if (mem->mobj != region->mobj)
933 			continue;
934 
935 		phys_offs = mobj_get_phys_offs(mem->mobj,
936 					       CORE_MMU_USER_PARAM_SIZE);
937 		phys_offs += mem->offs;
938 		if (phys_offs < region->offset)
939 			continue;
940 		if (phys_offs >= (region->offset + region->size))
941 			continue;
942 		va = region->va + phys_offs - region->offset;
943 		*user_va = (void *)va;
944 		return TEE_SUCCESS;
945 	}
946 	return TEE_ERROR_GENERIC;
947 }
948 
949 static int cmp_param_mem(const void *a0, const void *a1)
950 {
951 	const struct param_mem *m1 = a1;
952 	const struct param_mem *m0 = a0;
953 	int ret;
954 
955 	/* Make sure that invalid param_mem entries are placed last in the array */
956 	if (!m0->mobj && !m1->mobj)
957 		return 0;
958 	if (!m0->mobj)
959 		return 1;
960 	if (!m1->mobj)
961 		return -1;
962 
963 	ret = CMP_TRILEAN(mobj_is_secure(m0->mobj), mobj_is_secure(m1->mobj));
964 	if (ret)
965 		return ret;
966 
967 	ret = CMP_TRILEAN((vaddr_t)m0->mobj, (vaddr_t)m1->mobj);
968 	if (ret)
969 		return ret;
970 
971 	ret = CMP_TRILEAN(m0->offs, m1->offs);
972 	if (ret)
973 		return ret;
974 
975 	return CMP_TRILEAN(m0->size, m1->size);
976 }
977 
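/*
 * Map the memref parameters of @param into the user mode context as
 * ephemeral, shareable regions and return the resulting user addresses
 * in @param_va. Entries that share a mobj and have adjacent or
 * overlapping offsets are merged first so the same pages aren't mapped
 * twice.
 */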
978 TEE_Result vm_map_param(struct user_mode_ctx *uctx, struct tee_ta_param *param,
979 			void *param_va[TEE_NUM_PARAMS])
980 {
981 	TEE_Result res = TEE_SUCCESS;
982 	size_t n;
983 	size_t m;
984 	struct param_mem mem[TEE_NUM_PARAMS];
985 
986 	memset(mem, 0, sizeof(mem));
987 	for (n = 0; n < TEE_NUM_PARAMS; n++) {
988 		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
989 		size_t phys_offs;
990 
991 		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
992 		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
993 		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
994 			continue;
995 		phys_offs = mobj_get_phys_offs(param->u[n].mem.mobj,
996 					       CORE_MMU_USER_PARAM_SIZE);
997 		mem[n].mobj = param->u[n].mem.mobj;
998 		mem[n].offs = ROUNDDOWN(phys_offs + param->u[n].mem.offs,
999 					CORE_MMU_USER_PARAM_SIZE);
1000 		mem[n].size = ROUNDUP(phys_offs + param->u[n].mem.offs -
1001 				      mem[n].offs + param->u[n].mem.size,
1002 				      CORE_MMU_USER_PARAM_SIZE);
1003 		/*
1004 		 * For size 0 (raw pointer parameter), use the minimum size
1005 		 * so that the address can still be mapped.
1006 		 */
1007 		if (!mem[n].size)
1008 			mem[n].size = CORE_MMU_USER_PARAM_SIZE;
1009 	}
1010 
1011 	/*
1012 	 * Sort arguments so NULL mobj is last, secure mobjs first, then by
1013 	 * mobj pointer value since those entries can't be merged either,
1014 	 * finally by offset.
1015 	 *
1016 	 * This should result in a list where all mergeable entries are
1017 	 * next to each other and unused/invalid entries are at the end.
1018 	 */
1019 	qsort(mem, TEE_NUM_PARAMS, sizeof(struct param_mem), cmp_param_mem);
1020 
1021 	for (n = 1, m = 0; n < TEE_NUM_PARAMS && mem[n].mobj; n++) {
1022 		if (mem[n].mobj == mem[m].mobj &&
1023 		    (mem[n].offs == (mem[m].offs + mem[m].size) ||
1024 		     core_is_buffer_intersect(mem[m].offs, mem[m].size,
1025 					      mem[n].offs, mem[n].size))) {
1026 			mem[m].size = mem[n].offs + mem[n].size - mem[m].offs;
1027 			continue;
1028 		}
1029 		m++;
1030 		if (n != m)
1031 			mem[m] = mem[n];
1032 	}
1033 	/*
1034 	 * We'd like 'm' to be the number of valid entries. Here 'm' is the
1035 	 * index of the last valid entry if the first entry is valid, else
1036 	 * 0.
1037 	 */
1038 	if (mem[0].mobj)
1039 		m++;
1040 
1041 	check_param_map_empty(uctx);
1042 
1043 	for (n = 0; n < m; n++) {
1044 		vaddr_t va = 0;
1045 
1046 		res = vm_map(uctx, &va, mem[n].size,
1047 			     TEE_MATTR_PRW | TEE_MATTR_URW,
1048 			     VM_FLAG_EPHEMERAL | VM_FLAG_SHAREABLE,
1049 			     mem[n].mobj, mem[n].offs);
1050 		if (res)
1051 			goto out;
1052 	}
1053 
1054 	for (n = 0; n < TEE_NUM_PARAMS; n++) {
1055 		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
1056 
1057 		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
1058 		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
1059 		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
1060 			continue;
1061 		if (!param->u[n].mem.mobj)
1062 			continue;
1063 
1064 		res = param_mem_to_user_va(uctx, &param->u[n].mem,
1065 					   param_va + n);
1066 		if (res != TEE_SUCCESS)
1067 			goto out;
1068 	}
1069 
1070 	res = alloc_pgt(uctx);
1071 out:
1072 	if (res)
1073 		vm_clean_param(uctx);
1074 
1075 	return res;
1076 }
1077 
1078 void vm_info_final(struct user_mode_ctx *uctx)
1079 {
1080 	if (!uctx->vm_info.asid)
1081 		return;
1082 
1083 	pgt_flush(uctx);
1084 	tee_pager_rem_um_regions(uctx);
1085 
1086 	/* Clear MMU entries to avoid a clash when the ASID is reused */
1087 	tlbi_asid(uctx->vm_info.asid);
1088 
1089 	asid_free(uctx->vm_info.asid);
1090 	uctx->vm_info.asid = 0;
1091 
1092 	while (!TAILQ_EMPTY(&uctx->vm_info.regions))
1093 		umap_remove_region(&uctx->vm_info,
1094 				   TAILQ_FIRST(&uctx->vm_info.regions));
1095 }
1096 
1097 /* return true only if buffer fits inside TA private memory */
1098 bool vm_buf_is_inside_um_private(const struct user_mode_ctx *uctx,
1099 				 const void *va, size_t size)
1100 {
1101 	struct vm_region *r = NULL;
1102 
1103 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1104 		if (r->flags & VM_FLAGS_NONPRIV)
1105 			continue;
1106 		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size))
1107 			return true;
1108 	}
1109 
1110 	return false;
1111 }
1112 
1113 /* return true only if buffer intersects TA private memory */
1114 bool vm_buf_intersects_um_private(const struct user_mode_ctx *uctx,
1115 				  const void *va, size_t size)
1116 {
1117 	struct vm_region *r = NULL;
1118 
1119 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1120 		if (r->flags & VM_FLAGS_NONPRIV)
1121 			continue;
1122 		if (core_is_buffer_intersect((vaddr_t)va, size, r->va, r->size))
1123 			return true;
1124 	}
1125 
1126 	return false;
1127 }
1128 
1129 TEE_Result vm_buf_to_mboj_offs(const struct user_mode_ctx *uctx,
1130 			       const void *va, size_t size,
1131 			       struct mobj **mobj, size_t *offs)
1132 {
1133 	struct vm_region *r = NULL;
1134 
1135 	TAILQ_FOREACH(r, &uctx->vm_info.regions, link) {
1136 		if (!r->mobj)
1137 			continue;
1138 		if (core_is_buffer_inside((vaddr_t)va, size, r->va, r->size)) {
1139 			size_t poffs;
1140 
1141 			poffs = mobj_get_phys_offs(r->mobj,
1142 						   CORE_MMU_USER_PARAM_SIZE);
1143 			*mobj = r->mobj;
1144 			*offs = (vaddr_t)va - r->va + r->offset - poffs;
1145 			return TEE_SUCCESS;
1146 		}
1147 	}
1148 
1149 	return TEE_ERROR_BAD_PARAMETERS;
1150 }
1151 
1152 static TEE_Result tee_mmu_user_va2pa_attr(const struct user_mode_ctx *uctx,
1153 					  void *ua, paddr_t *pa, uint32_t *attr)
1154 {
1155 	struct vm_region *region = NULL;
1156 
1157 	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
1158 		if (!core_is_buffer_inside((vaddr_t)ua, 1, region->va,
1159 					   region->size))
1160 			continue;
1161 
1162 		if (pa) {
1163 			TEE_Result res;
1164 			paddr_t p;
1165 			size_t offset;
1166 			size_t granule;
1167 
1168 			/*
1169 			 * The mobj and the input user address may each
1170 			 * include a specific offset-in-granule position.
1171 			 * Drop both to get the target physical page base
1172 			 * address, then apply only the user address
1173 			 * offset-in-granule.
1174 			 * The smallest mapping granule is the small page.
1175 			 */
1176 			granule = MAX(region->mobj->phys_granule,
1177 				      (size_t)SMALL_PAGE_SIZE);
1178 			assert(!granule || IS_POWER_OF_TWO(granule));
1179 
1180 			offset = region->offset +
1181 				 ROUNDDOWN2((vaddr_t)ua - region->va, granule);
1182 
1183 			res = mobj_get_pa(region->mobj, offset, granule, &p);
1184 			if (res != TEE_SUCCESS)
1185 				return res;
1186 
1187 			*pa = p | ((vaddr_t)ua & (granule - 1));
1188 		}
1189 		if (attr)
1190 			*attr = region->attr;
1191 
1192 		return TEE_SUCCESS;
1193 	}
1194 
1195 	return TEE_ERROR_ACCESS_DENIED;
1196 }
1197 
1198 TEE_Result vm_va2pa(const struct user_mode_ctx *uctx, void *ua, paddr_t *pa)
1199 {
1200 	return tee_mmu_user_va2pa_attr(uctx, ua, pa, NULL);
1201 }
1202 
1203 void *vm_pa2va(const struct user_mode_ctx *uctx, paddr_t pa, size_t pa_size)
1204 {
1205 	paddr_t p = 0;
1206 	struct vm_region *region = NULL;
1207 
1208 	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
1209 		size_t granule = 0;
1210 		size_t size = 0;
1211 		size_t ofs = 0;
1212 
1213 		/* pa2va is expected only for memory tracked through mobj */
1214 		if (!region->mobj)
1215 			continue;
1216 
1217 		/* Physically granulated memory object must be scanned */
1218 		granule = region->mobj->phys_granule;
1219 		assert(!granule || IS_POWER_OF_TWO(granule));
1220 
1221 		for (ofs = region->offset; ofs < region->size; ofs += size) {
1222 
1223 			if (granule) {
1224 				/* From current offset to buffer/granule end */
1225 				size = granule - (ofs & (granule - 1));
1226 
1227 				if (size > (region->size - ofs))
1228 					size = region->size - ofs;
1229 			} else {
1230 				size = region->size;
1231 			}
1232 
1233 			if (mobj_get_pa(region->mobj, ofs, granule, &p))
1234 				continue;
1235 
1236 			if (core_is_buffer_inside(pa, pa_size, p, size)) {
1237 				/* Remove region offset (mobj phys offset) */
1238 				ofs -= region->offset;
1239 				/* Get offset-in-granule */
1240 				p = pa - p;
1241 
1242 				return (void *)(region->va + ofs + (vaddr_t)p);
1243 			}
1244 		}
1245 	}
1246 
1247 	return NULL;
1248 }
1249 
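/*
 * Verify that the user buffer [uaddr, uaddr + len) is mapped with the
 * access rights requested in @flags: readable and/or writable by user
 * mode, secure or non-secure, and TA private memory unless
 * TEE_MEMORY_ACCESS_ANY_OWNER is given.
 *
 * Illustrative check before reading a user supplied buffer ("buf" and
 * "sz" are hypothetical):
 *
 *	res = vm_check_access_rights(uctx,
 *				     TEE_MEMORY_ACCESS_READ |
 *				     TEE_MEMORY_ACCESS_ANY_OWNER,
 *				     (uaddr_t)buf, sz);
 */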
1250 TEE_Result vm_check_access_rights(const struct user_mode_ctx *uctx,
1251 				  uint32_t flags, uaddr_t uaddr, size_t len)
1252 {
1253 	uaddr_t a = 0;
1254 	uaddr_t end_addr = 0;
1255 	size_t addr_incr = MIN(CORE_MMU_USER_CODE_SIZE,
1256 			       CORE_MMU_USER_PARAM_SIZE);
1257 
1258 	if (ADD_OVERFLOW(uaddr, len, &end_addr))
1259 		return TEE_ERROR_ACCESS_DENIED;
1260 
1261 	if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
1262 	    (flags & TEE_MEMORY_ACCESS_SECURE))
1263 		return TEE_ERROR_ACCESS_DENIED;
1264 
1265 	if (len == 0)
1266 		return TEE_SUCCESS;
1267 
1268 	/*
1269 	 * Rely on the TA private memory test to check whether the address
1270 	 * range is private to the TA or not.
1271 	 */
1272 	if (!(flags & TEE_MEMORY_ACCESS_ANY_OWNER) &&
1273 	   !vm_buf_is_inside_um_private(uctx, (void *)uaddr, len))
1274 		return TEE_ERROR_ACCESS_DENIED;
1275 
1276 	for (a = ROUNDDOWN2(uaddr, addr_incr); a < end_addr; a += addr_incr) {
1277 		uint32_t attr;
1278 		TEE_Result res;
1279 
1280 		res = tee_mmu_user_va2pa_attr(uctx, (void *)a, NULL, &attr);
1281 		if (res != TEE_SUCCESS)
1282 			return res;
1283 
1284 		if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
1285 		    (attr & TEE_MATTR_SECURE))
1286 			return TEE_ERROR_ACCESS_DENIED;
1287 
1288 		if ((flags & TEE_MEMORY_ACCESS_SECURE) &&
1289 		    !(attr & TEE_MATTR_SECURE))
1290 			return TEE_ERROR_ACCESS_DENIED;
1291 
1292 		if ((flags & TEE_MEMORY_ACCESS_WRITE) && !(attr & TEE_MATTR_UW))
1293 			return TEE_ERROR_ACCESS_DENIED;
1294 		if ((flags & TEE_MEMORY_ACCESS_READ) && !(attr & TEE_MATTR_UR))
1295 			return TEE_ERROR_ACCESS_DENIED;
1296 	}
1297 
1298 	return TEE_SUCCESS;
1299 }
1300 
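/*
 * Make @ctx the active context for this thread: the previous user mode
 * context's page tables are released for reuse and, if @ctx is a user
 * mode context, a fresh user map is created and installed.
 */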
1301 void vm_set_ctx(struct ts_ctx *ctx)
1302 {
1303 	struct thread_specific_data *tsd = thread_get_tsd();
1304 	struct user_mode_ctx *uctx = NULL;
1305 
1306 	core_mmu_set_user_map(NULL);
1307 
1308 	if (is_user_mode_ctx(tsd->ctx)) {
1309 		/*
1310 		 * We're coming from a user mode context so we must make
1311 		 * the pgts available for reuse.
1312 		 */
1313 		uctx = to_user_mode_ctx(tsd->ctx);
1314 		pgt_put_all(uctx);
1315 	}
1316 
1317 	if (is_user_mode_ctx(ctx)) {
1318 		struct core_mmu_user_map map = { };
1319 
1320 		uctx = to_user_mode_ctx(ctx);
1321 		core_mmu_create_user_map(uctx, &map);
1322 		core_mmu_set_user_map(&map);
1323 		tee_pager_assign_um_tables(uctx);
1324 	}
1325 	tsd->ctx = ctx;
1326 }
1327 
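/*
 * Look up the region containing @va and return a reference to its mobj
 * together with the matching offset and protection bits. *len is capped
 * at the end of the region; @va and *len must be small-page aligned. The
 * caller must release the returned mobj with mobj_put().
 */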
1328 struct mobj *vm_get_mobj(struct user_mode_ctx *uctx, vaddr_t va, size_t *len,
1329 			 uint16_t *prot, size_t *offs)
1330 {
1331 	struct vm_region *r = NULL;
1332 	size_t r_offs = 0;
1333 
1334 	if (!len || ((*len | va) & SMALL_PAGE_MASK))
1335 		return NULL;
1336 
1337 	r = find_vm_region(&uctx->vm_info, va);
1338 	if (!r)
1339 		return NULL;
1340 
1341 	r_offs = va - r->va;
1342 
1343 	*len = MIN(r->size - r_offs, *len);
1344 	*offs = r->offset + r_offs;
1345 	*prot = r->attr & TEE_MATTR_PROT_MASK;
1346 	return mobj_get(r->mobj);
1347 }
1348