xref: /optee_os/core/pta/system.c (revision 1936dfc78d0689c0bc81e1c216bb0dd4775aa280)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2018-2019, Linaro Limited
4  */
5 
6 #include <assert.h>
7 #include <crypto/crypto.h>
8 #include <kernel/handle.h>
9 #include <kernel/huk_subkey.h>
10 #include <kernel/misc.h>
11 #include <kernel/msg_param.h>
12 #include <kernel/pseudo_ta.h>
13 #include <kernel/user_ta.h>
14 #include <kernel/user_ta_store.h>
15 #include <ldelf.h>
16 #include <mm/file.h>
17 #include <mm/fobj.h>
18 #include <mm/tee_mmu.h>
19 #include <pta_system.h>
20 #include <string.h>
21 #include <tee_api_defines_extensions.h>
22 #include <tee_api_defines.h>
23 #include <util.h>
24 
/* Max bytes of caller entropy consumed per reseed (Fortuna seed limit) */
#define MAX_ENTROPY_IN			32u
26 
/*
 * State of one opened TA binary, tracked in the session's handle
 * database until closed.
 */
struct bin_handle {
	const struct user_ta_store_ops *op;	/* Store that opened the binary */
	struct user_ta_store_handle *h;		/* Store-specific handle */
	struct file *f;		/* File identified by the store's tag */
	size_t offs_bytes;	/* Current sequential read offset */
	size_t size_bytes;	/* Total size of the binary */
};
34 
/* Per-session context, allocated in open_session() */
struct system_ctx {
	struct handle_db db;	/* Maps integer handles to struct bin_handle */
	const struct user_ta_store_ops *store_op;
};
39 
/* State cookie passed to crypto_rng_add_event() for this entropy source */
static unsigned int system_pnum;
41 
42 static TEE_Result system_rng_reseed(struct tee_ta_session *s __unused,
43 				uint32_t param_types,
44 				TEE_Param params[TEE_NUM_PARAMS])
45 {
46 	size_t entropy_sz;
47 	uint8_t *entropy_input;
48 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
49 					  TEE_PARAM_TYPE_NONE,
50 					  TEE_PARAM_TYPE_NONE,
51 					  TEE_PARAM_TYPE_NONE);
52 
53 	if (exp_pt != param_types)
54 		return TEE_ERROR_BAD_PARAMETERS;
55 	entropy_input = params[0].memref.buffer;
56 	entropy_sz = params[0].memref.size;
57 
58 	/* Fortuna PRNG requires seed <= 32 bytes */
59 	if (!entropy_sz)
60 		return TEE_ERROR_BAD_PARAMETERS;
61 
62 	entropy_sz = MIN(entropy_sz, MAX_ENTROPY_IN);
63 
64 	crypto_rng_add_event(CRYPTO_RNG_SRC_NONSECURE, &system_pnum,
65 			     entropy_input, entropy_sz);
66 	return TEE_SUCCESS;
67 }
68 
/*
 * Derive a key unique to the calling TA from the hardware unique key.
 *
 * params[0] (memref input): optional extra data mixed into the
 * derivation (max TA_DERIVED_EXTRA_DATA_MAX_SIZE bytes).
 * params[1] (memref output): derived key, must be within
 * [TA_DERIVED_KEY_MIN_SIZE, TA_DERIVED_KEY_MAX_SIZE] bytes and must
 * reside in secure memory.
 */
static TEE_Result system_derive_ta_unique_key(struct tee_ta_session *s,
					      uint32_t param_types,
					      TEE_Param params[TEE_NUM_PARAMS])
{
	size_t data_len = sizeof(TEE_UUID);
	TEE_Result res = TEE_ERROR_GENERIC;
	uint8_t *data = NULL;
	uint32_t access_flags = 0;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
					  TEE_PARAM_TYPE_MEMREF_OUTPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);
	struct user_ta_ctx *utc = NULL;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	if (params[0].memref.size > TA_DERIVED_EXTRA_DATA_MAX_SIZE ||
	    params[1].memref.size < TA_DERIVED_KEY_MIN_SIZE ||
	    params[1].memref.size > TA_DERIVED_KEY_MAX_SIZE)
		return TEE_ERROR_BAD_PARAMETERS;

	utc = to_user_ta_ctx(s->ctx);

	/*
	 * The derived key shall not end up in non-secure memory by
	 * mistake.
	 *
	 * Note that we're allowing shared memory as long as it's
	 * secure. This is needed because a TA always uses shared memory
	 * when communicating with another TA.
	 */
	access_flags = TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER |
		       TEE_MEMORY_ACCESS_SECURE;
	res = tee_mmu_check_access_rights(&utc->uctx, access_flags,
					  (uaddr_t)params[1].memref.buffer,
					  params[1].memref.size);
	if (res != TEE_SUCCESS)
		return TEE_ERROR_SECURITY;

	/* Take extra data into account. */
	if (ADD_OVERFLOW(data_len, params[0].memref.size, &data_len))
		return TEE_ERROR_SECURITY;

	data = calloc(data_len, 1);
	if (!data)
		return TEE_ERROR_OUT_OF_MEMORY;

	/* The TA UUID is always the first part of the derivation data */
	memcpy(data, &s->ctx->uuid, sizeof(TEE_UUID));

	/* Append the user provided data */
	memcpy(data + sizeof(TEE_UUID), params[0].memref.buffer,
	       params[0].memref.size);

	res = huk_subkey_derive(HUK_SUBKEY_UNIQUE_TA, data, data_len,
				params[1].memref.buffer,
				params[1].memref.size);
	free(data);

	return res;
}
130 
131 static TEE_Result system_map_zi(struct tee_ta_session *s, uint32_t param_types,
132 				TEE_Param params[TEE_NUM_PARAMS])
133 {
134 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
135 					  TEE_PARAM_TYPE_VALUE_INOUT,
136 					  TEE_PARAM_TYPE_VALUE_INPUT,
137 					  TEE_PARAM_TYPE_NONE);
138 	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
139 	uint32_t prot = TEE_MATTR_URW | TEE_MATTR_PRW;
140 	TEE_Result res = TEE_ERROR_GENERIC;
141 	struct mobj *mobj = NULL;
142 	uint32_t pad_begin = 0;
143 	uint32_t vm_flags = 0;
144 	struct fobj *f = NULL;
145 	uint32_t pad_end = 0;
146 	size_t num_bytes = 0;
147 	vaddr_t va = 0;
148 
149 	if (exp_pt != param_types)
150 		return TEE_ERROR_BAD_PARAMETERS;
151 	if (params[0].value.b & ~PTA_SYSTEM_MAP_FLAG_SHAREABLE)
152 		return TEE_ERROR_BAD_PARAMETERS;
153 
154 	if (params[0].value.b & PTA_SYSTEM_MAP_FLAG_SHAREABLE)
155 		vm_flags |= VM_FLAG_SHAREABLE;
156 
157 	num_bytes = params[0].value.a;
158 	va = reg_pair_to_64(params[1].value.a, params[1].value.b);
159 	pad_begin = params[2].value.a;
160 	pad_end = params[2].value.b;
161 
162 	f = fobj_ta_mem_alloc(ROUNDUP(num_bytes, SMALL_PAGE_SIZE) /
163 			      SMALL_PAGE_SIZE);
164 	if (!f)
165 		return TEE_ERROR_OUT_OF_MEMORY;
166 	mobj = mobj_with_fobj_alloc(f, NULL);
167 	fobj_put(f);
168 	if (!mobj)
169 		return TEE_ERROR_OUT_OF_MEMORY;
170 	res = vm_map_pad(&utc->uctx, &va, num_bytes, prot, vm_flags,
171 			 mobj, 0, pad_begin, pad_end);
172 	mobj_put(mobj);
173 	if (!res)
174 		reg_pair_from_64(va, &params[1].value.a, &params[1].value.b);
175 
176 	return res;
177 }
178 
179 static TEE_Result system_unmap(struct tee_ta_session *s, uint32_t param_types,
180 			       TEE_Param params[TEE_NUM_PARAMS])
181 {
182 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
183 					  TEE_PARAM_TYPE_VALUE_INPUT,
184 					  TEE_PARAM_TYPE_NONE,
185 					  TEE_PARAM_TYPE_NONE);
186 
187 	if (exp_pt != param_types)
188 		return TEE_ERROR_BAD_PARAMETERS;
189 
190 	if (params[0].value.b)
191 		return TEE_ERROR_BAD_PARAMETERS;
192 
193 	return vm_unmap(&to_user_ta_ctx(s->ctx)->uctx,
194 			reg_pair_to_64(params[1].value.a, params[1].value.b),
195 			ROUNDUP(params[0].value.a, SMALL_PAGE_SIZE));
196 }
197 
198 static void ta_bin_close(void *ptr)
199 {
200 	struct bin_handle *binh = ptr;
201 
202 	if (binh) {
203 		if (binh->op && binh->h)
204 			binh->op->close(binh->h);
205 		file_put(binh->f);
206 	}
207 	free(binh);
208 }
209 
210 static TEE_Result system_open_ta_binary(struct system_ctx *ctx,
211 					uint32_t param_types,
212 					TEE_Param params[TEE_NUM_PARAMS])
213 {
214 	TEE_Result res = TEE_SUCCESS;
215 	struct bin_handle *binh = NULL;
216 	int h = 0;
217 	TEE_UUID *uuid = NULL;
218 	uint8_t tag[FILE_TAG_SIZE] = { 0 };
219 	unsigned int tag_len = sizeof(tag);
220 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
221 					  TEE_PARAM_TYPE_VALUE_OUTPUT,
222 					  TEE_PARAM_TYPE_NONE,
223 					  TEE_PARAM_TYPE_NONE);
224 
225 	if (exp_pt != param_types)
226 		return TEE_ERROR_BAD_PARAMETERS;
227 	if (params[0].memref.size != sizeof(*uuid))
228 		return TEE_ERROR_BAD_PARAMETERS;
229 
230 	uuid = params[0].memref.buffer;
231 
232 	binh = calloc(1, sizeof(*binh));
233 	if (!binh)
234 		return TEE_ERROR_OUT_OF_MEMORY;
235 
236 	SCATTERED_ARRAY_FOREACH(binh->op, ta_stores, struct user_ta_store_ops) {
237 		DMSG("Lookup user TA ELF %pUl (%s)",
238 		     (void *)uuid, binh->op->description);
239 
240 		res = binh->op->open(uuid, &binh->h);
241 		DMSG("res=0x%x", res);
242 		if (res != TEE_ERROR_ITEM_NOT_FOUND &&
243 		    res != TEE_ERROR_STORAGE_NOT_AVAILABLE)
244 			break;
245 	}
246 	if (res)
247 		goto err;
248 
249 	res = binh->op->get_size(binh->h, &binh->size_bytes);
250 	if (res)
251 		goto err;
252 	res = binh->op->get_tag(binh->h, tag, &tag_len);
253 	if (res)
254 		goto err;
255 	binh->f = file_get_by_tag(tag, tag_len);
256 	if (!binh->f)
257 		goto err_oom;
258 
259 	h = handle_get(&ctx->db, binh);
260 	if (h < 0)
261 		goto err_oom;
262 
263 	return TEE_SUCCESS;
264 err_oom:
265 	res = TEE_ERROR_OUT_OF_MEMORY;
266 err:
267 	ta_bin_close(binh);
268 	return res;
269 }
270 
271 static TEE_Result system_close_ta_binary(struct system_ctx *ctx,
272 					 uint32_t param_types,
273 					 TEE_Param params[TEE_NUM_PARAMS])
274 {
275 	TEE_Result res = TEE_SUCCESS;
276 	struct bin_handle *binh = NULL;
277 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
278 					  TEE_PARAM_TYPE_NONE,
279 					  TEE_PARAM_TYPE_NONE,
280 					  TEE_PARAM_TYPE_NONE);
281 
282 	if (exp_pt != param_types)
283 		return TEE_ERROR_BAD_PARAMETERS;
284 
285 	if (params[0].value.b)
286 		return TEE_ERROR_BAD_PARAMETERS;
287 
288 	binh = handle_put(&ctx->db, params[0].value.a);
289 	if (!binh)
290 		return TEE_ERROR_BAD_PARAMETERS;
291 
292 	if (binh->offs_bytes < binh->size_bytes)
293 		res = binh->op->read(binh->h, NULL,
294 				     binh->size_bytes - binh->offs_bytes);
295 
296 	ta_bin_close(binh);
297 	return res;
298 }
299 
/*
 * Copy num_bytes of the TA binary, starting at offs_bytes, to va.
 *
 * The store interface is a sequential reader: the read position only
 * moves forward, so a request below the current offset fails with
 * TEE_ERROR_BAD_STATE, and skipping ahead is done by reading into NULL.
 * A request extending past the end of the binary is zero-filled.
 */
static TEE_Result binh_copy_to(struct bin_handle *binh, vaddr_t va,
			       size_t offs_bytes, size_t num_bytes)
{
	TEE_Result res = TEE_SUCCESS;
	size_t l =  num_bytes;

	/* Cannot seek backwards in a sequential store */
	if (offs_bytes < binh->offs_bytes)
		return TEE_ERROR_BAD_STATE;
	if (offs_bytes > binh->offs_bytes) {
		/* Skip forward by discarding bytes (NULL destination) */
		res = binh->op->read(binh->h, NULL,
				     offs_bytes - binh->offs_bytes);
		if (res)
			return res;
		binh->offs_bytes = offs_bytes;
	}

	if (binh->offs_bytes + l > binh->size_bytes) {
		/* Read the remaining bytes, zero-fill the rest */
		size_t rb = binh->size_bytes - binh->offs_bytes;

		res = binh->op->read(binh->h, (void *)va, rb);
		if (res)
			return res;
		memset((uint8_t *)va + rb, 0, l - rb);
		binh->offs_bytes = binh->size_bytes;
	} else {
		res = binh->op->read(binh->h, (void *)va, l);
		if (res)
			return res;
		binh->offs_bytes += l;
	}

	return TEE_SUCCESS;
}
333 
/*
 * Map (part of) an opened TA binary into the calling TA.
 *
 * params[0].value.a: binary handle, .b: PTA_SYSTEM_MAP_FLAG_* flags
 * params[1].value.a: page-aligned offset into binary, .b: byte count
 * params[2] (inout): requested/assigned virtual address as a reg pair
 * params[3].value.a/b: padding before/after the mapping
 *
 * Shareable read-only mappings are backed by a per-file "slice" so the
 * same pages can be shared between TAs; writeable mappings always get a
 * private copy. SHAREABLE+WRITEABLE and EXECUTABLE+WRITEABLE are
 * rejected.
 */
static TEE_Result system_map_ta_binary(struct system_ctx *ctx,
				       struct tee_ta_session *s,
				       uint32_t param_types,
				       TEE_Param params[TEE_NUM_PARAMS])
{
	const uint32_t accept_flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE |
				      PTA_SYSTEM_MAP_FLAG_WRITEABLE |
				      PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INOUT,
					  TEE_PARAM_TYPE_VALUE_INPUT);
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
	struct bin_handle *binh = NULL;
	TEE_Result res = TEE_SUCCESS;
	struct file_slice *fs = NULL;
	bool file_is_locked = false;
	struct mobj *mobj = NULL;
	uint32_t offs_bytes = 0;
	uint32_t offs_pages = 0;
	uint32_t num_bytes = 0;
	uint32_t pad_begin = 0;
	uint32_t pad_end = 0;
	size_t num_pages = 0;
	uint32_t flags = 0;
	uint32_t prot = 0;
	vaddr_t va = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_lookup(&ctx->db, params[0].value.a);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;
	flags = params[0].value.b;
	offs_bytes = params[1].value.a;
	num_bytes = params[1].value.b;
	va = reg_pair_to_64(params[2].value.a, params[2].value.b);
	pad_begin = params[3].value.a;
	pad_end = params[3].value.b;

	if ((flags & accept_flags) != flags)
		return TEE_ERROR_BAD_PARAMETERS;

	/* Shared pages cannot be writeable */
	if ((flags & PTA_SYSTEM_MAP_FLAG_SHAREABLE) &&
	    (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	/* W^X: never executable and writeable at the same time */
	if ((flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE) &&
	    (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	if (offs_bytes & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	prot = TEE_MATTR_UR | TEE_MATTR_PR;
	if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)
		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
	if (flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE)
		prot |= TEE_MATTR_UX;

	offs_pages = offs_bytes >> SMALL_PAGE_SHIFT;
	num_pages = ROUNDUP(num_bytes, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;

	if (!file_trylock(binh->f)) {
		/*
		 * Before we can block on the file lock we must make all
		 * our page tables available for reclaiming in order to
		 * avoid a dead-lock with the other thread (which already
		 * is holding the file lock) mapping lots of memory below.
		 */
		tee_mmu_set_ctx(NULL);
		file_lock(binh->f);
		tee_mmu_set_ctx(s->ctx);
	}
	file_is_locked = true;
	fs = file_find_slice(binh->f, offs_pages);
	if (fs) {
		/* If there's registered slice it has to match */
		if (fs->page_offset != offs_pages ||
		    num_pages > fs->fobj->num_pages) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		/* If there's a slice we must be mapping shareable */
		if (!(flags & PTA_SYSTEM_MAP_FLAG_SHAREABLE)) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		/* Reuse the already-populated pages of the slice */
		mobj = mobj_with_fobj_alloc(fs->fobj, binh->f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		res = vm_map_pad(&utc->uctx, &va, num_pages * SMALL_PAGE_SIZE,
				 prot, VM_FLAG_READONLY,
				 mobj, 0, pad_begin, pad_end);
		mobj_put(mobj);
		if (res)
			goto err;
	} else {
		struct fobj *f = fobj_ta_mem_alloc(num_pages);
		struct file *file = NULL;
		uint32_t vm_flags = 0;

		if (!f) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		if (!(flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)) {
			file = binh->f;
			vm_flags |= VM_FLAG_READONLY;
		}

		mobj = mobj_with_fobj_alloc(f, file);
		fobj_put(f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		/* Map writeable first so the binary can be copied in */
		res = vm_map_pad(&utc->uctx, &va, num_pages * SMALL_PAGE_SIZE,
				 TEE_MATTR_PRW, vm_flags, mobj, 0,
				 pad_begin, pad_end);
		mobj_put(mobj);
		if (res)
			goto err;
		res = binh_copy_to(binh, va, offs_bytes, num_bytes);
		if (res)
			goto err_unmap_va;
		/* Then drop to the protection the caller asked for */
		res = vm_set_prot(&utc->uctx, va, num_pages * SMALL_PAGE_SIZE,
				  prot);
		if (res)
			goto err_unmap_va;

		/*
		 * The context currently is active set it again to update
		 * the mapping.
		 */
		tee_mmu_set_ctx(s->ctx);

		if (!(flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)) {
			res = file_add_slice(binh->f, f, offs_pages);
			if (res)
				goto err_unmap_va;
		}
	}

	file_unlock(binh->f);

	reg_pair_from_64(va, &params[2].value.a, &params[2].value.b);
	return TEE_SUCCESS;

err_unmap_va:
	if (vm_unmap(&utc->uctx, va, num_pages * SMALL_PAGE_SIZE))
		panic();

	/*
	 * The context currently is active set it again to update
	 * the mapping.
	 */
	tee_mmu_set_ctx(s->ctx);

err:
	if (file_is_locked)
		file_unlock(binh->f);

	return res;
}
504 
505 static TEE_Result system_copy_from_ta_binary(struct system_ctx *ctx,
506 					     uint32_t param_types,
507 					     TEE_Param params[TEE_NUM_PARAMS])
508 {
509 	struct bin_handle *binh = NULL;
510 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
511 					  TEE_PARAM_TYPE_MEMREF_OUTPUT,
512 					  TEE_PARAM_TYPE_NONE,
513 					  TEE_PARAM_TYPE_NONE);
514 
515 	if (exp_pt != param_types)
516 		return TEE_ERROR_BAD_PARAMETERS;
517 
518 	binh = handle_lookup(&ctx->db, params[0].value.a);
519 	if (!binh)
520 		return TEE_ERROR_BAD_PARAMETERS;
521 
522 	return binh_copy_to(binh, (vaddr_t)params[1].memref.buffer,
523 			    params[0].value.b, params[1].memref.size);
524 }
525 
526 static TEE_Result system_set_prot(struct tee_ta_session *s,
527 				  uint32_t param_types,
528 				  TEE_Param params[TEE_NUM_PARAMS])
529 {
530 	const uint32_t accept_flags = PTA_SYSTEM_MAP_FLAG_WRITEABLE |
531 				      PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
532 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
533 					  TEE_PARAM_TYPE_VALUE_INPUT,
534 					  TEE_PARAM_TYPE_NONE,
535 					  TEE_PARAM_TYPE_NONE);
536 	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
537 	uint32_t prot = TEE_MATTR_UR | TEE_MATTR_PR;
538 	TEE_Result res = TEE_SUCCESS;
539 	uint32_t vm_flags = 0;
540 	uint32_t flags = 0;
541 	vaddr_t va = 0;
542 	size_t sz = 0;
543 
544 	if (exp_pt != param_types)
545 		return TEE_ERROR_BAD_PARAMETERS;
546 
547 	flags = params[0].value.b;
548 
549 	if ((flags & accept_flags) != flags)
550 		return TEE_ERROR_BAD_PARAMETERS;
551 	if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)
552 		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
553 	if (flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE)
554 		prot |= TEE_MATTR_UX;
555 
556 	va = reg_pair_to_64(params[1].value.a, params[1].value.b),
557 	sz = ROUNDUP(params[0].value.a, SMALL_PAGE_SIZE);
558 
559 	res = vm_get_flags(&utc->uctx, va, sz, &vm_flags);
560 	if (res)
561 		return res;
562 
563 	/*
564 	 * If the segment is a mapping of a part of a file (vm_flags &
565 	 * VM_FLAG_READONLY) it cannot be made writeable as all mapped
566 	 * files are mapped read-only.
567 	 */
568 	if ((vm_flags & VM_FLAG_READONLY) &&
569 	    (prot & (TEE_MATTR_UW | TEE_MATTR_PW)))
570 		return TEE_ERROR_ACCESS_DENIED;
571 
572 	return vm_set_prot(&utc->uctx, va, sz, prot);
573 }
574 
575 static TEE_Result system_remap(struct tee_ta_session *s, uint32_t param_types,
576 			       TEE_Param params[TEE_NUM_PARAMS])
577 {
578 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
579 					  TEE_PARAM_TYPE_VALUE_INPUT,
580 					  TEE_PARAM_TYPE_VALUE_INOUT,
581 					  TEE_PARAM_TYPE_VALUE_INPUT);
582 	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
583 	TEE_Result res = TEE_SUCCESS;
584 	uint32_t num_bytes = 0;
585 	uint32_t pad_begin = 0;
586 	uint32_t pad_end = 0;
587 	vaddr_t old_va = 0;
588 	vaddr_t new_va = 0;
589 
590 	if (exp_pt != param_types)
591 		return TEE_ERROR_BAD_PARAMETERS;
592 
593 	num_bytes = params[0].value.a;
594 	old_va = reg_pair_to_64(params[1].value.a, params[1].value.b);
595 	new_va = reg_pair_to_64(params[2].value.a, params[2].value.b);
596 	pad_begin = params[3].value.a;
597 	pad_end = params[3].value.b;
598 
599 	res = vm_remap(&utc->uctx, &new_va, old_va, num_bytes, pad_begin,
600 		       pad_end);
601 	if (!res)
602 		reg_pair_from_64(new_va, &params[2].value.a,
603 				 &params[2].value.b);
604 
605 	return res;
606 }
607 
/*
 * ldelf has the same architecture/register width as the kernel, so this
 * flag tells thread_enter_user_mode() which mode to enter.
 */
#ifdef ARM32
static const bool is_arm32 = true;
#else
static const bool is_arm32;
#endif
614 
/*
 * Re-enter user mode at ldelf's dl_entry function to dlopen the TA
 * identified by @uuid with @flags.
 *
 * The argument struct is placed on ldelf's user stack below the current
 * stack pointer; access rights are checked before writing it from
 * kernel mode. Returns TEE_ERROR_TARGET_DEAD if ldelf panicked,
 * otherwise ldelf's own result.
 */
static TEE_Result call_ldelf_dlopen(struct user_ta_ctx *utc, TEE_UUID *uuid,
				    uint32_t flags)
{
	uaddr_t usr_stack = utc->ldelf_stack_ptr;
	TEE_Result res = TEE_ERROR_GENERIC;
	struct dl_entry_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;

	assert(uuid);

	/* Reserve room for the argument struct on ldelf's stack */
	usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
	arg = (struct dl_entry_arg *)usr_stack;

	res = tee_mmu_check_access_rights(&utc->uctx,
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_WRITE |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)arg, sizeof(*arg));
	if (res) {
		EMSG("ldelf stack is inaccessible!");
		return res;
	}

	memset(arg, 0, sizeof(*arg));
	arg->cmd = LDELF_DL_ENTRY_DLOPEN;
	arg->dlopen.uuid = *uuid;
	arg->dlopen.flags = flags;

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, utc->dl_entry_func,
				     is_arm32, &panicked, &panic_code);
	if (panicked) {
		EMSG("ldelf dl_entry function panicked");
		abort_print_current_ta();
		res = TEE_ERROR_TARGET_DEAD;
	}
	if (!res)
		res = arg->ret;

	return res;
}
657 
658 static TEE_Result call_ldelf_dlsym(struct user_ta_ctx *utc, TEE_UUID *uuid,
659 				   const char *sym, size_t maxlen, vaddr_t *val)
660 {
661 	uaddr_t usr_stack = utc->ldelf_stack_ptr;
662 	TEE_Result res = TEE_ERROR_GENERIC;
663 	struct dl_entry_arg *arg = NULL;
664 	uint32_t panic_code = 0;
665 	uint32_t panicked = 0;
666 	size_t len = strnlen(sym, maxlen);
667 
668 	if (len == maxlen)
669 		return TEE_ERROR_BAD_PARAMETERS;
670 
671 	usr_stack -= ROUNDUP(sizeof(*arg) + len + 1, STACK_ALIGNMENT);
672 	arg = (struct dl_entry_arg *)usr_stack;
673 
674 	res = tee_mmu_check_access_rights(&utc->uctx,
675 					  TEE_MEMORY_ACCESS_READ |
676 					  TEE_MEMORY_ACCESS_WRITE |
677 					  TEE_MEMORY_ACCESS_ANY_OWNER,
678 					  (uaddr_t)arg, sizeof(*arg) + len + 1);
679 	if (res) {
680 		EMSG("ldelf stack is inaccessible!");
681 		return res;
682 	}
683 
684 	memset(arg, 0, sizeof(*arg));
685 	arg->cmd = LDELF_DL_ENTRY_DLSYM;
686 	arg->dlsym.uuid = *uuid;
687 	memcpy(arg->dlsym.symbol, sym, len);
688 	arg->dlsym.symbol[len] = '\0';
689 
690 	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
691 				     usr_stack, utc->dl_entry_func,
692 				     is_arm32, &panicked, &panic_code);
693 	if (panicked) {
694 		EMSG("ldelf dl_entry function panicked");
695 		abort_print_current_ta();
696 		res = TEE_ERROR_TARGET_DEAD;
697 	}
698 	if (!res) {
699 		res = arg->ret;
700 		if (!res)
701 			*val = arg->dlsym.val;
702 	}
703 
704 	return res;
705 }
706 
707 static TEE_Result system_dlopen(struct tee_ta_session *cs, uint32_t param_types,
708 				TEE_Param params[TEE_NUM_PARAMS])
709 {
710 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
711 					  TEE_PARAM_TYPE_VALUE_INPUT,
712 					  TEE_PARAM_TYPE_NONE,
713 					  TEE_PARAM_TYPE_NONE);
714 	TEE_Result res = TEE_ERROR_GENERIC;
715 	struct tee_ta_session *s = NULL;
716 	struct user_ta_ctx *utc = NULL;
717 	TEE_UUID *uuid = NULL;
718 	uint32_t flags = 0;
719 
720 	if (exp_pt != param_types)
721 		return TEE_ERROR_BAD_PARAMETERS;
722 
723 	uuid = params[0].memref.buffer;
724 	if (!uuid || params[0].memref.size != sizeof(*uuid))
725 		return TEE_ERROR_BAD_PARAMETERS;
726 
727 	flags = params[1].value.a;
728 
729 	utc = to_user_ta_ctx(cs->ctx);
730 
731 	s = tee_ta_pop_current_session();
732 	res = call_ldelf_dlopen(utc, uuid, flags);
733 	tee_ta_push_current_session(s);
734 
735 	return res;
736 }
737 
738 static TEE_Result system_dlsym(struct tee_ta_session *cs, uint32_t param_types,
739 			       TEE_Param params[TEE_NUM_PARAMS])
740 {
741 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
742 					  TEE_PARAM_TYPE_MEMREF_INPUT,
743 					  TEE_PARAM_TYPE_VALUE_OUTPUT,
744 					  TEE_PARAM_TYPE_NONE);
745 	TEE_Result res = TEE_ERROR_GENERIC;
746 	struct tee_ta_session *s = NULL;
747 	struct user_ta_ctx *utc = NULL;
748 	const char *sym = NULL;
749 	TEE_UUID *uuid = NULL;
750 	size_t maxlen = 0;
751 	vaddr_t va = 0;
752 
753 	if (exp_pt != param_types)
754 		return TEE_ERROR_BAD_PARAMETERS;
755 
756 	uuid = params[0].memref.buffer;
757 	if (uuid && params[0].memref.size != sizeof(*uuid))
758 		return TEE_ERROR_BAD_PARAMETERS;
759 
760 	sym = params[1].memref.buffer;
761 	if (!sym)
762 		return TEE_ERROR_BAD_PARAMETERS;
763 	maxlen = params[1].memref.size;
764 
765 	utc = to_user_ta_ctx(cs->ctx);
766 
767 	s = tee_ta_pop_current_session();
768 	res = call_ldelf_dlsym(utc, uuid, sym, maxlen, &va);
769 	tee_ta_push_current_session(s);
770 
771 	if (!res)
772 		reg_pair_from_64(va, &params[2].value.a, &params[2].value.b);
773 
774 	return res;
775 }
776 
777 static TEE_Result open_session(uint32_t param_types __unused,
778 			       TEE_Param params[TEE_NUM_PARAMS] __unused,
779 			       void **sess_ctx)
780 {
781 	struct tee_ta_session *s = NULL;
782 	struct system_ctx *ctx = NULL;
783 
784 	/* Check that we're called from a user TA */
785 	s = tee_ta_get_calling_session();
786 	if (!s)
787 		return TEE_ERROR_ACCESS_DENIED;
788 	if (!is_user_ta_ctx(s->ctx))
789 		return TEE_ERROR_ACCESS_DENIED;
790 
791 	ctx = calloc(1, sizeof(*ctx));
792 	if (!ctx)
793 		return TEE_ERROR_OUT_OF_MEMORY;
794 
795 	*sess_ctx = ctx;
796 
797 	return TEE_SUCCESS;
798 }
799 
800 static void close_session(void *sess_ctx)
801 {
802 	struct system_ctx *ctx = sess_ctx;
803 
804 	handle_db_destroy(&ctx->db, ta_bin_close);
805 	free(ctx);
806 }
807 
808 static TEE_Result invoke_command(void *sess_ctx, uint32_t cmd_id,
809 				 uint32_t param_types,
810 				 TEE_Param params[TEE_NUM_PARAMS])
811 {
812 	struct tee_ta_session *s = tee_ta_get_calling_session();
813 
814 	switch (cmd_id) {
815 	case PTA_SYSTEM_ADD_RNG_ENTROPY:
816 		return system_rng_reseed(s, param_types, params);
817 	case PTA_SYSTEM_DERIVE_TA_UNIQUE_KEY:
818 		return system_derive_ta_unique_key(s, param_types, params);
819 	case PTA_SYSTEM_MAP_ZI:
820 		return system_map_zi(s, param_types, params);
821 	case PTA_SYSTEM_UNMAP:
822 		return system_unmap(s, param_types, params);
823 	case PTA_SYSTEM_OPEN_TA_BINARY:
824 		return system_open_ta_binary(sess_ctx, param_types, params);
825 	case PTA_SYSTEM_CLOSE_TA_BINARY:
826 		return system_close_ta_binary(sess_ctx, param_types, params);
827 	case PTA_SYSTEM_MAP_TA_BINARY:
828 		return system_map_ta_binary(sess_ctx, s, param_types, params);
829 	case PTA_SYSTEM_COPY_FROM_TA_BINARY:
830 		return system_copy_from_ta_binary(sess_ctx, param_types,
831 						  params);
832 	case PTA_SYSTEM_SET_PROT:
833 		return system_set_prot(s, param_types, params);
834 	case PTA_SYSTEM_REMAP:
835 		return system_remap(s, param_types, params);
836 	case PTA_SYSTEM_DLOPEN:
837 		return system_dlopen(s, param_types, params);
838 	case PTA_SYSTEM_DLSYM:
839 		return system_dlsym(s, param_types, params);
840 	default:
841 		break;
842 	}
843 
844 	return TEE_ERROR_NOT_IMPLEMENTED;
845 }
846 
/*
 * Register the system PTA. TA_FLAG_CONCURRENT allows concurrent
 * invocations from multiple threads.
 */
pseudo_ta_register(.uuid = PTA_SYSTEM_UUID, .name = "system.pta",
		   .flags = PTA_DEFAULT_FLAGS | TA_FLAG_CONCURRENT,
		   .open_session_entry_point = open_session,
		   .close_session_entry_point = close_session,
		   .invoke_command_entry_point = invoke_command);
852