xref: /optee_os/core/pta/system.c (revision 1e4e976bf8a7b454c3303c3097904a3a2dbd6a83)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018-2019, Linaro Limited
 */

#include <assert.h>
#include <crypto/crypto.h>
#include <kernel/handle.h>
#include <kernel/huk_subkey.h>
#include <kernel/misc.h>
#include <kernel/msg_param.h>
#include <kernel/pseudo_ta.h>
#include <kernel/tpm.h>
#include <kernel/user_ta.h>
#include <kernel/user_ta_store.h>
#include <ldelf.h>
#include <mm/file.h>
#include <mm/fobj.h>
#include <mm/tee_mmu.h>
#include <pta_system.h>
#include <string.h>
#include <tee_api_defines.h>
#include <tee_api_defines_extensions.h>
#include <util.h>

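/*
 * A struct bin_handle tracks one opened TA binary: the TA store it was
 * found in, the store handle, the backing struct file and how much of
 * the binary has been read so far. Handles are kept in the per-session
 * struct system_ctx handle database.
 */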
struct bin_handle {
        const struct user_ta_store_ops *op;
        struct user_ta_store_handle *h;
        struct file *f;
        size_t offs_bytes;
        size_t size_bytes;
};

struct system_ctx {
        struct handle_db db;
        const struct user_ta_store_ops *store_op;
};

static unsigned int system_pnum;

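/*
 * PTA_SYSTEM_ADD_RNG_ENTROPY: feed the caller-provided buffer in
 * params[0] into the crypto RNG as a non-secure entropy source.
 */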
static TEE_Result system_rng_reseed(struct tee_ta_session *s __unused,
                                    uint32_t param_types,
                                    TEE_Param params[TEE_NUM_PARAMS])
{
        size_t entropy_sz;
        uint8_t *entropy_input;
        uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
                                          TEE_PARAM_TYPE_NONE,
                                          TEE_PARAM_TYPE_NONE,
                                          TEE_PARAM_TYPE_NONE);

        if (exp_pt != param_types)
                return TEE_ERROR_BAD_PARAMETERS;
        entropy_input = params[0].memref.buffer;
        entropy_sz = params[0].memref.size;

        if (!entropy_sz || !entropy_input)
                return TEE_ERROR_BAD_PARAMETERS;

        crypto_rng_add_event(CRYPTO_RNG_SRC_NONSECURE, &system_pnum,
                             entropy_input, entropy_sz);
        return TEE_SUCCESS;
}

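/*
 * PTA_SYSTEM_DERIVE_TA_UNIQUE_KEY: derive a key unique to the calling TA
 * from the hardware unique key. The TA UUID concatenated with the
 * optional extra data in params[0] is used as derivation input and the
 * resulting key is written to the secure output buffer in params[1].
 */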
static TEE_Result system_derive_ta_unique_key(struct tee_ta_session *s,
                                              uint32_t param_types,
                                              TEE_Param params[TEE_NUM_PARAMS])
{
        size_t data_len = sizeof(TEE_UUID);
        TEE_Result res = TEE_ERROR_GENERIC;
        uint8_t *data = NULL;
        uint32_t access_flags = 0;
        uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
                                          TEE_PARAM_TYPE_MEMREF_OUTPUT,
                                          TEE_PARAM_TYPE_NONE,
                                          TEE_PARAM_TYPE_NONE);
        struct user_ta_ctx *utc = NULL;

        if (exp_pt != param_types)
                return TEE_ERROR_BAD_PARAMETERS;

        if (params[0].memref.size > TA_DERIVED_EXTRA_DATA_MAX_SIZE ||
            params[1].memref.size < TA_DERIVED_KEY_MIN_SIZE ||
            params[1].memref.size > TA_DERIVED_KEY_MAX_SIZE)
                return TEE_ERROR_BAD_PARAMETERS;

        utc = to_user_ta_ctx(s->ctx);

        /*
         * The derived key shall not end up in non-secure memory by
         * mistake.
         *
         * Note that we're allowing shared memory as long as it's
         * secure. This is needed because a TA always uses shared memory
         * when communicating with another TA.
         */
        access_flags = TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER |
                       TEE_MEMORY_ACCESS_SECURE;
        res = tee_mmu_check_access_rights(&utc->uctx, access_flags,
                                          (uaddr_t)params[1].memref.buffer,
                                          params[1].memref.size);
        if (res != TEE_SUCCESS)
                return TEE_ERROR_SECURITY;

        /* Take extra data into account. */
        if (ADD_OVERFLOW(data_len, params[0].memref.size, &data_len))
                return TEE_ERROR_SECURITY;

        data = calloc(data_len, 1);
        if (!data)
                return TEE_ERROR_OUT_OF_MEMORY;

        memcpy(data, &s->ctx->uuid, sizeof(TEE_UUID));

        /* Append the user provided data */
        memcpy(data + sizeof(TEE_UUID), params[0].memref.buffer,
               params[0].memref.size);

        res = huk_subkey_derive(HUK_SUBKEY_UNIQUE_TA, data, data_len,
                                params[1].memref.buffer,
                                params[1].memref.size);
        free(data);

        return res;
}

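/*
 * PTA_SYSTEM_MAP_ZI: map zero-initialized anonymous memory into the
 * calling TA. params[0] holds the size and flags, params[1] the
 * requested/returned virtual address and params[2] optional padding to
 * leave free before and after the mapping.
 */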
static TEE_Result system_map_zi(struct tee_ta_session *s, uint32_t param_types,
                                TEE_Param params[TEE_NUM_PARAMS])
{
        uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
                                          TEE_PARAM_TYPE_VALUE_INOUT,
                                          TEE_PARAM_TYPE_VALUE_INPUT,
                                          TEE_PARAM_TYPE_NONE);
        struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
        uint32_t prot = TEE_MATTR_URW | TEE_MATTR_PRW;
        TEE_Result res = TEE_ERROR_GENERIC;
        struct mobj *mobj = NULL;
        uint32_t pad_begin = 0;
        uint32_t vm_flags = 0;
        struct fobj *f = NULL;
        uint32_t pad_end = 0;
        size_t num_bytes = 0;
        vaddr_t va = 0;

        if (exp_pt != param_types)
                return TEE_ERROR_BAD_PARAMETERS;
        if (params[0].value.b & ~PTA_SYSTEM_MAP_FLAG_SHAREABLE)
                return TEE_ERROR_BAD_PARAMETERS;

        if (params[0].value.b & PTA_SYSTEM_MAP_FLAG_SHAREABLE)
                vm_flags |= VM_FLAG_SHAREABLE;

        num_bytes = params[0].value.a;
        va = reg_pair_to_64(params[1].value.a, params[1].value.b);
        pad_begin = params[2].value.a;
        pad_end = params[2].value.b;

        f = fobj_ta_mem_alloc(ROUNDUP_DIV(num_bytes, SMALL_PAGE_SIZE));
        if (!f)
                return TEE_ERROR_OUT_OF_MEMORY;
        mobj = mobj_with_fobj_alloc(f, NULL);
        fobj_put(f);
        if (!mobj)
                return TEE_ERROR_OUT_OF_MEMORY;
        res = vm_map_pad(&utc->uctx, &va, num_bytes, prot, vm_flags,
                         mobj, 0, pad_begin, pad_end);
        mobj_put(mobj);
        if (!res)
                reg_pair_from_64(va, &params[1].value.a, &params[1].value.b);

        return res;
}

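/*
 * PTA_SYSTEM_UNMAP: unmap a non-permanent region of the calling TA's
 * address space. The size is rounded up to a page boundary.
 */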
static TEE_Result system_unmap(struct tee_ta_session *s, uint32_t param_types,
                               TEE_Param params[TEE_NUM_PARAMS])
{
        uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
                                          TEE_PARAM_TYPE_VALUE_INPUT,
                                          TEE_PARAM_TYPE_NONE,
                                          TEE_PARAM_TYPE_NONE);
        struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
        TEE_Result res = TEE_SUCCESS;
        uint32_t vm_flags = 0;
        vaddr_t va = 0;
        size_t sz = 0;

        if (exp_pt != param_types)
                return TEE_ERROR_BAD_PARAMETERS;

        if (params[0].value.b)
                return TEE_ERROR_BAD_PARAMETERS;

        va = reg_pair_to_64(params[1].value.a, params[1].value.b);
        sz = ROUNDUP(params[0].value.a, SMALL_PAGE_SIZE);

        res = vm_get_flags(&utc->uctx, va, sz, &vm_flags);
        if (res)
                return res;
        if (vm_flags & VM_FLAG_PERMANENT)
                return TEE_ERROR_ACCESS_DENIED;

        return vm_unmap(&utc->uctx, va, sz);
}

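/*
 * Free a TA binary handle: close it towards the TA store and drop the
 * struct file reference.
 */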
static void ta_bin_close(void *ptr)
{
        struct bin_handle *binh = ptr;

        if (binh) {
                if (binh->op && binh->h)
                        binh->op->close(binh->h);
                file_put(binh->f);
        }
        free(binh);
}

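/*
 * PTA_SYSTEM_OPEN_TA_BINARY: look up the TA ELF identified by the UUID in
 * params[0] in the registered TA stores and return a handle to it in
 * params[1].value.a.
 */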
static TEE_Result system_open_ta_binary(struct system_ctx *ctx,
                                        uint32_t param_types,
                                        TEE_Param params[TEE_NUM_PARAMS])
{
        TEE_Result res = TEE_SUCCESS;
        struct bin_handle *binh = NULL;
        int h = 0;
        TEE_UUID *uuid = NULL;
        uint8_t tag[FILE_TAG_SIZE] = { 0 };
        unsigned int tag_len = sizeof(tag);
        uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
                                          TEE_PARAM_TYPE_VALUE_OUTPUT,
                                          TEE_PARAM_TYPE_NONE,
                                          TEE_PARAM_TYPE_NONE);

        if (exp_pt != param_types)
                return TEE_ERROR_BAD_PARAMETERS;
        if (params[0].memref.size != sizeof(*uuid))
                return TEE_ERROR_BAD_PARAMETERS;

        uuid = params[0].memref.buffer;

        binh = calloc(1, sizeof(*binh));
        if (!binh)
                return TEE_ERROR_OUT_OF_MEMORY;

        SCATTERED_ARRAY_FOREACH(binh->op, ta_stores, struct user_ta_store_ops) {
                DMSG("Lookup user TA ELF %pUl (%s)",
                     (void *)uuid, binh->op->description);

                res = binh->op->open(uuid, &binh->h);
                DMSG("res=0x%x", res);
                if (res != TEE_ERROR_ITEM_NOT_FOUND &&
                    res != TEE_ERROR_STORAGE_NOT_AVAILABLE)
                        break;
        }
        if (res)
                goto err;

        res = binh->op->get_size(binh->h, &binh->size_bytes);
        if (res)
                goto err;
        res = binh->op->get_tag(binh->h, tag, &tag_len);
        if (res)
                goto err;
        binh->f = file_get_by_tag(tag, tag_len);
        if (!binh->f)
                goto err_oom;

        h = handle_get(&ctx->db, binh);
        if (h < 0)
                goto err_oom;
        /* Return the handle in the value output parameter */
        params[1].value.a = h;

        return TEE_SUCCESS;
err_oom:
        res = TEE_ERROR_OUT_OF_MEMORY;
err:
        ta_bin_close(binh);
        return res;
}

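/*
 * PTA_SYSTEM_CLOSE_TA_BINARY: release a TA binary handle. Any part of the
 * binary that has not been read yet is read (and discarded) before the
 * handle is closed.
 */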
static TEE_Result system_close_ta_binary(struct system_ctx *ctx,
                                         uint32_t param_types,
                                         TEE_Param params[TEE_NUM_PARAMS])
{
        TEE_Result res = TEE_SUCCESS;
        struct bin_handle *binh = NULL;
        uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
                                          TEE_PARAM_TYPE_NONE,
                                          TEE_PARAM_TYPE_NONE,
                                          TEE_PARAM_TYPE_NONE);

        if (exp_pt != param_types)
                return TEE_ERROR_BAD_PARAMETERS;

        if (params[0].value.b)
                return TEE_ERROR_BAD_PARAMETERS;

        binh = handle_put(&ctx->db, params[0].value.a);
        if (!binh)
                return TEE_ERROR_BAD_PARAMETERS;

        if (binh->offs_bytes < binh->size_bytes)
                res = binh->op->read(binh->h, NULL,
                                     binh->size_bytes - binh->offs_bytes);

        ta_bin_close(binh);
        return res;
}

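/*
 * Copy num_bytes from the TA binary into the buffer at va, starting at
 * offs_bytes. Reads are sequential: an offset before the current position
 * fails, skipped ranges are read and discarded, and any part of the
 * request beyond the end of the binary is zero-filled.
 */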
static TEE_Result binh_copy_to(struct bin_handle *binh, vaddr_t va,
                               size_t offs_bytes, size_t num_bytes)
{
        TEE_Result res = TEE_SUCCESS;
        size_t l = num_bytes;

        if (offs_bytes < binh->offs_bytes)
                return TEE_ERROR_BAD_STATE;
        if (offs_bytes > binh->offs_bytes) {
                res = binh->op->read(binh->h, NULL,
                                     offs_bytes - binh->offs_bytes);
                if (res)
                        return res;
                binh->offs_bytes = offs_bytes;
        }

        if (binh->offs_bytes + l > binh->size_bytes) {
                size_t rb = binh->size_bytes - binh->offs_bytes;

                res = binh->op->read(binh->h, (void *)va, rb);
                if (res)
                        return res;
                memset((uint8_t *)va + rb, 0, l - rb);
                binh->offs_bytes = binh->size_bytes;
        } else {
                res = binh->op->read(binh->h, (void *)va, l);
                if (res)
                        return res;
                binh->offs_bytes += l;
        }

        return TEE_SUCCESS;
}

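/*
 * PTA_SYSTEM_MAP_TA_BINARY: map a page-aligned part of an opened TA
 * binary into the calling TA. If a matching file slice is already
 * registered it is reused (shareable, read-only mappings only), otherwise
 * the data is copied into newly allocated pages, the requested protection
 * is applied and, for read-only mappings, the pages are registered as a
 * new slice.
 */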
static TEE_Result system_map_ta_binary(struct system_ctx *ctx,
                                       struct tee_ta_session *s,
                                       uint32_t param_types,
                                       TEE_Param params[TEE_NUM_PARAMS])
{
        const uint32_t accept_flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE |
                                      PTA_SYSTEM_MAP_FLAG_WRITEABLE |
                                      PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
        uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
                                          TEE_PARAM_TYPE_VALUE_INPUT,
                                          TEE_PARAM_TYPE_VALUE_INOUT,
                                          TEE_PARAM_TYPE_VALUE_INPUT);
        struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
        struct bin_handle *binh = NULL;
        uint32_t num_rounded_bytes = 0;
        TEE_Result res = TEE_SUCCESS;
        struct file_slice *fs = NULL;
        bool file_is_locked = false;
        struct mobj *mobj = NULL;
        uint32_t offs_bytes = 0;
        uint32_t offs_pages = 0;
        uint32_t num_bytes = 0;
        uint32_t pad_begin = 0;
        uint32_t pad_end = 0;
        size_t num_pages = 0;
        uint32_t flags = 0;
        uint32_t prot = 0;
        vaddr_t va = 0;

        if (exp_pt != param_types)
                return TEE_ERROR_BAD_PARAMETERS;

        binh = handle_lookup(&ctx->db, params[0].value.a);
        if (!binh)
                return TEE_ERROR_BAD_PARAMETERS;
        flags = params[0].value.b;
        offs_bytes = params[1].value.a;
        num_bytes = params[1].value.b;
        va = reg_pair_to_64(params[2].value.a, params[2].value.b);
        pad_begin = params[3].value.a;
        pad_end = params[3].value.b;

        if ((flags & accept_flags) != flags)
                return TEE_ERROR_BAD_PARAMETERS;

        if ((flags & PTA_SYSTEM_MAP_FLAG_SHAREABLE) &&
            (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE))
                return TEE_ERROR_BAD_PARAMETERS;

        if ((flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE) &&
            (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE))
                return TEE_ERROR_BAD_PARAMETERS;

        if (offs_bytes & SMALL_PAGE_MASK)
                return TEE_ERROR_BAD_PARAMETERS;

        prot = TEE_MATTR_UR | TEE_MATTR_PR;
        if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)
                prot |= TEE_MATTR_UW | TEE_MATTR_PW;
        if (flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE)
                prot |= TEE_MATTR_UX;

        offs_pages = offs_bytes >> SMALL_PAGE_SHIFT;
        if (ROUNDUP_OVERFLOW(num_bytes, SMALL_PAGE_SIZE, &num_rounded_bytes))
                return TEE_ERROR_BAD_PARAMETERS;
        num_pages = num_rounded_bytes / SMALL_PAGE_SIZE;

        if (!file_trylock(binh->f)) {
                /*
                 * Before we can block on the file lock we must make all
                 * our page tables available for reclaiming in order to
                 * avoid a deadlock with the other thread (which is
                 * already holding the file lock) mapping lots of memory
                 * below.
                 */
                tee_mmu_set_ctx(NULL);
                file_lock(binh->f);
                tee_mmu_set_ctx(s->ctx);
        }
        file_is_locked = true;
        fs = file_find_slice(binh->f, offs_pages);
        if (fs) {
                /* If there's a registered slice it has to match */
                if (fs->page_offset != offs_pages ||
                    num_pages > fs->fobj->num_pages) {
                        res = TEE_ERROR_BAD_PARAMETERS;
                        goto err;
                }

                /* If there's a slice we must be mapping shareable */
                if (!(flags & PTA_SYSTEM_MAP_FLAG_SHAREABLE)) {
                        res = TEE_ERROR_BAD_PARAMETERS;
                        goto err;
                }

                mobj = mobj_with_fobj_alloc(fs->fobj, binh->f);
                if (!mobj) {
                        res = TEE_ERROR_OUT_OF_MEMORY;
                        goto err;
                }
                res = vm_map_pad(&utc->uctx, &va, num_rounded_bytes,
                                 prot, VM_FLAG_READONLY,
                                 mobj, 0, pad_begin, pad_end);
                mobj_put(mobj);
                if (res)
                        goto err;
        } else {
                struct fobj *f = fobj_ta_mem_alloc(num_pages);
                struct file *file = NULL;
                uint32_t vm_flags = 0;

                if (!f) {
                        res = TEE_ERROR_OUT_OF_MEMORY;
                        goto err;
                }
                if (!(flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)) {
                        file = binh->f;
                        vm_flags |= VM_FLAG_READONLY;
                }

                mobj = mobj_with_fobj_alloc(f, file);
                fobj_put(f);
                if (!mobj) {
                        res = TEE_ERROR_OUT_OF_MEMORY;
                        goto err;
                }
                res = vm_map_pad(&utc->uctx, &va, num_rounded_bytes,
                                 TEE_MATTR_PRW, vm_flags, mobj, 0,
                                 pad_begin, pad_end);
                mobj_put(mobj);
                if (res)
                        goto err;
                res = binh_copy_to(binh, va, offs_bytes, num_bytes);
                if (res)
                        goto err_unmap_va;
                res = vm_set_prot(&utc->uctx, va, num_rounded_bytes,
                                  prot);
                if (res)
                        goto err_unmap_va;

                /*
                 * The context is currently active, set it again to
                 * update the mapping.
                 */
                tee_mmu_set_ctx(s->ctx);

                if (!(flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)) {
                        res = file_add_slice(binh->f, f, offs_pages);
                        if (res)
                                goto err_unmap_va;
                }
        }

        file_unlock(binh->f);

        reg_pair_from_64(va, &params[2].value.a, &params[2].value.b);
        return TEE_SUCCESS;

err_unmap_va:
        if (vm_unmap(&utc->uctx, va, num_rounded_bytes))
                panic();

        /*
         * The context is currently active, set it again to update
         * the mapping.
         */
        tee_mmu_set_ctx(s->ctx);

err:
        if (file_is_locked)
                file_unlock(binh->f);

        return res;
}

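/*
 * PTA_SYSTEM_COPY_FROM_TA_BINARY: copy data from an opened TA binary
 * (handle and offset in params[0]) into the output buffer in params[1].
 */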
static TEE_Result system_copy_from_ta_binary(struct system_ctx *ctx,
                                             uint32_t param_types,
                                             TEE_Param params[TEE_NUM_PARAMS])
{
        struct bin_handle *binh = NULL;
        uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
                                          TEE_PARAM_TYPE_MEMREF_OUTPUT,
                                          TEE_PARAM_TYPE_NONE,
                                          TEE_PARAM_TYPE_NONE);

        if (exp_pt != param_types)
                return TEE_ERROR_BAD_PARAMETERS;

        binh = handle_lookup(&ctx->db, params[0].value.a);
        if (!binh)
                return TEE_ERROR_BAD_PARAMETERS;

        return binh_copy_to(binh, (vaddr_t)params[1].memref.buffer,
                            params[0].value.b, params[1].memref.size);
}

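/*
 * PTA_SYSTEM_SET_PROT: change the protection of an existing mapping.
 * Permanent mappings and attempts to make read-only file mappings
 * writeable are refused.
 */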
static TEE_Result system_set_prot(struct tee_ta_session *s,
                                  uint32_t param_types,
                                  TEE_Param params[TEE_NUM_PARAMS])
{
        const uint32_t accept_flags = PTA_SYSTEM_MAP_FLAG_WRITEABLE |
                                      PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
        uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
                                          TEE_PARAM_TYPE_VALUE_INPUT,
                                          TEE_PARAM_TYPE_NONE,
                                          TEE_PARAM_TYPE_NONE);
        struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
        uint32_t prot = TEE_MATTR_UR | TEE_MATTR_PR;
        TEE_Result res = TEE_SUCCESS;
        uint32_t vm_flags = 0;
        uint32_t flags = 0;
        vaddr_t va = 0;
        size_t sz = 0;

        if (exp_pt != param_types)
                return TEE_ERROR_BAD_PARAMETERS;

        flags = params[0].value.b;

        if ((flags & accept_flags) != flags)
                return TEE_ERROR_BAD_PARAMETERS;
        if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)
                prot |= TEE_MATTR_UW | TEE_MATTR_PW;
        if (flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE)
                prot |= TEE_MATTR_UX;

        va = reg_pair_to_64(params[1].value.a, params[1].value.b);
        sz = ROUNDUP(params[0].value.a, SMALL_PAGE_SIZE);

        res = vm_get_flags(&utc->uctx, va, sz, &vm_flags);
        if (res)
                return res;
        if (vm_flags & VM_FLAG_PERMANENT)
                return TEE_ERROR_ACCESS_DENIED;

        /*
         * If the segment is a mapping of a part of a file (vm_flags &
         * VM_FLAG_READONLY) it cannot be made writeable as all mapped
         * files are mapped read-only.
         */
        if ((vm_flags & VM_FLAG_READONLY) &&
            (prot & (TEE_MATTR_UW | TEE_MATTR_PW)))
                return TEE_ERROR_ACCESS_DENIED;

        return vm_set_prot(&utc->uctx, va, sz, prot);
}

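/*
 * PTA_SYSTEM_REMAP: move an existing non-permanent mapping to a new
 * virtual address, optionally leaving free padding before and after it.
 */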
static TEE_Result system_remap(struct tee_ta_session *s, uint32_t param_types,
                               TEE_Param params[TEE_NUM_PARAMS])
{
        uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
                                          TEE_PARAM_TYPE_VALUE_INPUT,
                                          TEE_PARAM_TYPE_VALUE_INOUT,
                                          TEE_PARAM_TYPE_VALUE_INPUT);
        struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
        TEE_Result res = TEE_SUCCESS;
        uint32_t num_bytes = 0;
        uint32_t pad_begin = 0;
        uint32_t vm_flags = 0;
        uint32_t pad_end = 0;
        vaddr_t old_va = 0;
        vaddr_t new_va = 0;

        if (exp_pt != param_types)
                return TEE_ERROR_BAD_PARAMETERS;

        num_bytes = params[0].value.a;
        old_va = reg_pair_to_64(params[1].value.a, params[1].value.b);
        new_va = reg_pair_to_64(params[2].value.a, params[2].value.b);
        pad_begin = params[3].value.a;
        pad_end = params[3].value.b;

        res = vm_get_flags(&utc->uctx, old_va, num_bytes, &vm_flags);
        if (res)
                return res;
        if (vm_flags & VM_FLAG_PERMANENT)
                return TEE_ERROR_ACCESS_DENIED;

        res = vm_remap(&utc->uctx, &new_va, old_va, num_bytes, pad_begin,
                       pad_end);
        if (!res)
                reg_pair_from_64(new_va, &params[2].value.a,
                                 &params[2].value.b);

        return res;
}

/* ldelf has the same architecture/register width as the kernel */
#ifdef ARM32
static const bool is_arm32 = true;
#else
static const bool is_arm32;
#endif

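/*
 * Enter ldelf's dl_entry function to process a dlopen request on behalf
 * of the calling TA. The request arguments are placed on the ldelf stack.
 */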
static TEE_Result call_ldelf_dlopen(struct user_ta_ctx *utc, TEE_UUID *uuid,
                                    uint32_t flags)
{
        uaddr_t usr_stack = utc->ldelf_stack_ptr;
        TEE_Result res = TEE_ERROR_GENERIC;
        struct dl_entry_arg *arg = NULL;
        uint32_t panic_code = 0;
        uint32_t panicked = 0;

        assert(uuid);

        usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
        arg = (struct dl_entry_arg *)usr_stack;

        res = tee_mmu_check_access_rights(&utc->uctx,
                                          TEE_MEMORY_ACCESS_READ |
                                          TEE_MEMORY_ACCESS_WRITE |
                                          TEE_MEMORY_ACCESS_ANY_OWNER,
                                          (uaddr_t)arg, sizeof(*arg));
        if (res) {
                EMSG("ldelf stack is inaccessible!");
                return res;
        }

        memset(arg, 0, sizeof(*arg));
        arg->cmd = LDELF_DL_ENTRY_DLOPEN;
        arg->dlopen.uuid = *uuid;
        arg->dlopen.flags = flags;

        res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
                                     usr_stack, utc->dl_entry_func,
                                     is_arm32, &panicked, &panic_code);
        if (panicked) {
                EMSG("ldelf dl_entry function panicked");
                abort_print_current_ta();
                res = TEE_ERROR_TARGET_DEAD;
        }
        if (!res)
                res = arg->ret;

        return res;
}

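/*
 * Enter ldelf's dl_entry function to resolve a symbol (dlsym). The symbol
 * name is copied to the ldelf stack together with the request arguments
 * and the resolved address is returned in *val.
 */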
static TEE_Result call_ldelf_dlsym(struct user_ta_ctx *utc, TEE_UUID *uuid,
                                   const char *sym, size_t maxlen, vaddr_t *val)
{
        uaddr_t usr_stack = utc->ldelf_stack_ptr;
        TEE_Result res = TEE_ERROR_GENERIC;
        struct dl_entry_arg *arg = NULL;
        uint32_t panic_code = 0;
        uint32_t panicked = 0;
        size_t len = strnlen(sym, maxlen);

        if (len == maxlen)
                return TEE_ERROR_BAD_PARAMETERS;

        usr_stack -= ROUNDUP(sizeof(*arg) + len + 1, STACK_ALIGNMENT);
        arg = (struct dl_entry_arg *)usr_stack;

        res = tee_mmu_check_access_rights(&utc->uctx,
                                          TEE_MEMORY_ACCESS_READ |
                                          TEE_MEMORY_ACCESS_WRITE |
                                          TEE_MEMORY_ACCESS_ANY_OWNER,
                                          (uaddr_t)arg, sizeof(*arg) + len + 1);
        if (res) {
                EMSG("ldelf stack is inaccessible!");
                return res;
        }

        memset(arg, 0, sizeof(*arg));
        arg->cmd = LDELF_DL_ENTRY_DLSYM;
        arg->dlsym.uuid = *uuid;
        memcpy(arg->dlsym.symbol, sym, len);
        arg->dlsym.symbol[len] = '\0';

        res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
                                     usr_stack, utc->dl_entry_func,
                                     is_arm32, &panicked, &panic_code);
        if (panicked) {
                EMSG("ldelf dl_entry function panicked");
                abort_print_current_ta();
                res = TEE_ERROR_TARGET_DEAD;
        }
        if (!res) {
                res = arg->ret;
                if (!res)
                        *val = arg->dlsym.val;
        }

        return res;
}

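/*
 * PTA_SYSTEM_DLOPEN: ask ldelf to load the ELF identified by the UUID in
 * params[0] into the calling TA, using the flags in params[1].value.a.
 */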
static TEE_Result system_dlopen(struct tee_ta_session *cs, uint32_t param_types,
                                TEE_Param params[TEE_NUM_PARAMS])
{
        uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
                                          TEE_PARAM_TYPE_VALUE_INPUT,
                                          TEE_PARAM_TYPE_NONE,
                                          TEE_PARAM_TYPE_NONE);
        TEE_Result res = TEE_ERROR_GENERIC;
        struct tee_ta_session *s = NULL;
        struct user_ta_ctx *utc = NULL;
        TEE_UUID *uuid = NULL;
        uint32_t flags = 0;

        if (exp_pt != param_types)
                return TEE_ERROR_BAD_PARAMETERS;

        uuid = params[0].memref.buffer;
        if (!uuid || params[0].memref.size != sizeof(*uuid))
                return TEE_ERROR_BAD_PARAMETERS;

        flags = params[1].value.a;

        utc = to_user_ta_ctx(cs->ctx);

        s = tee_ta_pop_current_session();
        res = call_ldelf_dlopen(utc, uuid, flags);
        tee_ta_push_current_session(s);

        return res;
}

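/*
 * PTA_SYSTEM_DLSYM: ask ldelf to look up the symbol named in params[1] in
 * the ELF identified by the UUID in params[0] and return its address in
 * params[2].
 */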
static TEE_Result system_dlsym(struct tee_ta_session *cs, uint32_t param_types,
                               TEE_Param params[TEE_NUM_PARAMS])
{
        uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
                                          TEE_PARAM_TYPE_MEMREF_INPUT,
                                          TEE_PARAM_TYPE_VALUE_OUTPUT,
                                          TEE_PARAM_TYPE_NONE);
        TEE_Result res = TEE_ERROR_GENERIC;
        struct tee_ta_session *s = NULL;
        struct user_ta_ctx *utc = NULL;
        const char *sym = NULL;
        TEE_UUID *uuid = NULL;
        size_t maxlen = 0;
        vaddr_t va = 0;

        if (exp_pt != param_types)
                return TEE_ERROR_BAD_PARAMETERS;

        uuid = params[0].memref.buffer;
        if (uuid && params[0].memref.size != sizeof(*uuid))
                return TEE_ERROR_BAD_PARAMETERS;

        sym = params[1].memref.buffer;
        if (!sym)
                return TEE_ERROR_BAD_PARAMETERS;
        maxlen = params[1].memref.size;

        utc = to_user_ta_ctx(cs->ctx);

        s = tee_ta_pop_current_session();
        res = call_ldelf_dlsym(utc, uuid, sym, maxlen, &va);
        tee_ta_push_current_session(s);

        if (!res)
                reg_pair_from_64(va, &params[2].value.a, &params[2].value.b);

        return res;
}

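/*
 * PTA_SYSTEM_GET_TPM_EVENT_LOG: copy the TPM event log into the output
 * buffer in params[0] and update the memref size with the log size.
 */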
static TEE_Result system_get_tpm_event_log(uint32_t param_types,
                                           TEE_Param params[TEE_NUM_PARAMS])
{
        uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_OUTPUT,
                                          TEE_PARAM_TYPE_NONE,
                                          TEE_PARAM_TYPE_NONE,
                                          TEE_PARAM_TYPE_NONE);
        size_t size = 0;
        TEE_Result res = TEE_SUCCESS;

        if (exp_pt != param_types)
                return TEE_ERROR_BAD_PARAMETERS;

        size = params[0].memref.size;
        res = tpm_get_event_log(params[0].memref.buffer, &size);
        params[0].memref.size = size;

        return res;
}

static TEE_Result open_session(uint32_t param_types __unused,
                               TEE_Param params[TEE_NUM_PARAMS] __unused,
                               void **sess_ctx)
{
        struct tee_ta_session *s = NULL;
        struct system_ctx *ctx = NULL;

        /* Check that we're called from a user TA */
        s = tee_ta_get_calling_session();
        if (!s)
                return TEE_ERROR_ACCESS_DENIED;
        if (!is_user_ta_ctx(s->ctx))
                return TEE_ERROR_ACCESS_DENIED;

        ctx = calloc(1, sizeof(*ctx));
        if (!ctx)
                return TEE_ERROR_OUT_OF_MEMORY;

        *sess_ctx = ctx;

        return TEE_SUCCESS;
}

static void close_session(void *sess_ctx)
{
        struct system_ctx *ctx = sess_ctx;

        handle_db_destroy(&ctx->db, ta_bin_close);
        free(ctx);
}

static TEE_Result invoke_command(void *sess_ctx, uint32_t cmd_id,
                                 uint32_t param_types,
                                 TEE_Param params[TEE_NUM_PARAMS])
{
        struct tee_ta_session *s = tee_ta_get_calling_session();

        switch (cmd_id) {
        case PTA_SYSTEM_ADD_RNG_ENTROPY:
                return system_rng_reseed(s, param_types, params);
        case PTA_SYSTEM_DERIVE_TA_UNIQUE_KEY:
                return system_derive_ta_unique_key(s, param_types, params);
        case PTA_SYSTEM_MAP_ZI:
                return system_map_zi(s, param_types, params);
        case PTA_SYSTEM_UNMAP:
                return system_unmap(s, param_types, params);
        case PTA_SYSTEM_OPEN_TA_BINARY:
                return system_open_ta_binary(sess_ctx, param_types, params);
        case PTA_SYSTEM_CLOSE_TA_BINARY:
                return system_close_ta_binary(sess_ctx, param_types, params);
        case PTA_SYSTEM_MAP_TA_BINARY:
                return system_map_ta_binary(sess_ctx, s, param_types, params);
        case PTA_SYSTEM_COPY_FROM_TA_BINARY:
                return system_copy_from_ta_binary(sess_ctx, param_types,
                                                  params);
        case PTA_SYSTEM_SET_PROT:
                return system_set_prot(s, param_types, params);
        case PTA_SYSTEM_REMAP:
                return system_remap(s, param_types, params);
        case PTA_SYSTEM_DLOPEN:
                return system_dlopen(s, param_types, params);
        case PTA_SYSTEM_DLSYM:
                return system_dlsym(s, param_types, params);
        case PTA_SYSTEM_GET_TPM_EVENT_LOG:
                return system_get_tpm_event_log(param_types, params);
        default:
                break;
        }

        return TEE_ERROR_NOT_IMPLEMENTED;
}

pseudo_ta_register(.uuid = PTA_SYSTEM_UUID, .name = "system.pta",
                   .flags = PTA_DEFAULT_FLAGS | TA_FLAG_CONCURRENT,
                   .open_session_entry_point = open_session,
                   .close_session_entry_point = close_session,
                   .invoke_command_entry_point = invoke_command);