xref: /optee_os/core/pta/system.c (revision 9c34c0c78f5e69de1c18dd644e724a848eee8e94)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2018-2019, Linaro Limited
4  * Copyright (c) 2020, Arm Limited.
5  */
6 
7 #include <assert.h>
8 #include <crypto/crypto.h>
9 #include <kernel/handle.h>
10 #include <kernel/huk_subkey.h>
11 #include <kernel/misc.h>
12 #include <kernel/msg_param.h>
13 #include <kernel/pseudo_ta.h>
14 #include <kernel/tpm.h>
15 #include <kernel/ts_store.h>
16 #include <kernel/user_mode_ctx.h>
17 #include <ldelf.h>
18 #include <mm/file.h>
19 #include <mm/fobj.h>
20 #include <mm/vm.h>
21 #include <pta_system.h>
22 #include <stdlib_ext.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <tee_api_defines_extensions.h>
26 #include <tee_api_defines.h>
27 #include <util.h>
28 
/*
 * Per-handle state for a TA binary opened through this PTA.
 * Created by system_open_ta_binary(), destroyed by ta_bin_close().
 */
struct bin_handle {
	const struct ts_store_ops *op;	/* Store that holds this binary */
	struct ts_store_handle *h;	/* Store-specific handle, owned here */
	struct file *f;			/* File object keyed on the binary's tag */
	size_t offs_bytes;		/* Current (forward-only) read offset */
	size_t size_bytes;		/* Total size of the binary in bytes */
};
36 
/*
 * Per-session context, allocated in open_session() and freed in
 * close_session().
 */
struct system_ctx {
	struct handle_db db;	/* Open TA-binary handles owned by this session */
	const struct ts_store_ops *store_op;
};
41 
42 static unsigned int system_pnum;
43 
44 static TEE_Result system_rng_reseed(uint32_t param_types,
45 				    TEE_Param params[TEE_NUM_PARAMS])
46 {
47 	size_t entropy_sz = 0;
48 	uint8_t *entropy_input = NULL;
49 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
50 					  TEE_PARAM_TYPE_NONE,
51 					  TEE_PARAM_TYPE_NONE,
52 					  TEE_PARAM_TYPE_NONE);
53 
54 	if (exp_pt != param_types)
55 		return TEE_ERROR_BAD_PARAMETERS;
56 	entropy_input = params[0].memref.buffer;
57 	entropy_sz = params[0].memref.size;
58 
59 	if (!entropy_sz || !entropy_input)
60 		return TEE_ERROR_BAD_PARAMETERS;
61 
62 	crypto_rng_add_event(CRYPTO_RNG_SRC_NONSECURE, &system_pnum,
63 			     entropy_input, entropy_sz);
64 	return TEE_SUCCESS;
65 }
66 
/*
 * PTA_SYSTEM_DERIVE_TA_UNIQUE_KEY: derive a key unique to the calling TA
 * from the hardware unique key (HUK).
 *
 * params[0] (memref input):  extra data mixed into the derivation, at most
 *                            TA_DERIVED_EXTRA_DATA_MAX_SIZE bytes
 * params[1] (memref output): derived key; size must be within
 *                            [TA_DERIVED_KEY_MIN_SIZE, TA_DERIVED_KEY_MAX_SIZE]
 *                            and the buffer must be in secure memory
 */
static TEE_Result system_derive_ta_unique_key(struct user_mode_ctx *uctx,
					      uint32_t param_types,
					      TEE_Param params[TEE_NUM_PARAMS])
{
	size_t data_len = sizeof(TEE_UUID);
	TEE_Result res = TEE_ERROR_GENERIC;
	uint8_t *data = NULL;
	uint32_t access_flags = 0;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
					  TEE_PARAM_TYPE_MEMREF_OUTPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	if (params[0].memref.size > TA_DERIVED_EXTRA_DATA_MAX_SIZE ||
	    params[1].memref.size < TA_DERIVED_KEY_MIN_SIZE ||
	    params[1].memref.size > TA_DERIVED_KEY_MAX_SIZE)
		return TEE_ERROR_BAD_PARAMETERS;

	/*
	 * The derived key shall not end up in non-secure memory by
	 * mistake.
	 *
	 * Note that we're allowing shared memory as long as it's
	 * secure. This is needed because a TA always uses shared memory
	 * when communicating with another TA.
	 */
	access_flags = TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER |
		       TEE_MEMORY_ACCESS_SECURE;
	res = vm_check_access_rights(uctx, access_flags,
				     (uaddr_t)params[1].memref.buffer,
				     params[1].memref.size);
	if (res != TEE_SUCCESS)
		return TEE_ERROR_SECURITY;

	/* Take extra data into account. */
	if (ADD_OVERFLOW(data_len, params[0].memref.size, &data_len))
		return TEE_ERROR_SECURITY;

	/* Derivation input is the TA UUID followed by the extra data */
	data = calloc(data_len, 1);
	if (!data)
		return TEE_ERROR_OUT_OF_MEMORY;

	memcpy(data, &uctx->ts_ctx->uuid, sizeof(TEE_UUID));

	/* Append the user provided data */
	memcpy(data + sizeof(TEE_UUID), params[0].memref.buffer,
	       params[0].memref.size);

	res = huk_subkey_derive(HUK_SUBKEY_UNIQUE_TA, data, data_len,
				params[1].memref.buffer,
				params[1].memref.size);
	/* The buffer held key-derivation input: wipe before freeing */
	free_wipe(data);

	return res;
}
125 
126 static TEE_Result system_map_zi(struct user_mode_ctx *uctx,
127 				uint32_t param_types,
128 				TEE_Param params[TEE_NUM_PARAMS])
129 {
130 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
131 					  TEE_PARAM_TYPE_VALUE_INOUT,
132 					  TEE_PARAM_TYPE_VALUE_INPUT,
133 					  TEE_PARAM_TYPE_NONE);
134 	uint32_t prot = TEE_MATTR_URW | TEE_MATTR_PRW;
135 	TEE_Result res = TEE_ERROR_GENERIC;
136 	struct mobj *mobj = NULL;
137 	uint32_t pad_begin = 0;
138 	uint32_t vm_flags = 0;
139 	struct fobj *f = NULL;
140 	uint32_t pad_end = 0;
141 	size_t num_bytes = 0;
142 	vaddr_t va = 0;
143 
144 	if (exp_pt != param_types)
145 		return TEE_ERROR_BAD_PARAMETERS;
146 	if (params[0].value.b & ~PTA_SYSTEM_MAP_FLAG_SHAREABLE)
147 		return TEE_ERROR_BAD_PARAMETERS;
148 
149 	if (params[0].value.b & PTA_SYSTEM_MAP_FLAG_SHAREABLE)
150 		vm_flags |= VM_FLAG_SHAREABLE;
151 
152 	num_bytes = params[0].value.a;
153 	va = reg_pair_to_64(params[1].value.a, params[1].value.b);
154 	pad_begin = params[2].value.a;
155 	pad_end = params[2].value.b;
156 
157 	f = fobj_ta_mem_alloc(ROUNDUP_DIV(num_bytes, SMALL_PAGE_SIZE));
158 	if (!f)
159 		return TEE_ERROR_OUT_OF_MEMORY;
160 	mobj = mobj_with_fobj_alloc(f, NULL);
161 	fobj_put(f);
162 	if (!mobj)
163 		return TEE_ERROR_OUT_OF_MEMORY;
164 	res = vm_map_pad(uctx, &va, num_bytes, prot, vm_flags,
165 			 mobj, 0, pad_begin, pad_end, 0);
166 	mobj_put(mobj);
167 	if (!res)
168 		reg_pair_from_64(va, &params[1].value.a, &params[1].value.b);
169 
170 	return res;
171 }
172 
173 static TEE_Result system_unmap(struct user_mode_ctx *uctx, uint32_t param_types,
174 			       TEE_Param params[TEE_NUM_PARAMS])
175 {
176 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
177 					  TEE_PARAM_TYPE_VALUE_INPUT,
178 					  TEE_PARAM_TYPE_NONE,
179 					  TEE_PARAM_TYPE_NONE);
180 	TEE_Result res = TEE_SUCCESS;
181 	uint32_t vm_flags = 0;
182 	vaddr_t end_va = 0;
183 	vaddr_t va = 0;
184 	size_t sz = 0;
185 
186 	if (exp_pt != param_types)
187 		return TEE_ERROR_BAD_PARAMETERS;
188 
189 	if (params[0].value.b)
190 		return TEE_ERROR_BAD_PARAMETERS;
191 
192 	va = reg_pair_to_64(params[1].value.a, params[1].value.b);
193 	sz = ROUNDUP(params[0].value.a, SMALL_PAGE_SIZE);
194 
195 	/*
196 	 * The vm_get_flags() and vm_unmap() are supposed to detect or
197 	 * handle overflow directly or indirectly. However, this function
198 	 * an API function so an extra guard here is in order. If nothing
199 	 * else to make it easier to review the code.
200 	 */
201 	if (ADD_OVERFLOW(va, sz, &end_va))
202 		return TEE_ERROR_BAD_PARAMETERS;
203 
204 	res = vm_get_flags(uctx, va, sz, &vm_flags);
205 	if (res)
206 		return res;
207 	if (vm_flags & VM_FLAG_PERMANENT)
208 		return TEE_ERROR_ACCESS_DENIED;
209 
210 	return vm_unmap(uctx, va, sz);
211 }
212 
213 static void ta_bin_close(void *ptr)
214 {
215 	struct bin_handle *binh = ptr;
216 
217 	if (binh) {
218 		if (binh->op && binh->h)
219 			binh->op->close(binh->h);
220 		file_put(binh->f);
221 	}
222 	free(binh);
223 }
224 
225 static TEE_Result system_open_ta_binary(struct system_ctx *ctx,
226 					uint32_t param_types,
227 					TEE_Param params[TEE_NUM_PARAMS])
228 {
229 	TEE_Result res = TEE_SUCCESS;
230 	struct bin_handle *binh = NULL;
231 	int h = 0;
232 	TEE_UUID *uuid = NULL;
233 	uint8_t tag[FILE_TAG_SIZE] = { 0 };
234 	unsigned int tag_len = sizeof(tag);
235 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
236 					  TEE_PARAM_TYPE_VALUE_OUTPUT,
237 					  TEE_PARAM_TYPE_NONE,
238 					  TEE_PARAM_TYPE_NONE);
239 
240 	if (exp_pt != param_types)
241 		return TEE_ERROR_BAD_PARAMETERS;
242 	if (params[0].memref.size != sizeof(*uuid))
243 		return TEE_ERROR_BAD_PARAMETERS;
244 
245 	uuid = params[0].memref.buffer;
246 
247 	binh = calloc(1, sizeof(*binh));
248 	if (!binh)
249 		return TEE_ERROR_OUT_OF_MEMORY;
250 
251 	SCATTERED_ARRAY_FOREACH(binh->op, ta_stores, struct ts_store_ops) {
252 		DMSG("Lookup user TA ELF %pUl (%s)",
253 		     (void *)uuid, binh->op->description);
254 
255 		res = binh->op->open(uuid, &binh->h);
256 		DMSG("res=0x%x", res);
257 		if (res != TEE_ERROR_ITEM_NOT_FOUND &&
258 		    res != TEE_ERROR_STORAGE_NOT_AVAILABLE)
259 			break;
260 	}
261 	if (res)
262 		goto err;
263 
264 	res = binh->op->get_size(binh->h, &binh->size_bytes);
265 	if (res)
266 		goto err;
267 	res = binh->op->get_tag(binh->h, tag, &tag_len);
268 	if (res)
269 		goto err;
270 	binh->f = file_get_by_tag(tag, tag_len);
271 	if (!binh->f)
272 		goto err_oom;
273 
274 	h = handle_get(&ctx->db, binh);
275 	if (h < 0)
276 		goto err_oom;
277 	params[0].value.a = h;
278 
279 	return TEE_SUCCESS;
280 err_oom:
281 	res = TEE_ERROR_OUT_OF_MEMORY;
282 err:
283 	ta_bin_close(binh);
284 	return res;
285 }
286 
287 static TEE_Result system_close_ta_binary(struct system_ctx *ctx,
288 					 uint32_t param_types,
289 					 TEE_Param params[TEE_NUM_PARAMS])
290 {
291 	TEE_Result res = TEE_SUCCESS;
292 	struct bin_handle *binh = NULL;
293 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
294 					  TEE_PARAM_TYPE_NONE,
295 					  TEE_PARAM_TYPE_NONE,
296 					  TEE_PARAM_TYPE_NONE);
297 
298 	if (exp_pt != param_types)
299 		return TEE_ERROR_BAD_PARAMETERS;
300 
301 	if (params[0].value.b)
302 		return TEE_ERROR_BAD_PARAMETERS;
303 
304 	binh = handle_put(&ctx->db, params[0].value.a);
305 	if (!binh)
306 		return TEE_ERROR_BAD_PARAMETERS;
307 
308 	if (binh->offs_bytes < binh->size_bytes)
309 		res = binh->op->read(binh->h, NULL,
310 				     binh->size_bytes - binh->offs_bytes);
311 
312 	ta_bin_close(binh);
313 	return res;
314 }
315 
/*
 * Copy @num_bytes of the opened TA binary @binh to the buffer at @va,
 * starting at byte offset @offs_bytes into the binary.
 *
 * The store read position only moves forward: an offset behind the
 * current position is rejected with TEE_ERROR_BAD_STATE, a gap ahead is
 * skipped by reading with a NULL destination. A request extending past
 * the end of the binary is zero-filled.
 */
static TEE_Result binh_copy_to(struct bin_handle *binh, vaddr_t va,
			       size_t offs_bytes, size_t num_bytes)
{
	TEE_Result res = TEE_SUCCESS;
	size_t next_offs = 0;

	/* The store cannot seek backwards */
	if (offs_bytes < binh->offs_bytes)
		return TEE_ERROR_BAD_STATE;

	if (ADD_OVERFLOW(offs_bytes, num_bytes, &next_offs))
		return TEE_ERROR_BAD_PARAMETERS;

	if (offs_bytes > binh->offs_bytes) {
		/* Skip forward; a NULL buffer discards the bytes read */
		res = binh->op->read(binh->h, NULL,
				     offs_bytes - binh->offs_bytes);
		if (res)
			return res;
		binh->offs_bytes = offs_bytes;
	}

	if (next_offs > binh->size_bytes) {
		/* Read what remains of the binary, zero-fill the rest */
		size_t rb = binh->size_bytes - binh->offs_bytes;

		res = binh->op->read(binh->h, (void *)va, rb);
		if (res)
			return res;
		memset((uint8_t *)va + rb, 0, num_bytes - rb);
		binh->offs_bytes = binh->size_bytes;
	} else {
		res = binh->op->read(binh->h, (void *)va, num_bytes);
		if (res)
			return res;
		binh->offs_bytes = next_offs;
	}

	return TEE_SUCCESS;
}
353 
/*
 * PTA_SYSTEM_MAP_TA_BINARY: map part of an opened TA binary into the
 * calling TA's address space.
 *
 * params[0].value.a:   binary handle from PTA_SYSTEM_OPEN_TA_BINARY
 * params[0].value.b:   PTA_SYSTEM_MAP_FLAG_* flags
 * params[1].value.a:   page-aligned byte offset into the binary
 * params[1].value.b:   number of bytes to map
 * params[2]:           [in/out] register pair with the virtual address
 * params[3].value.a/b: pad_begin/pad_end in bytes
 *
 * Read-only content is registered as a file slice so later shareable
 * mappings of the same range reuse the same physical pages.
 */
static TEE_Result system_map_ta_binary(struct system_ctx *ctx,
				       struct user_mode_ctx *uctx,
				       uint32_t param_types,
				       TEE_Param params[TEE_NUM_PARAMS])
{
	const uint32_t accept_flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE |
				      PTA_SYSTEM_MAP_FLAG_WRITEABLE |
				      PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INOUT,
					  TEE_PARAM_TYPE_VALUE_INPUT);
	struct bin_handle *binh = NULL;
	uint32_t num_rounded_bytes = 0;
	TEE_Result res = TEE_SUCCESS;
	struct file_slice *fs = NULL;
	bool file_is_locked = false;
	struct mobj *mobj = NULL;
	uint32_t offs_bytes = 0;
	uint32_t offs_pages = 0;
	uint32_t num_bytes = 0;
	uint32_t pad_begin = 0;
	uint32_t pad_end = 0;
	size_t num_pages = 0;
	uint32_t flags = 0;
	uint32_t prot = 0;
	vaddr_t va = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_lookup(&ctx->db, params[0].value.a);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;
	flags = params[0].value.b;
	offs_bytes = params[1].value.a;
	num_bytes = params[1].value.b;
	va = reg_pair_to_64(params[2].value.a, params[2].value.b);
	pad_begin = params[3].value.a;
	pad_end = params[3].value.b;

	if ((flags & accept_flags) != flags)
		return TEE_ERROR_BAD_PARAMETERS;

	/* Shareable mappings must stay read-only */
	if ((flags & PTA_SYSTEM_MAP_FLAG_SHAREABLE) &&
	    (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	/* W^X: never writeable and executable at the same time */
	if ((flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE) &&
	    (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	if (offs_bytes & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	prot = TEE_MATTR_UR | TEE_MATTR_PR;
	if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)
		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
	if (flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE)
		prot |= TEE_MATTR_UX;

	offs_pages = offs_bytes >> SMALL_PAGE_SHIFT;
	if (ROUNDUP_OVERFLOW(num_bytes, SMALL_PAGE_SIZE, &num_rounded_bytes))
		return TEE_ERROR_BAD_PARAMETERS;
	num_pages = num_rounded_bytes / SMALL_PAGE_SIZE;

	if (!file_trylock(binh->f)) {
		/*
		 * Before we can block on the file lock we must make all
		 * our page tables available for reclaiming in order to
		 * avoid a dead-lock with the other thread (which already
		 * is holding the file lock) mapping lots of memory below.
		 */
		vm_set_ctx(NULL);
		file_lock(binh->f);
		vm_set_ctx(uctx->ts_ctx);
	}
	file_is_locked = true;
	fs = file_find_slice(binh->f, offs_pages);
	if (fs) {
		/* If there's registered slice it has to match */
		if (fs->page_offset != offs_pages ||
		    num_pages > fs->fobj->num_pages) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		/* If there's a slice we must be mapping shareable */
		if (!(flags & PTA_SYSTEM_MAP_FLAG_SHAREABLE)) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		/* Reuse the already-populated pages of the slice */
		mobj = mobj_with_fobj_alloc(fs->fobj, binh->f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		res = vm_map_pad(uctx, &va, num_rounded_bytes,
				 prot, VM_FLAG_READONLY,
				 mobj, 0, pad_begin, pad_end, 0);
		mobj_put(mobj);
		if (res)
			goto err;
	} else {
		/* No slice registered yet: allocate fresh pages and fill them */
		struct fobj *f = fobj_ta_mem_alloc(num_pages);
		struct file *file = NULL;
		uint32_t vm_flags = 0;

		if (!f) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		if (!(flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)) {
			file = binh->f;
			vm_flags |= VM_FLAG_READONLY;
		}

		mobj = mobj_with_fobj_alloc(f, file);
		fobj_put(f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		/* Map kernel-writeable first so the binary can be copied in */
		res = vm_map_pad(uctx, &va, num_rounded_bytes,
				 TEE_MATTR_PRW, vm_flags, mobj, 0,
				 pad_begin, pad_end, 0);
		mobj_put(mobj);
		if (res)
			goto err;
		res = binh_copy_to(binh, va, offs_bytes, num_bytes);
		if (res)
			goto err_unmap_va;
		/* Then restrict to the protection the caller asked for */
		res = vm_set_prot(uctx, va, num_rounded_bytes,
				  prot);
		if (res)
			goto err_unmap_va;

		/*
		 * The context currently is active set it again to update
		 * the mapping.
		 */
		vm_set_ctx(uctx->ts_ctx);

		/* Register read-only content for reuse by later mappings */
		if (!(flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)) {
			res = file_add_slice(binh->f, f, offs_pages);
			if (res)
				goto err_unmap_va;
		}
	}

	file_unlock(binh->f);

	reg_pair_from_64(va, &params[2].value.a, &params[2].value.b);
	return TEE_SUCCESS;

err_unmap_va:
	if (vm_unmap(uctx, va, num_rounded_bytes))
		panic();

	/*
	 * The context currently is active set it again to update
	 * the mapping.
	 */
	vm_set_ctx(uctx->ts_ctx);

err:
	if (file_is_locked)
		file_unlock(binh->f);

	return res;
}
526 
527 static TEE_Result system_copy_from_ta_binary(struct system_ctx *ctx,
528 					     uint32_t param_types,
529 					     TEE_Param params[TEE_NUM_PARAMS])
530 {
531 	struct bin_handle *binh = NULL;
532 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
533 					  TEE_PARAM_TYPE_MEMREF_OUTPUT,
534 					  TEE_PARAM_TYPE_NONE,
535 					  TEE_PARAM_TYPE_NONE);
536 
537 	if (exp_pt != param_types)
538 		return TEE_ERROR_BAD_PARAMETERS;
539 
540 	binh = handle_lookup(&ctx->db, params[0].value.a);
541 	if (!binh)
542 		return TEE_ERROR_BAD_PARAMETERS;
543 
544 	return binh_copy_to(binh, (vaddr_t)params[1].memref.buffer,
545 			    params[0].value.b, params[1].memref.size);
546 }
547 
548 static TEE_Result system_set_prot(struct user_mode_ctx *uctx,
549 				  uint32_t param_types,
550 				  TEE_Param params[TEE_NUM_PARAMS])
551 {
552 	const uint32_t accept_flags = PTA_SYSTEM_MAP_FLAG_WRITEABLE |
553 				      PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
554 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
555 					  TEE_PARAM_TYPE_VALUE_INPUT,
556 					  TEE_PARAM_TYPE_NONE,
557 					  TEE_PARAM_TYPE_NONE);
558 	uint32_t prot = TEE_MATTR_UR | TEE_MATTR_PR;
559 	TEE_Result res = TEE_SUCCESS;
560 	uint32_t vm_flags = 0;
561 	uint32_t flags = 0;
562 	vaddr_t end_va = 0;
563 	vaddr_t va = 0;
564 	size_t sz = 0;
565 
566 	if (exp_pt != param_types)
567 		return TEE_ERROR_BAD_PARAMETERS;
568 
569 	flags = params[0].value.b;
570 
571 	if ((flags & accept_flags) != flags)
572 		return TEE_ERROR_BAD_PARAMETERS;
573 	if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)
574 		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
575 	if (flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE)
576 		prot |= TEE_MATTR_UX;
577 
578 	va = reg_pair_to_64(params[1].value.a, params[1].value.b);
579 	sz = ROUNDUP(params[0].value.a, SMALL_PAGE_SIZE);
580 
581 	/*
582 	 * The vm_get_flags() and vm_set_prot() are supposed to detect or
583 	 * handle overflow directly or indirectly. However, this function
584 	 * an API function so an extra guard here is in order. If nothing
585 	 * else to make it easier to review the code.
586 	 */
587 	if (ADD_OVERFLOW(va, sz, &end_va))
588 		return TEE_ERROR_BAD_PARAMETERS;
589 
590 	res = vm_get_flags(uctx, va, sz, &vm_flags);
591 	if (res)
592 		return res;
593 	if (vm_flags & VM_FLAG_PERMANENT)
594 		return TEE_ERROR_ACCESS_DENIED;
595 
596 	/*
597 	 * If the segment is a mapping of a part of a file (vm_flags &
598 	 * VM_FLAG_READONLY) it cannot be made writeable as all mapped
599 	 * files are mapped read-only.
600 	 */
601 	if ((vm_flags & VM_FLAG_READONLY) &&
602 	    (prot & (TEE_MATTR_UW | TEE_MATTR_PW)))
603 		return TEE_ERROR_ACCESS_DENIED;
604 
605 	return vm_set_prot(uctx, va, sz, prot);
606 }
607 
608 static TEE_Result system_remap(struct user_mode_ctx *uctx, uint32_t param_types,
609 			       TEE_Param params[TEE_NUM_PARAMS])
610 {
611 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
612 					  TEE_PARAM_TYPE_VALUE_INPUT,
613 					  TEE_PARAM_TYPE_VALUE_INOUT,
614 					  TEE_PARAM_TYPE_VALUE_INPUT);
615 	TEE_Result res = TEE_SUCCESS;
616 	uint32_t num_bytes = 0;
617 	uint32_t pad_begin = 0;
618 	uint32_t vm_flags = 0;
619 	uint32_t pad_end = 0;
620 	vaddr_t old_va = 0;
621 	vaddr_t new_va = 0;
622 
623 	if (exp_pt != param_types)
624 		return TEE_ERROR_BAD_PARAMETERS;
625 
626 	num_bytes = params[0].value.a;
627 	old_va = reg_pair_to_64(params[1].value.a, params[1].value.b);
628 	new_va = reg_pair_to_64(params[2].value.a, params[2].value.b);
629 	pad_begin = params[3].value.a;
630 	pad_end = params[3].value.b;
631 
632 	res = vm_get_flags(uctx, old_va, num_bytes, &vm_flags);
633 	if (res)
634 		return res;
635 	if (vm_flags & VM_FLAG_PERMANENT)
636 		return TEE_ERROR_ACCESS_DENIED;
637 
638 	res = vm_remap(uctx, &new_va, old_va, num_bytes, pad_begin,
639 		       pad_end);
640 	if (!res)
641 		reg_pair_from_64(new_va, &params[2].value.a,
642 				 &params[2].value.b);
643 
644 	return res;
645 }
646 
/* ldelf has the same architecture/register width as the kernel */
#ifdef ARM32
static const bool is_arm32 = true;
#else
static const bool is_arm32;	/* zero-initialized, i.e. false (64-bit) */
#endif
653 
/*
 * Enter ldelf's dl_entry function in user mode to dlopen the library
 * identified by @uuid on behalf of the calling TA.
 *
 * A struct dl_entry_arg carved out below the ldelf stack pointer carries
 * the command in and the result back out.
 */
static TEE_Result call_ldelf_dlopen(struct user_mode_ctx *uctx, TEE_UUID *uuid,
				    uint32_t flags)
{
	uaddr_t usr_stack = uctx->ldelf_stack_ptr;
	TEE_Result res = TEE_ERROR_GENERIC;
	struct dl_entry_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;

	assert(uuid);

	/* Reserve aligned stack space for the argument structure */
	usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
	arg = (struct dl_entry_arg *)usr_stack;

	/* The argument area on the user stack must be mapped read/write */
	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)arg, sizeof(*arg));
	if (res) {
		EMSG("ldelf stack is inaccessible!");
		return res;
	}

	memset(arg, 0, sizeof(*arg));
	arg->cmd = LDELF_DL_ENTRY_DLOPEN;
	arg->dlopen.uuid = *uuid;
	arg->dlopen.flags = flags;

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, uctx->dl_entry_func,
				     is_arm32, &panicked, &panic_code);
	if (panicked) {
		/* A panic in ldelf kills the TA */
		EMSG("ldelf dl_entry function panicked");
		abort_print_current_ta();
		res = TEE_ERROR_TARGET_DEAD;
	}
	if (!res)
		res = arg->ret;

	return res;
}
696 
/*
 * Enter ldelf's dl_entry function in user mode to resolve @sym (at most
 * @maxlen bytes including the terminator) and return its address in @val.
 *
 * The argument structure plus a bounded copy of the symbol name are
 * carved out below the ldelf stack pointer.
 *
 * NOTE(review): @uuid is dereferenced unconditionally below, while
 * system_dlsym()'s original check lets a NULL UUID memref through --
 * confirm callers never pass NULL here.
 */
static TEE_Result call_ldelf_dlsym(struct user_mode_ctx *uctx, TEE_UUID *uuid,
				   const char *sym, size_t maxlen, vaddr_t *val)
{
	uaddr_t usr_stack = uctx->ldelf_stack_ptr;
	TEE_Result res = TEE_ERROR_GENERIC;
	struct dl_entry_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	size_t len = strnlen(sym, maxlen);

	/* The symbol name must be NUL-terminated within the buffer */
	if (len == maxlen)
		return TEE_ERROR_BAD_PARAMETERS;

	/* Reserve stack space for the argument struct and the name copy */
	usr_stack -= ROUNDUP(sizeof(*arg) + len + 1, STACK_ALIGNMENT);
	arg = (struct dl_entry_arg *)usr_stack;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)arg, sizeof(*arg) + len + 1);
	if (res) {
		EMSG("ldelf stack is inaccessible!");
		return res;
	}

	memset(arg, 0, sizeof(*arg));
	arg->cmd = LDELF_DL_ENTRY_DLSYM;
	arg->dlsym.uuid = *uuid;
	memcpy(arg->dlsym.symbol, sym, len);
	arg->dlsym.symbol[len] = '\0';

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, uctx->dl_entry_func,
				     is_arm32, &panicked, &panic_code);
	if (panicked) {
		/* A panic in ldelf kills the TA */
		EMSG("ldelf dl_entry function panicked");
		abort_print_current_ta();
		res = TEE_ERROR_TARGET_DEAD;
	}
	if (!res) {
		res = arg->ret;
		if (!res)
			*val = arg->dlsym.val;
	}

	return res;
}
745 
746 static TEE_Result system_dlopen(struct user_mode_ctx *uctx,
747 				uint32_t param_types,
748 				TEE_Param params[TEE_NUM_PARAMS])
749 {
750 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
751 					  TEE_PARAM_TYPE_VALUE_INPUT,
752 					  TEE_PARAM_TYPE_NONE,
753 					  TEE_PARAM_TYPE_NONE);
754 	TEE_Result res = TEE_ERROR_GENERIC;
755 	struct ts_session *s = NULL;
756 	TEE_UUID *uuid = NULL;
757 	uint32_t flags = 0;
758 
759 	if (exp_pt != param_types)
760 		return TEE_ERROR_BAD_PARAMETERS;
761 
762 	uuid = params[0].memref.buffer;
763 	if (!uuid || params[0].memref.size != sizeof(*uuid))
764 		return TEE_ERROR_BAD_PARAMETERS;
765 
766 	flags = params[1].value.a;
767 
768 	s = ts_pop_current_session();
769 	res = call_ldelf_dlopen(uctx, uuid, flags);
770 	ts_push_current_session(s);
771 
772 	return res;
773 }
774 
775 static TEE_Result system_dlsym(struct user_mode_ctx *uctx, uint32_t param_types,
776 			       TEE_Param params[TEE_NUM_PARAMS])
777 {
778 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
779 					  TEE_PARAM_TYPE_MEMREF_INPUT,
780 					  TEE_PARAM_TYPE_VALUE_OUTPUT,
781 					  TEE_PARAM_TYPE_NONE);
782 	TEE_Result res = TEE_ERROR_GENERIC;
783 	struct ts_session *s = NULL;
784 	const char *sym = NULL;
785 	TEE_UUID *uuid = NULL;
786 	size_t maxlen = 0;
787 	vaddr_t va = 0;
788 
789 	if (exp_pt != param_types)
790 		return TEE_ERROR_BAD_PARAMETERS;
791 
792 	uuid = params[0].memref.buffer;
793 	if (uuid && params[0].memref.size != sizeof(*uuid))
794 		return TEE_ERROR_BAD_PARAMETERS;
795 
796 	sym = params[1].memref.buffer;
797 	if (!sym)
798 		return TEE_ERROR_BAD_PARAMETERS;
799 	maxlen = params[1].memref.size;
800 
801 	s = ts_pop_current_session();
802 	res = call_ldelf_dlsym(uctx, uuid, sym, maxlen, &va);
803 	ts_push_current_session(s);
804 
805 	if (!res)
806 		reg_pair_from_64(va, &params[2].value.a, &params[2].value.b);
807 
808 	return res;
809 }
810 
811 static TEE_Result system_get_tpm_event_log(uint32_t param_types,
812 					   TEE_Param params[TEE_NUM_PARAMS])
813 {
814 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_OUTPUT,
815 					  TEE_PARAM_TYPE_NONE,
816 					  TEE_PARAM_TYPE_NONE,
817 					  TEE_PARAM_TYPE_NONE);
818 	size_t size = 0;
819 	TEE_Result res = TEE_SUCCESS;
820 
821 	if (exp_pt != param_types)
822 		return TEE_ERROR_BAD_PARAMETERS;
823 
824 	size = params[0].memref.size;
825 	res = tpm_get_event_log(params[0].memref.buffer, &size);
826 	params[0].memref.size = size;
827 
828 	return res;
829 }
830 
831 static TEE_Result open_session(uint32_t param_types __unused,
832 			       TEE_Param params[TEE_NUM_PARAMS] __unused,
833 			       void **sess_ctx)
834 {
835 	struct ts_session *s = NULL;
836 	struct system_ctx *ctx = NULL;
837 
838 	/* Check that we're called from a user TA */
839 	s = ts_get_calling_session();
840 	if (!s)
841 		return TEE_ERROR_ACCESS_DENIED;
842 	if (!is_user_ta_ctx(s->ctx))
843 		return TEE_ERROR_ACCESS_DENIED;
844 
845 	ctx = calloc(1, sizeof(*ctx));
846 	if (!ctx)
847 		return TEE_ERROR_OUT_OF_MEMORY;
848 
849 	*sess_ctx = ctx;
850 
851 	return TEE_SUCCESS;
852 }
853 
/* Session destructor: releases any TA-binary handles still open */
static void close_session(void *sess_ctx)
{
	struct system_ctx *ctx = sess_ctx;

	/* ta_bin_close() is invoked for every handle left in the db */
	handle_db_destroy(&ctx->db, ta_bin_close);
	free(ctx);
}
861 
/*
 * Dispatch an invocation from a user TA to the matching PTA_SYSTEM_*
 * command handler above.
 */
static TEE_Result invoke_command(void *sess_ctx, uint32_t cmd_id,
				 uint32_t param_types,
				 TEE_Param params[TEE_NUM_PARAMS])
{
	/* open_session() guarantees the caller is a user TA */
	struct ts_session *s = ts_get_calling_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(s->ctx);

	switch (cmd_id) {
	case PTA_SYSTEM_ADD_RNG_ENTROPY:
		return system_rng_reseed(param_types, params);
	case PTA_SYSTEM_DERIVE_TA_UNIQUE_KEY:
		return system_derive_ta_unique_key(uctx, param_types, params);
	case PTA_SYSTEM_MAP_ZI:
		return system_map_zi(uctx, param_types, params);
	case PTA_SYSTEM_UNMAP:
		return system_unmap(uctx, param_types, params);
	case PTA_SYSTEM_OPEN_TA_BINARY:
		return system_open_ta_binary(sess_ctx, param_types, params);
	case PTA_SYSTEM_CLOSE_TA_BINARY:
		return system_close_ta_binary(sess_ctx, param_types, params);
	case PTA_SYSTEM_MAP_TA_BINARY:
		return system_map_ta_binary(sess_ctx, uctx, param_types,
					    params);
	case PTA_SYSTEM_COPY_FROM_TA_BINARY:
		return system_copy_from_ta_binary(sess_ctx, param_types,
						  params);
	case PTA_SYSTEM_SET_PROT:
		return system_set_prot(uctx, param_types, params);
	case PTA_SYSTEM_REMAP:
		return system_remap(uctx, param_types, params);
	case PTA_SYSTEM_DLOPEN:
		return system_dlopen(uctx, param_types, params);
	case PTA_SYSTEM_DLSYM:
		return system_dlsym(uctx, param_types, params);
	case PTA_SYSTEM_GET_TPM_EVENT_LOG:
		return system_get_tpm_event_log(param_types, params);
	default:
		break;
	}

	return TEE_ERROR_NOT_IMPLEMENTED;
}
904 
/*
 * Register the system PTA. TA_FLAG_CONCURRENT allows parallel sessions;
 * each session gets its own struct system_ctx from open_session().
 */
pseudo_ta_register(.uuid = PTA_SYSTEM_UUID, .name = "system.pta",
		   .flags = PTA_DEFAULT_FLAGS | TA_FLAG_CONCURRENT,
		   .open_session_entry_point = open_session,
		   .close_session_entry_point = close_session,
		   .invoke_command_entry_point = invoke_command);
910