xref: /optee_os/core/pta/system.c (revision 6cb02818e7fb1918ad6e5d112cb3f6f765c46fd0)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2018-2019, Linaro Limited
4  * Copyright (c) 2020, Arm Limited.
5  */
6 
7 #include <assert.h>
8 #include <crypto/crypto.h>
9 #include <kernel/handle.h>
10 #include <kernel/huk_subkey.h>
11 #include <kernel/misc.h>
12 #include <kernel/msg_param.h>
13 #include <kernel/pseudo_ta.h>
14 #include <kernel/tpm.h>
15 #include <kernel/ts_store.h>
16 #include <kernel/user_ta.h>
17 #include <ldelf.h>
18 #include <mm/file.h>
19 #include <mm/fobj.h>
20 #include <mm/vm.h>
21 #include <pta_system.h>
22 #include <stdlib_ext.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <tee_api_defines_extensions.h>
26 #include <tee_api_defines.h>
27 #include <util.h>
28 
/*
 * State of one open TA binary (PTA_SYSTEM_OPEN_TA_BINARY), kept in the
 * per-session handle database until closed.
 */
struct bin_handle {
	const struct ts_store_ops *op;	/* TA store the binary was found in */
	struct ts_store_handle *h;	/* Open handle within that store */
	struct file *f;			/* File object looked up by store tag */
	size_t offs_bytes;		/* Current (forward-only) read offset */
	size_t size_bytes;		/* Total size of the binary */
};
36 
/* Per-session context of the system PTA, allocated in open_session() */
struct system_ctx {
	struct handle_db db;	/* Handles of open TA binaries */
	/* NOTE(review): store_op is never referenced in this file — possibly vestigial */
	const struct ts_store_ops *store_op;
};
41 
/* State cookie passed by reference to crypto_rng_add_event() on reseed */
static unsigned int system_pnum;
43 
44 static TEE_Result system_rng_reseed(struct ts_session *s __unused,
45 				    uint32_t param_types,
46 				    TEE_Param params[TEE_NUM_PARAMS])
47 {
48 	size_t entropy_sz = 0;
49 	uint8_t *entropy_input = NULL;
50 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
51 					  TEE_PARAM_TYPE_NONE,
52 					  TEE_PARAM_TYPE_NONE,
53 					  TEE_PARAM_TYPE_NONE);
54 
55 	if (exp_pt != param_types)
56 		return TEE_ERROR_BAD_PARAMETERS;
57 	entropy_input = params[0].memref.buffer;
58 	entropy_sz = params[0].memref.size;
59 
60 	if (!entropy_sz || !entropy_input)
61 		return TEE_ERROR_BAD_PARAMETERS;
62 
63 	crypto_rng_add_event(CRYPTO_RNG_SRC_NONSECURE, &system_pnum,
64 			     entropy_input, entropy_sz);
65 	return TEE_SUCCESS;
66 }
67 
68 static TEE_Result system_derive_ta_unique_key(struct ts_session *s,
69 					      uint32_t param_types,
70 					      TEE_Param params[TEE_NUM_PARAMS])
71 {
72 	size_t data_len = sizeof(TEE_UUID);
73 	TEE_Result res = TEE_ERROR_GENERIC;
74 	uint8_t *data = NULL;
75 	uint32_t access_flags = 0;
76 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
77 					  TEE_PARAM_TYPE_MEMREF_OUTPUT,
78 					  TEE_PARAM_TYPE_NONE,
79 					  TEE_PARAM_TYPE_NONE);
80 	struct user_ta_ctx *utc = NULL;
81 
82 	if (exp_pt != param_types)
83 		return TEE_ERROR_BAD_PARAMETERS;
84 
85 	if (params[0].memref.size > TA_DERIVED_EXTRA_DATA_MAX_SIZE ||
86 	    params[1].memref.size < TA_DERIVED_KEY_MIN_SIZE ||
87 	    params[1].memref.size > TA_DERIVED_KEY_MAX_SIZE)
88 		return TEE_ERROR_BAD_PARAMETERS;
89 
90 	utc = to_user_ta_ctx(s->ctx);
91 
92 	/*
93 	 * The derived key shall not end up in non-secure memory by
94 	 * mistake.
95 	 *
96 	 * Note that we're allowing shared memory as long as it's
97 	 * secure. This is needed because a TA always uses shared memory
98 	 * when communicating with another TA.
99 	 */
100 	access_flags = TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER |
101 		       TEE_MEMORY_ACCESS_SECURE;
102 	res = vm_check_access_rights(&utc->uctx, access_flags,
103 				     (uaddr_t)params[1].memref.buffer,
104 				     params[1].memref.size);
105 	if (res != TEE_SUCCESS)
106 		return TEE_ERROR_SECURITY;
107 
108 	/* Take extra data into account. */
109 	if (ADD_OVERFLOW(data_len, params[0].memref.size, &data_len))
110 		return TEE_ERROR_SECURITY;
111 
112 	data = calloc(data_len, 1);
113 	if (!data)
114 		return TEE_ERROR_OUT_OF_MEMORY;
115 
116 	memcpy(data, &s->ctx->uuid, sizeof(TEE_UUID));
117 
118 	/* Append the user provided data */
119 	memcpy(data + sizeof(TEE_UUID), params[0].memref.buffer,
120 	       params[0].memref.size);
121 
122 	res = huk_subkey_derive(HUK_SUBKEY_UNIQUE_TA, data, data_len,
123 				params[1].memref.buffer,
124 				params[1].memref.size);
125 	free_wipe(data);
126 
127 	return res;
128 }
129 
130 static TEE_Result system_map_zi(struct ts_session *s, uint32_t param_types,
131 				TEE_Param params[TEE_NUM_PARAMS])
132 {
133 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
134 					  TEE_PARAM_TYPE_VALUE_INOUT,
135 					  TEE_PARAM_TYPE_VALUE_INPUT,
136 					  TEE_PARAM_TYPE_NONE);
137 	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
138 	uint32_t prot = TEE_MATTR_URW | TEE_MATTR_PRW;
139 	TEE_Result res = TEE_ERROR_GENERIC;
140 	struct mobj *mobj = NULL;
141 	uint32_t pad_begin = 0;
142 	uint32_t vm_flags = 0;
143 	struct fobj *f = NULL;
144 	uint32_t pad_end = 0;
145 	size_t num_bytes = 0;
146 	vaddr_t va = 0;
147 
148 	if (exp_pt != param_types)
149 		return TEE_ERROR_BAD_PARAMETERS;
150 	if (params[0].value.b & ~PTA_SYSTEM_MAP_FLAG_SHAREABLE)
151 		return TEE_ERROR_BAD_PARAMETERS;
152 
153 	if (params[0].value.b & PTA_SYSTEM_MAP_FLAG_SHAREABLE)
154 		vm_flags |= VM_FLAG_SHAREABLE;
155 
156 	num_bytes = params[0].value.a;
157 	va = reg_pair_to_64(params[1].value.a, params[1].value.b);
158 	pad_begin = params[2].value.a;
159 	pad_end = params[2].value.b;
160 
161 	f = fobj_ta_mem_alloc(ROUNDUP_DIV(num_bytes, SMALL_PAGE_SIZE));
162 	if (!f)
163 		return TEE_ERROR_OUT_OF_MEMORY;
164 	mobj = mobj_with_fobj_alloc(f, NULL);
165 	fobj_put(f);
166 	if (!mobj)
167 		return TEE_ERROR_OUT_OF_MEMORY;
168 	res = vm_map_pad(&utc->uctx, &va, num_bytes, prot, vm_flags,
169 			 mobj, 0, pad_begin, pad_end, 0);
170 	mobj_put(mobj);
171 	if (!res)
172 		reg_pair_from_64(va, &params[1].value.a, &params[1].value.b);
173 
174 	return res;
175 }
176 
177 static TEE_Result system_unmap(struct ts_session *s, uint32_t param_types,
178 			       TEE_Param params[TEE_NUM_PARAMS])
179 {
180 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
181 					  TEE_PARAM_TYPE_VALUE_INPUT,
182 					  TEE_PARAM_TYPE_NONE,
183 					  TEE_PARAM_TYPE_NONE);
184 	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
185 	TEE_Result res = TEE_SUCCESS;
186 	uint32_t vm_flags = 0;
187 	vaddr_t end_va = 0;
188 	vaddr_t va = 0;
189 	size_t sz = 0;
190 
191 	if (exp_pt != param_types)
192 		return TEE_ERROR_BAD_PARAMETERS;
193 
194 	if (params[0].value.b)
195 		return TEE_ERROR_BAD_PARAMETERS;
196 
197 	va = reg_pair_to_64(params[1].value.a, params[1].value.b);
198 	sz = ROUNDUP(params[0].value.a, SMALL_PAGE_SIZE);
199 
200 	/*
201 	 * The vm_get_flags() and vm_unmap() are supposed to detect or
202 	 * handle overflow directly or indirectly. However, this function
203 	 * an API function so an extra guard here is in order. If nothing
204 	 * else to make it easier to review the code.
205 	 */
206 	if (ADD_OVERFLOW(va, sz, &end_va))
207 		return TEE_ERROR_BAD_PARAMETERS;
208 
209 	res = vm_get_flags(&utc->uctx, va, sz, &vm_flags);
210 	if (res)
211 		return res;
212 	if (vm_flags & VM_FLAG_PERMANENT)
213 		return TEE_ERROR_ACCESS_DENIED;
214 
215 	return vm_unmap(&to_user_ta_ctx(s->ctx)->uctx, va, sz);
216 }
217 
218 static void ta_bin_close(void *ptr)
219 {
220 	struct bin_handle *binh = ptr;
221 
222 	if (binh) {
223 		if (binh->op && binh->h)
224 			binh->op->close(binh->h);
225 		file_put(binh->f);
226 	}
227 	free(binh);
228 }
229 
230 static TEE_Result system_open_ta_binary(struct system_ctx *ctx,
231 					uint32_t param_types,
232 					TEE_Param params[TEE_NUM_PARAMS])
233 {
234 	TEE_Result res = TEE_SUCCESS;
235 	struct bin_handle *binh = NULL;
236 	int h = 0;
237 	TEE_UUID *uuid = NULL;
238 	uint8_t tag[FILE_TAG_SIZE] = { 0 };
239 	unsigned int tag_len = sizeof(tag);
240 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
241 					  TEE_PARAM_TYPE_VALUE_OUTPUT,
242 					  TEE_PARAM_TYPE_NONE,
243 					  TEE_PARAM_TYPE_NONE);
244 
245 	if (exp_pt != param_types)
246 		return TEE_ERROR_BAD_PARAMETERS;
247 	if (params[0].memref.size != sizeof(*uuid))
248 		return TEE_ERROR_BAD_PARAMETERS;
249 
250 	uuid = params[0].memref.buffer;
251 
252 	binh = calloc(1, sizeof(*binh));
253 	if (!binh)
254 		return TEE_ERROR_OUT_OF_MEMORY;
255 
256 	SCATTERED_ARRAY_FOREACH(binh->op, ta_stores, struct ts_store_ops) {
257 		DMSG("Lookup user TA ELF %pUl (%s)",
258 		     (void *)uuid, binh->op->description);
259 
260 		res = binh->op->open(uuid, &binh->h);
261 		DMSG("res=0x%x", res);
262 		if (res != TEE_ERROR_ITEM_NOT_FOUND &&
263 		    res != TEE_ERROR_STORAGE_NOT_AVAILABLE)
264 			break;
265 	}
266 	if (res)
267 		goto err;
268 
269 	res = binh->op->get_size(binh->h, &binh->size_bytes);
270 	if (res)
271 		goto err;
272 	res = binh->op->get_tag(binh->h, tag, &tag_len);
273 	if (res)
274 		goto err;
275 	binh->f = file_get_by_tag(tag, tag_len);
276 	if (!binh->f)
277 		goto err_oom;
278 
279 	h = handle_get(&ctx->db, binh);
280 	if (h < 0)
281 		goto err_oom;
282 	params[0].value.a = h;
283 
284 	return TEE_SUCCESS;
285 err_oom:
286 	res = TEE_ERROR_OUT_OF_MEMORY;
287 err:
288 	ta_bin_close(binh);
289 	return res;
290 }
291 
292 static TEE_Result system_close_ta_binary(struct system_ctx *ctx,
293 					 uint32_t param_types,
294 					 TEE_Param params[TEE_NUM_PARAMS])
295 {
296 	TEE_Result res = TEE_SUCCESS;
297 	struct bin_handle *binh = NULL;
298 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
299 					  TEE_PARAM_TYPE_NONE,
300 					  TEE_PARAM_TYPE_NONE,
301 					  TEE_PARAM_TYPE_NONE);
302 
303 	if (exp_pt != param_types)
304 		return TEE_ERROR_BAD_PARAMETERS;
305 
306 	if (params[0].value.b)
307 		return TEE_ERROR_BAD_PARAMETERS;
308 
309 	binh = handle_put(&ctx->db, params[0].value.a);
310 	if (!binh)
311 		return TEE_ERROR_BAD_PARAMETERS;
312 
313 	if (binh->offs_bytes < binh->size_bytes)
314 		res = binh->op->read(binh->h, NULL,
315 				     binh->size_bytes - binh->offs_bytes);
316 
317 	ta_bin_close(binh);
318 	return res;
319 }
320 
/*
 * Copy @num_bytes of the TA binary, starting at @offs_bytes, to the
 * buffer at @va.
 *
 * The underlying store read position only moves forward and is tracked
 * in binh->offs_bytes:
 * - a request starting before the current position is rejected,
 * - a gap up to @offs_bytes is skipped by reading with a NULL buffer,
 * - a request extending past the end of the binary is zero-filled.
 */
static TEE_Result binh_copy_to(struct bin_handle *binh, vaddr_t va,
			       size_t offs_bytes, size_t num_bytes)
{
	TEE_Result res = TEE_SUCCESS;
	size_t next_offs = 0;

	/* The store cannot seek backwards */
	if (offs_bytes < binh->offs_bytes)
		return TEE_ERROR_BAD_STATE;

	if (ADD_OVERFLOW(offs_bytes, num_bytes, &next_offs))
		return TEE_ERROR_BAD_PARAMETERS;

	if (offs_bytes > binh->offs_bytes) {
		/* Skip forward, discarding the intermediate data */
		res = binh->op->read(binh->h, NULL,
				     offs_bytes - binh->offs_bytes);
		if (res)
			return res;
		binh->offs_bytes = offs_bytes;
	}

	if (next_offs > binh->size_bytes) {
		/* Read what remains of the binary, zero-fill the rest */
		size_t rb = binh->size_bytes - binh->offs_bytes;

		res = binh->op->read(binh->h, (void *)va, rb);
		if (res)
			return res;
		memset((uint8_t *)va + rb, 0, num_bytes - rb);
		binh->offs_bytes = binh->size_bytes;
	} else {
		res = binh->op->read(binh->h, (void *)va, num_bytes);
		if (res)
			return res;
		binh->offs_bytes = next_offs;
	}

	return TEE_SUCCESS;
}
358 
/*
 * PTA_SYSTEM_MAP_TA_BINARY handler: map a page-aligned window of a
 * previously opened TA binary (see system_open_ta_binary()) into the
 * calling TA.
 *
 * params[0].value.a: binary handle, .b: PTA_SYSTEM_MAP_FLAG_* flags
 * params[1].value.a: byte offset (page aligned), .b: number of bytes
 * params[2]: in/out register pair with the virtual address
 * params[3].value.a/.b: pad_begin/pad_end forwarded to vm_map_pad()
 *
 * Read-only mappings are registered as "file slices" so that several
 * TAs mapping the same part of the same binary can share pages.
 */
static TEE_Result system_map_ta_binary(struct system_ctx *ctx,
				       struct ts_session *s,
				       uint32_t param_types,
				       TEE_Param params[TEE_NUM_PARAMS])
{
	const uint32_t accept_flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE |
				      PTA_SYSTEM_MAP_FLAG_WRITEABLE |
				      PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INOUT,
					  TEE_PARAM_TYPE_VALUE_INPUT);
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
	struct bin_handle *binh = NULL;
	uint32_t num_rounded_bytes = 0;
	TEE_Result res = TEE_SUCCESS;
	struct file_slice *fs = NULL;
	bool file_is_locked = false;
	struct mobj *mobj = NULL;
	uint32_t offs_bytes = 0;
	uint32_t offs_pages = 0;
	uint32_t num_bytes = 0;
	uint32_t pad_begin = 0;
	uint32_t pad_end = 0;
	size_t num_pages = 0;
	uint32_t flags = 0;
	uint32_t prot = 0;
	vaddr_t va = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_lookup(&ctx->db, params[0].value.a);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;
	flags = params[0].value.b;
	offs_bytes = params[1].value.a;
	num_bytes = params[1].value.b;
	va = reg_pair_to_64(params[2].value.a, params[2].value.b);
	pad_begin = params[3].value.a;
	pad_end = params[3].value.b;

	if ((flags & accept_flags) != flags)
		return TEE_ERROR_BAD_PARAMETERS;

	/* Shareable mappings are backed by read-only file slices */
	if ((flags & PTA_SYSTEM_MAP_FLAG_SHAREABLE) &&
	    (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	/* Writeable and executable at once is never allowed (W^X) */
	if ((flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE) &&
	    (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	if (offs_bytes & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	prot = TEE_MATTR_UR | TEE_MATTR_PR;
	if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)
		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
	if (flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE)
		prot |= TEE_MATTR_UX;

	offs_pages = offs_bytes >> SMALL_PAGE_SHIFT;
	if (ROUNDUP_OVERFLOW(num_bytes, SMALL_PAGE_SIZE, &num_rounded_bytes))
		return TEE_ERROR_BAD_PARAMETERS;
	num_pages = num_rounded_bytes / SMALL_PAGE_SIZE;

	if (!file_trylock(binh->f)) {
		/*
		 * Before we can block on the file lock we must make all
		 * our page tables available for reclaiming in order to
		 * avoid a dead-lock with the other thread (which already
		 * is holding the file lock) mapping lots of memory below.
		 */
		vm_set_ctx(NULL);
		file_lock(binh->f);
		vm_set_ctx(s->ctx);
	}
	file_is_locked = true;
	fs = file_find_slice(binh->f, offs_pages);
	if (fs) {
		/* If there's registered slice it has to match */
		if (fs->page_offset != offs_pages ||
		    num_pages > fs->fobj->num_pages) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		/* If there's a slice we must be mapping shareable */
		if (!(flags & PTA_SYSTEM_MAP_FLAG_SHAREABLE)) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		mobj = mobj_with_fobj_alloc(fs->fobj, binh->f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		/* Existing slice: map it read-only, data already loaded */
		res = vm_map_pad(&utc->uctx, &va, num_rounded_bytes,
				 prot, VM_FLAG_READONLY,
				 mobj, 0, pad_begin, pad_end, 0);
		mobj_put(mobj);
		if (res)
			goto err;
	} else {
		struct fobj *f = fobj_ta_mem_alloc(num_pages);
		struct file *file = NULL;
		uint32_t vm_flags = 0;

		if (!f) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		if (!(flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)) {
			file = binh->f;
			vm_flags |= VM_FLAG_READONLY;
		}

		mobj = mobj_with_fobj_alloc(f, file);
		/* mobj (if allocated) holds its own fobj reference */
		fobj_put(f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		/* Map writeable first so the binary can be copied in */
		res = vm_map_pad(&utc->uctx, &va, num_rounded_bytes,
				 TEE_MATTR_PRW, vm_flags, mobj, 0,
				 pad_begin, pad_end, 0);
		mobj_put(mobj);
		if (res)
			goto err;
		res = binh_copy_to(binh, va, offs_bytes, num_bytes);
		if (res)
			goto err_unmap_va;
		/* Then restrict to the protection the caller asked for */
		res = vm_set_prot(&utc->uctx, va, num_rounded_bytes,
				  prot);
		if (res)
			goto err_unmap_va;

		/*
		 * The context currently is active set it again to update
		 * the mapping.
		 */
		vm_set_ctx(s->ctx);

		if (!(flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)) {
			res = file_add_slice(binh->f, f, offs_pages);
			if (res)
				goto err_unmap_va;
		}
	}

	file_unlock(binh->f);

	reg_pair_from_64(va, &params[2].value.a, &params[2].value.b);
	return TEE_SUCCESS;

err_unmap_va:
	if (vm_unmap(&utc->uctx, va, num_rounded_bytes))
		panic();

	/*
	 * The context currently is active set it again to update
	 * the mapping.
	 */
	vm_set_ctx(s->ctx);

err:
	if (file_is_locked)
		file_unlock(binh->f);

	return res;
}
532 
533 static TEE_Result system_copy_from_ta_binary(struct system_ctx *ctx,
534 					     uint32_t param_types,
535 					     TEE_Param params[TEE_NUM_PARAMS])
536 {
537 	struct bin_handle *binh = NULL;
538 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
539 					  TEE_PARAM_TYPE_MEMREF_OUTPUT,
540 					  TEE_PARAM_TYPE_NONE,
541 					  TEE_PARAM_TYPE_NONE);
542 
543 	if (exp_pt != param_types)
544 		return TEE_ERROR_BAD_PARAMETERS;
545 
546 	binh = handle_lookup(&ctx->db, params[0].value.a);
547 	if (!binh)
548 		return TEE_ERROR_BAD_PARAMETERS;
549 
550 	return binh_copy_to(binh, (vaddr_t)params[1].memref.buffer,
551 			    params[0].value.b, params[1].memref.size);
552 }
553 
554 static TEE_Result system_set_prot(struct ts_session *s,
555 				  uint32_t param_types,
556 				  TEE_Param params[TEE_NUM_PARAMS])
557 {
558 	const uint32_t accept_flags = PTA_SYSTEM_MAP_FLAG_WRITEABLE |
559 				      PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
560 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
561 					  TEE_PARAM_TYPE_VALUE_INPUT,
562 					  TEE_PARAM_TYPE_NONE,
563 					  TEE_PARAM_TYPE_NONE);
564 	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
565 	uint32_t prot = TEE_MATTR_UR | TEE_MATTR_PR;
566 	TEE_Result res = TEE_SUCCESS;
567 	uint32_t vm_flags = 0;
568 	uint32_t flags = 0;
569 	vaddr_t end_va = 0;
570 	vaddr_t va = 0;
571 	size_t sz = 0;
572 
573 	if (exp_pt != param_types)
574 		return TEE_ERROR_BAD_PARAMETERS;
575 
576 	flags = params[0].value.b;
577 
578 	if ((flags & accept_flags) != flags)
579 		return TEE_ERROR_BAD_PARAMETERS;
580 	if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)
581 		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
582 	if (flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE)
583 		prot |= TEE_MATTR_UX;
584 
585 	va = reg_pair_to_64(params[1].value.a, params[1].value.b);
586 	sz = ROUNDUP(params[0].value.a, SMALL_PAGE_SIZE);
587 
588 	/*
589 	 * The vm_get_flags() and vm_set_prot() are supposed to detect or
590 	 * handle overflow directly or indirectly. However, this function
591 	 * an API function so an extra guard here is in order. If nothing
592 	 * else to make it easier to review the code.
593 	 */
594 	if (ADD_OVERFLOW(va, sz, &end_va))
595 		return TEE_ERROR_BAD_PARAMETERS;
596 
597 	res = vm_get_flags(&utc->uctx, va, sz, &vm_flags);
598 	if (res)
599 		return res;
600 	if (vm_flags & VM_FLAG_PERMANENT)
601 		return TEE_ERROR_ACCESS_DENIED;
602 
603 	/*
604 	 * If the segment is a mapping of a part of a file (vm_flags &
605 	 * VM_FLAG_READONLY) it cannot be made writeable as all mapped
606 	 * files are mapped read-only.
607 	 */
608 	if ((vm_flags & VM_FLAG_READONLY) &&
609 	    (prot & (TEE_MATTR_UW | TEE_MATTR_PW)))
610 		return TEE_ERROR_ACCESS_DENIED;
611 
612 	return vm_set_prot(&utc->uctx, va, sz, prot);
613 }
614 
615 static TEE_Result system_remap(struct ts_session *s, uint32_t param_types,
616 			       TEE_Param params[TEE_NUM_PARAMS])
617 {
618 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
619 					  TEE_PARAM_TYPE_VALUE_INPUT,
620 					  TEE_PARAM_TYPE_VALUE_INOUT,
621 					  TEE_PARAM_TYPE_VALUE_INPUT);
622 	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
623 	TEE_Result res = TEE_SUCCESS;
624 	uint32_t num_bytes = 0;
625 	uint32_t pad_begin = 0;
626 	uint32_t vm_flags = 0;
627 	uint32_t pad_end = 0;
628 	vaddr_t old_va = 0;
629 	vaddr_t new_va = 0;
630 
631 	if (exp_pt != param_types)
632 		return TEE_ERROR_BAD_PARAMETERS;
633 
634 	num_bytes = params[0].value.a;
635 	old_va = reg_pair_to_64(params[1].value.a, params[1].value.b);
636 	new_va = reg_pair_to_64(params[2].value.a, params[2].value.b);
637 	pad_begin = params[3].value.a;
638 	pad_end = params[3].value.b;
639 
640 	res = vm_get_flags(&utc->uctx, old_va, num_bytes, &vm_flags);
641 	if (res)
642 		return res;
643 	if (vm_flags & VM_FLAG_PERMANENT)
644 		return TEE_ERROR_ACCESS_DENIED;
645 
646 	res = vm_remap(&utc->uctx, &new_va, old_va, num_bytes, pad_begin,
647 		       pad_end);
648 	if (!res)
649 		reg_pair_from_64(new_va, &params[2].value.a,
650 				 &params[2].value.b);
651 
652 	return res;
653 }
654 
/* ldelf has the same architecture/register width as the kernel */
#ifdef ARM32
static const bool is_arm32 = true;
#else
/* Static storage is zero-initialized, i.e. false */
static const bool is_arm32;
#endif
661 
/*
 * Enter ldelf in TA @utc with an LDELF_DL_ENTRY_DLOPEN request for
 * @uuid. The dl_entry_arg structure is placed on ldelf's stack, which
 * must be readable and writeable by the TA.
 */
static TEE_Result call_ldelf_dlopen(struct user_ta_ctx *utc, TEE_UUID *uuid,
				    uint32_t flags)
{
	uaddr_t usr_stack = utc->ldelf_stack_ptr;
	TEE_Result res = TEE_ERROR_GENERIC;
	struct dl_entry_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;

	assert(uuid);

	/* Reserve room for the argument structure on ldelf's stack */
	usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
	arg = (struct dl_entry_arg *)usr_stack;

	res = vm_check_access_rights(&utc->uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)arg, sizeof(*arg));
	if (res) {
		EMSG("ldelf stack is inaccessible!");
		return res;
	}

	memset(arg, 0, sizeof(*arg));
	arg->cmd = LDELF_DL_ENTRY_DLOPEN;
	arg->dlopen.uuid = *uuid;
	arg->dlopen.flags = flags;

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, utc->dl_entry_func,
				     is_arm32, &panicked, &panic_code);
	if (panicked) {
		EMSG("ldelf dl_entry function panicked");
		abort_print_current_ta();
		res = TEE_ERROR_TARGET_DEAD;
	}
	if (!res)
		res = arg->ret;	/* Result reported back by ldelf */

	return res;
}
704 
705 static TEE_Result call_ldelf_dlsym(struct user_ta_ctx *utc, TEE_UUID *uuid,
706 				   const char *sym, size_t maxlen, vaddr_t *val)
707 {
708 	uaddr_t usr_stack = utc->ldelf_stack_ptr;
709 	TEE_Result res = TEE_ERROR_GENERIC;
710 	struct dl_entry_arg *arg = NULL;
711 	uint32_t panic_code = 0;
712 	uint32_t panicked = 0;
713 	size_t len = strnlen(sym, maxlen);
714 
715 	if (len == maxlen)
716 		return TEE_ERROR_BAD_PARAMETERS;
717 
718 	usr_stack -= ROUNDUP(sizeof(*arg) + len + 1, STACK_ALIGNMENT);
719 	arg = (struct dl_entry_arg *)usr_stack;
720 
721 	res = vm_check_access_rights(&utc->uctx,
722 				     TEE_MEMORY_ACCESS_READ |
723 				     TEE_MEMORY_ACCESS_WRITE |
724 				     TEE_MEMORY_ACCESS_ANY_OWNER,
725 				     (uaddr_t)arg, sizeof(*arg) + len + 1);
726 	if (res) {
727 		EMSG("ldelf stack is inaccessible!");
728 		return res;
729 	}
730 
731 	memset(arg, 0, sizeof(*arg));
732 	arg->cmd = LDELF_DL_ENTRY_DLSYM;
733 	arg->dlsym.uuid = *uuid;
734 	memcpy(arg->dlsym.symbol, sym, len);
735 	arg->dlsym.symbol[len] = '\0';
736 
737 	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
738 				     usr_stack, utc->dl_entry_func,
739 				     is_arm32, &panicked, &panic_code);
740 	if (panicked) {
741 		EMSG("ldelf dl_entry function panicked");
742 		abort_print_current_ta();
743 		res = TEE_ERROR_TARGET_DEAD;
744 	}
745 	if (!res) {
746 		res = arg->ret;
747 		if (!res)
748 			*val = arg->dlsym.val;
749 	}
750 
751 	return res;
752 }
753 
754 static TEE_Result system_dlopen(struct ts_session *cs, uint32_t param_types,
755 				TEE_Param params[TEE_NUM_PARAMS])
756 {
757 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
758 					  TEE_PARAM_TYPE_VALUE_INPUT,
759 					  TEE_PARAM_TYPE_NONE,
760 					  TEE_PARAM_TYPE_NONE);
761 	TEE_Result res = TEE_ERROR_GENERIC;
762 	struct ts_session *s = NULL;
763 	struct user_ta_ctx *utc = NULL;
764 	TEE_UUID *uuid = NULL;
765 	uint32_t flags = 0;
766 
767 	if (exp_pt != param_types)
768 		return TEE_ERROR_BAD_PARAMETERS;
769 
770 	uuid = params[0].memref.buffer;
771 	if (!uuid || params[0].memref.size != sizeof(*uuid))
772 		return TEE_ERROR_BAD_PARAMETERS;
773 
774 	flags = params[1].value.a;
775 
776 	utc = to_user_ta_ctx(cs->ctx);
777 
778 	s = ts_pop_current_session();
779 	res = call_ldelf_dlopen(utc, uuid, flags);
780 	ts_push_current_session(s);
781 
782 	return res;
783 }
784 
785 static TEE_Result system_dlsym(struct ts_session *cs, uint32_t param_types,
786 			       TEE_Param params[TEE_NUM_PARAMS])
787 {
788 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
789 					  TEE_PARAM_TYPE_MEMREF_INPUT,
790 					  TEE_PARAM_TYPE_VALUE_OUTPUT,
791 					  TEE_PARAM_TYPE_NONE);
792 	TEE_Result res = TEE_ERROR_GENERIC;
793 	struct ts_session *s = NULL;
794 	struct user_ta_ctx *utc = NULL;
795 	const char *sym = NULL;
796 	TEE_UUID *uuid = NULL;
797 	size_t maxlen = 0;
798 	vaddr_t va = 0;
799 
800 	if (exp_pt != param_types)
801 		return TEE_ERROR_BAD_PARAMETERS;
802 
803 	uuid = params[0].memref.buffer;
804 	if (uuid && params[0].memref.size != sizeof(*uuid))
805 		return TEE_ERROR_BAD_PARAMETERS;
806 
807 	sym = params[1].memref.buffer;
808 	if (!sym)
809 		return TEE_ERROR_BAD_PARAMETERS;
810 	maxlen = params[1].memref.size;
811 
812 	utc = to_user_ta_ctx(cs->ctx);
813 
814 	s = ts_pop_current_session();
815 	res = call_ldelf_dlsym(utc, uuid, sym, maxlen, &va);
816 	ts_push_current_session(s);
817 
818 	if (!res)
819 		reg_pair_from_64(va, &params[2].value.a, &params[2].value.b);
820 
821 	return res;
822 }
823 
824 static TEE_Result system_get_tpm_event_log(uint32_t param_types,
825 					   TEE_Param params[TEE_NUM_PARAMS])
826 {
827 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_OUTPUT,
828 					  TEE_PARAM_TYPE_NONE,
829 					  TEE_PARAM_TYPE_NONE,
830 					  TEE_PARAM_TYPE_NONE);
831 	size_t size = 0;
832 	TEE_Result res = TEE_SUCCESS;
833 
834 	if (exp_pt != param_types)
835 		return TEE_ERROR_BAD_PARAMETERS;
836 
837 	size = params[0].memref.size;
838 	res = tpm_get_event_log(params[0].memref.buffer, &size);
839 	params[0].memref.size = size;
840 
841 	return res;
842 }
843 
844 static TEE_Result open_session(uint32_t param_types __unused,
845 			       TEE_Param params[TEE_NUM_PARAMS] __unused,
846 			       void **sess_ctx)
847 {
848 	struct ts_session *s = NULL;
849 	struct system_ctx *ctx = NULL;
850 
851 	/* Check that we're called from a user TA */
852 	s = ts_get_calling_session();
853 	if (!s)
854 		return TEE_ERROR_ACCESS_DENIED;
855 	if (!is_user_ta_ctx(s->ctx))
856 		return TEE_ERROR_ACCESS_DENIED;
857 
858 	ctx = calloc(1, sizeof(*ctx));
859 	if (!ctx)
860 		return TEE_ERROR_OUT_OF_MEMORY;
861 
862 	*sess_ctx = ctx;
863 
864 	return TEE_SUCCESS;
865 }
866 
867 static void close_session(void *sess_ctx)
868 {
869 	struct system_ctx *ctx = sess_ctx;
870 
871 	handle_db_destroy(&ctx->db, ta_bin_close);
872 	free(ctx);
873 }
874 
/*
 * Command dispatcher of the system PTA. @sess_ctx is the struct
 * system_ctx allocated in open_session(); @s is the session of the
 * calling user TA (validated in open_session()).
 */
static TEE_Result invoke_command(void *sess_ctx, uint32_t cmd_id,
				 uint32_t param_types,
				 TEE_Param params[TEE_NUM_PARAMS])
{
	struct ts_session *s = ts_get_calling_session();

	switch (cmd_id) {
	case PTA_SYSTEM_ADD_RNG_ENTROPY:
		return system_rng_reseed(s, param_types, params);
	case PTA_SYSTEM_DERIVE_TA_UNIQUE_KEY:
		return system_derive_ta_unique_key(s, param_types, params);
	case PTA_SYSTEM_MAP_ZI:
		return system_map_zi(s, param_types, params);
	case PTA_SYSTEM_UNMAP:
		return system_unmap(s, param_types, params);
	case PTA_SYSTEM_OPEN_TA_BINARY:
		return system_open_ta_binary(sess_ctx, param_types, params);
	case PTA_SYSTEM_CLOSE_TA_BINARY:
		return system_close_ta_binary(sess_ctx, param_types, params);
	case PTA_SYSTEM_MAP_TA_BINARY:
		return system_map_ta_binary(sess_ctx, s, param_types, params);
	case PTA_SYSTEM_COPY_FROM_TA_BINARY:
		return system_copy_from_ta_binary(sess_ctx, param_types,
						  params);
	case PTA_SYSTEM_SET_PROT:
		return system_set_prot(s, param_types, params);
	case PTA_SYSTEM_REMAP:
		return system_remap(s, param_types, params);
	case PTA_SYSTEM_DLOPEN:
		return system_dlopen(s, param_types, params);
	case PTA_SYSTEM_DLSYM:
		return system_dlsym(s, param_types, params);
	case PTA_SYSTEM_GET_TPM_EVENT_LOG:
		return system_get_tpm_event_log(param_types, params);
	default:
		break;
	}

	return TEE_ERROR_NOT_IMPLEMENTED;
}
915 
/*
 * Register the system PTA. TA_FLAG_CONCURRENT allows parallel
 * invocations; each session has its own struct system_ctx (see
 * open_session()).
 */
pseudo_ta_register(.uuid = PTA_SYSTEM_UUID, .name = "system.pta",
		   .flags = PTA_DEFAULT_FLAGS | TA_FLAG_CONCURRENT,
		   .open_session_entry_point = open_session,
		   .close_session_entry_point = close_session,
		   .invoke_command_entry_point = invoke_command);
921