xref: /optee_os/core/pta/system.c (revision a8e39e9c3da6dc7a0c962d465d0a826beef563fd)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2018-2019, Linaro Limited
4  */
5 
6 #include <assert.h>
7 #include <crypto/crypto.h>
8 #include <kernel/handle.h>
9 #include <kernel/huk_subkey.h>
10 #include <kernel/misc.h>
11 #include <kernel/msg_param.h>
12 #include <kernel/pseudo_ta.h>
13 #include <kernel/tpm.h>
14 #include <kernel/user_ta.h>
15 #include <kernel/user_ta_store.h>
16 #include <ldelf.h>
17 #include <mm/file.h>
18 #include <mm/fobj.h>
19 #include <mm/tee_mmu.h>
20 #include <pta_system.h>
21 #include <stdlib_ext.h>
22 #include <stdlib.h>
23 #include <string.h>
24 #include <tee_api_defines_extensions.h>
25 #include <tee_api_defines.h>
26 #include <util.h>
27 
/*
 * State of one TA binary opened through PTA_SYSTEM_OPEN_TA_BINARY.
 * The store only supports forward (sequential) reads, so the handle
 * tracks how far into the binary it has read so far.
 */
struct bin_handle {
	const struct user_ta_store_ops *op;	/* store that provided the binary */
	struct user_ta_store_handle *h;		/* store-specific open handle */
	struct file *f;		/* file object used to share mappings by tag */
	size_t offs_bytes;	/* current sequential read offset */
	size_t size_bytes;	/* total size of the binary */
};
35 
/* Per-session context, allocated in open_session() */
struct system_ctx {
	struct handle_db db;	/* maps client handles to struct bin_handle */
	/* NOTE(review): store_op is not referenced in this file — confirm use */
	const struct user_ta_store_ops *store_op;
};
40 
/* Pool number passed to crypto_rng_add_event() for this entropy source */
static unsigned int system_pnum;
42 
43 static TEE_Result system_rng_reseed(struct tee_ta_session *s __unused,
44 				uint32_t param_types,
45 				TEE_Param params[TEE_NUM_PARAMS])
46 {
47 	size_t entropy_sz;
48 	uint8_t *entropy_input;
49 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
50 					  TEE_PARAM_TYPE_NONE,
51 					  TEE_PARAM_TYPE_NONE,
52 					  TEE_PARAM_TYPE_NONE);
53 
54 	if (exp_pt != param_types)
55 		return TEE_ERROR_BAD_PARAMETERS;
56 	entropy_input = params[0].memref.buffer;
57 	entropy_sz = params[0].memref.size;
58 
59 	if (!entropy_sz || !entropy_input)
60 		return TEE_ERROR_BAD_PARAMETERS;
61 
62 	crypto_rng_add_event(CRYPTO_RNG_SRC_NONSECURE, &system_pnum,
63 			     entropy_input, entropy_sz);
64 	return TEE_SUCCESS;
65 }
66 
/*
 * Derive a key that is unique to the calling TA, rooted in the
 * hardware unique key (HUK) via huk_subkey_derive().
 *
 * params[0] (memref input): optional extra data mixed into the
 *	derivation; at most TA_DERIVED_EXTRA_DATA_MAX_SIZE bytes.
 * params[1] (memref output): receives the derived key; its size must
 *	lie in [TA_DERIVED_KEY_MIN_SIZE, TA_DERIVED_KEY_MAX_SIZE] and
 *	the buffer must be writable secure memory.
 */
static TEE_Result system_derive_ta_unique_key(struct tee_ta_session *s,
					      uint32_t param_types,
					      TEE_Param params[TEE_NUM_PARAMS])
{
	size_t data_len = sizeof(TEE_UUID);
	TEE_Result res = TEE_ERROR_GENERIC;
	uint8_t *data = NULL;
	uint32_t access_flags = 0;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
					  TEE_PARAM_TYPE_MEMREF_OUTPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);
	struct user_ta_ctx *utc = NULL;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	if (params[0].memref.size > TA_DERIVED_EXTRA_DATA_MAX_SIZE ||
	    params[1].memref.size < TA_DERIVED_KEY_MIN_SIZE ||
	    params[1].memref.size > TA_DERIVED_KEY_MAX_SIZE)
		return TEE_ERROR_BAD_PARAMETERS;

	utc = to_user_ta_ctx(s->ctx);

	/*
	 * The derived key shall not end up in non-secure memory by
	 * mistake.
	 *
	 * Note that we're allowing shared memory as long as it's
	 * secure. This is needed because a TA always uses shared memory
	 * when communicating with another TA.
	 */
	access_flags = TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER |
		       TEE_MEMORY_ACCESS_SECURE;
	res = tee_mmu_check_access_rights(&utc->uctx, access_flags,
					  (uaddr_t)params[1].memref.buffer,
					  params[1].memref.size);
	if (res != TEE_SUCCESS)
		return TEE_ERROR_SECURITY;

	/* Take extra data into account. */
	if (ADD_OVERFLOW(data_len, params[0].memref.size, &data_len))
		return TEE_ERROR_SECURITY;

	data = calloc(data_len, 1);
	if (!data)
		return TEE_ERROR_OUT_OF_MEMORY;

	/* Diversify the derivation on the calling TA's UUID */
	memcpy(data, &s->ctx->uuid, sizeof(TEE_UUID));

	/* Append the user provided data */
	memcpy(data + sizeof(TEE_UUID), params[0].memref.buffer,
	       params[0].memref.size);

	res = huk_subkey_derive(HUK_SUBKEY_UNIQUE_TA, data, data_len,
				params[1].memref.buffer,
				params[1].memref.size);
	/* The buffer fed a key derivation; wipe it before freeing */
	free_wipe(data);

	return res;
}
128 
129 static TEE_Result system_map_zi(struct tee_ta_session *s, uint32_t param_types,
130 				TEE_Param params[TEE_NUM_PARAMS])
131 {
132 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
133 					  TEE_PARAM_TYPE_VALUE_INOUT,
134 					  TEE_PARAM_TYPE_VALUE_INPUT,
135 					  TEE_PARAM_TYPE_NONE);
136 	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
137 	uint32_t prot = TEE_MATTR_URW | TEE_MATTR_PRW;
138 	TEE_Result res = TEE_ERROR_GENERIC;
139 	struct mobj *mobj = NULL;
140 	uint32_t pad_begin = 0;
141 	uint32_t vm_flags = 0;
142 	struct fobj *f = NULL;
143 	uint32_t pad_end = 0;
144 	size_t num_bytes = 0;
145 	vaddr_t va = 0;
146 
147 	if (exp_pt != param_types)
148 		return TEE_ERROR_BAD_PARAMETERS;
149 	if (params[0].value.b & ~PTA_SYSTEM_MAP_FLAG_SHAREABLE)
150 		return TEE_ERROR_BAD_PARAMETERS;
151 
152 	if (params[0].value.b & PTA_SYSTEM_MAP_FLAG_SHAREABLE)
153 		vm_flags |= VM_FLAG_SHAREABLE;
154 
155 	num_bytes = params[0].value.a;
156 	va = reg_pair_to_64(params[1].value.a, params[1].value.b);
157 	pad_begin = params[2].value.a;
158 	pad_end = params[2].value.b;
159 
160 	f = fobj_ta_mem_alloc(ROUNDUP_DIV(num_bytes, SMALL_PAGE_SIZE));
161 	if (!f)
162 		return TEE_ERROR_OUT_OF_MEMORY;
163 	mobj = mobj_with_fobj_alloc(f, NULL);
164 	fobj_put(f);
165 	if (!mobj)
166 		return TEE_ERROR_OUT_OF_MEMORY;
167 	res = vm_map_pad(&utc->uctx, &va, num_bytes, prot, vm_flags,
168 			 mobj, 0, pad_begin, pad_end);
169 	mobj_put(mobj);
170 	if (!res)
171 		reg_pair_from_64(va, &params[1].value.a, &params[1].value.b);
172 
173 	return res;
174 }
175 
176 static TEE_Result system_unmap(struct tee_ta_session *s, uint32_t param_types,
177 			       TEE_Param params[TEE_NUM_PARAMS])
178 {
179 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
180 					  TEE_PARAM_TYPE_VALUE_INPUT,
181 					  TEE_PARAM_TYPE_NONE,
182 					  TEE_PARAM_TYPE_NONE);
183 	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
184 	TEE_Result res = TEE_SUCCESS;
185 	uint32_t vm_flags = 0;
186 	vaddr_t va = 0;
187 	size_t sz = 0;
188 
189 	if (exp_pt != param_types)
190 		return TEE_ERROR_BAD_PARAMETERS;
191 
192 	if (params[0].value.b)
193 		return TEE_ERROR_BAD_PARAMETERS;
194 
195 	va = reg_pair_to_64(params[1].value.a, params[1].value.b);
196 	sz = ROUNDUP(params[0].value.a, SMALL_PAGE_SIZE);
197 
198 	res = vm_get_flags(&utc->uctx, va, sz, &vm_flags);
199 	if (res)
200 		return res;
201 	if (vm_flags & VM_FLAG_PERMANENT)
202 		return TEE_ERROR_ACCESS_DENIED;
203 
204 	return vm_unmap(&to_user_ta_ctx(s->ctx)->uctx, va, sz);
205 }
206 
207 static void ta_bin_close(void *ptr)
208 {
209 	struct bin_handle *binh = ptr;
210 
211 	if (binh) {
212 		if (binh->op && binh->h)
213 			binh->op->close(binh->h);
214 		file_put(binh->f);
215 	}
216 	free(binh);
217 }
218 
219 static TEE_Result system_open_ta_binary(struct system_ctx *ctx,
220 					uint32_t param_types,
221 					TEE_Param params[TEE_NUM_PARAMS])
222 {
223 	TEE_Result res = TEE_SUCCESS;
224 	struct bin_handle *binh = NULL;
225 	int h = 0;
226 	TEE_UUID *uuid = NULL;
227 	uint8_t tag[FILE_TAG_SIZE] = { 0 };
228 	unsigned int tag_len = sizeof(tag);
229 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
230 					  TEE_PARAM_TYPE_VALUE_OUTPUT,
231 					  TEE_PARAM_TYPE_NONE,
232 					  TEE_PARAM_TYPE_NONE);
233 
234 	if (exp_pt != param_types)
235 		return TEE_ERROR_BAD_PARAMETERS;
236 	if (params[0].memref.size != sizeof(*uuid))
237 		return TEE_ERROR_BAD_PARAMETERS;
238 
239 	uuid = params[0].memref.buffer;
240 
241 	binh = calloc(1, sizeof(*binh));
242 	if (!binh)
243 		return TEE_ERROR_OUT_OF_MEMORY;
244 
245 	SCATTERED_ARRAY_FOREACH(binh->op, ta_stores, struct user_ta_store_ops) {
246 		DMSG("Lookup user TA ELF %pUl (%s)",
247 		     (void *)uuid, binh->op->description);
248 
249 		res = binh->op->open(uuid, &binh->h);
250 		DMSG("res=0x%x", res);
251 		if (res != TEE_ERROR_ITEM_NOT_FOUND &&
252 		    res != TEE_ERROR_STORAGE_NOT_AVAILABLE)
253 			break;
254 	}
255 	if (res)
256 		goto err;
257 
258 	res = binh->op->get_size(binh->h, &binh->size_bytes);
259 	if (res)
260 		goto err;
261 	res = binh->op->get_tag(binh->h, tag, &tag_len);
262 	if (res)
263 		goto err;
264 	binh->f = file_get_by_tag(tag, tag_len);
265 	if (!binh->f)
266 		goto err_oom;
267 
268 	h = handle_get(&ctx->db, binh);
269 	if (h < 0)
270 		goto err_oom;
271 	params[0].value.a = h;
272 
273 	return TEE_SUCCESS;
274 err_oom:
275 	res = TEE_ERROR_OUT_OF_MEMORY;
276 err:
277 	ta_bin_close(binh);
278 	return res;
279 }
280 
281 static TEE_Result system_close_ta_binary(struct system_ctx *ctx,
282 					 uint32_t param_types,
283 					 TEE_Param params[TEE_NUM_PARAMS])
284 {
285 	TEE_Result res = TEE_SUCCESS;
286 	struct bin_handle *binh = NULL;
287 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
288 					  TEE_PARAM_TYPE_NONE,
289 					  TEE_PARAM_TYPE_NONE,
290 					  TEE_PARAM_TYPE_NONE);
291 
292 	if (exp_pt != param_types)
293 		return TEE_ERROR_BAD_PARAMETERS;
294 
295 	if (params[0].value.b)
296 		return TEE_ERROR_BAD_PARAMETERS;
297 
298 	binh = handle_put(&ctx->db, params[0].value.a);
299 	if (!binh)
300 		return TEE_ERROR_BAD_PARAMETERS;
301 
302 	if (binh->offs_bytes < binh->size_bytes)
303 		res = binh->op->read(binh->h, NULL,
304 				     binh->size_bytes - binh->offs_bytes);
305 
306 	ta_bin_close(binh);
307 	return res;
308 }
309 
/*
 * Copy num_bytes starting at offs_bytes of the TA binary into the
 * buffer at va. The store only supports forward reads, so the handle
 * tracks its current position:
 * - seeking backwards is an error (TEE_ERROR_BAD_STATE),
 * - a gap before offs_bytes is skipped with a NULL-destination read,
 * - a range extending past the end of the binary is zero-filled.
 */
static TEE_Result binh_copy_to(struct bin_handle *binh, vaddr_t va,
			       size_t offs_bytes, size_t num_bytes)
{
	TEE_Result res = TEE_SUCCESS;
	size_t next_offs = 0;

	/* The store cannot rewind */
	if (offs_bytes < binh->offs_bytes)
		return TEE_ERROR_BAD_STATE;

	if (ADD_OVERFLOW(offs_bytes, num_bytes, &next_offs))
		return TEE_ERROR_BAD_PARAMETERS;

	/* Skip forward (discarding data) up to the requested offset */
	if (offs_bytes > binh->offs_bytes) {
		res = binh->op->read(binh->h, NULL,
				     offs_bytes - binh->offs_bytes);
		if (res)
			return res;
		binh->offs_bytes = offs_bytes;
	}

	if (next_offs > binh->size_bytes) {
		/* Partial read: copy what remains, zero-fill the rest */
		size_t rb = binh->size_bytes - binh->offs_bytes;

		res = binh->op->read(binh->h, (void *)va, rb);
		if (res)
			return res;
		memset((uint8_t *)va + rb, 0, num_bytes - rb);
		binh->offs_bytes = binh->size_bytes;
	} else {
		res = binh->op->read(binh->h, (void *)va, num_bytes);
		if (res)
			return res;
		binh->offs_bytes = next_offs;
	}

	return TEE_SUCCESS;
}
347 
/*
 * Map (part of) an opened TA binary into the calling TA's address
 * space, sharing already-loaded pages between sessions when possible.
 *
 * params[0]: .a handle from PTA_SYSTEM_OPEN_TA_BINARY,
 *	.b PTA_SYSTEM_MAP_FLAG_* mapping flags.
 * params[1]: .a page-aligned byte offset into the binary,
 *	.b number of bytes to map.
 * params[2]: register pair, in: requested VA (0 = any), out: VA used.
 * params[3]: .a pad_begin, .b pad_end forwarded to vm_map_pad().
 */
static TEE_Result system_map_ta_binary(struct system_ctx *ctx,
				       struct tee_ta_session *s,
				       uint32_t param_types,
				       TEE_Param params[TEE_NUM_PARAMS])
{
	const uint32_t accept_flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE |
				      PTA_SYSTEM_MAP_FLAG_WRITEABLE |
				      PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INOUT,
					  TEE_PARAM_TYPE_VALUE_INPUT);
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
	struct bin_handle *binh = NULL;
	uint32_t num_rounded_bytes = 0;
	TEE_Result res = TEE_SUCCESS;
	struct file_slice *fs = NULL;
	bool file_is_locked = false;
	struct mobj *mobj = NULL;
	uint32_t offs_bytes = 0;
	uint32_t offs_pages = 0;
	uint32_t num_bytes = 0;
	uint32_t pad_begin = 0;
	uint32_t pad_end = 0;
	size_t num_pages = 0;
	uint32_t flags = 0;
	uint32_t prot = 0;
	vaddr_t va = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_lookup(&ctx->db, params[0].value.a);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;
	flags = params[0].value.b;
	offs_bytes = params[1].value.a;
	num_bytes = params[1].value.b;
	va = reg_pair_to_64(params[2].value.a, params[2].value.b);
	pad_begin = params[3].value.a;
	pad_end = params[3].value.b;

	/* Reject unknown flags */
	if ((flags & accept_flags) != flags)
		return TEE_ERROR_BAD_PARAMETERS;

	/* A shareable mapping cannot also be writeable */
	if ((flags & PTA_SYSTEM_MAP_FLAG_SHAREABLE) &&
	    (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	/* W^X: never writeable and executable at the same time */
	if ((flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE) &&
	    (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	/* Offsets into the binary must be page aligned */
	if (offs_bytes & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	/* Translate the PTA flags into the final page attributes */
	prot = TEE_MATTR_UR | TEE_MATTR_PR;
	if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)
		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
	if (flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE)
		prot |= TEE_MATTR_UX;

	offs_pages = offs_bytes >> SMALL_PAGE_SHIFT;
	if (ROUNDUP_OVERFLOW(num_bytes, SMALL_PAGE_SIZE, &num_rounded_bytes))
		return TEE_ERROR_BAD_PARAMETERS;
	num_pages = num_rounded_bytes / SMALL_PAGE_SIZE;

	if (!file_trylock(binh->f)) {
		/*
		 * Before we can block on the file lock we must make all
		 * our page tables available for reclaiming in order to
		 * avoid a dead-lock with the other thread (which already
		 * is holding the file lock) mapping lots of memory below.
		 */
		tee_mmu_set_ctx(NULL);
		file_lock(binh->f);
		tee_mmu_set_ctx(s->ctx);
	}
	file_is_locked = true;
	fs = file_find_slice(binh->f, offs_pages);
	if (fs) {
		/* If there's registered slice it has to match */
		if (fs->page_offset != offs_pages ||
		    num_pages > fs->fobj->num_pages) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		/* If there's a slice we must be mapping shareable */
		if (!(flags & PTA_SYSTEM_MAP_FLAG_SHAREABLE)) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		/* Reuse the pages already loaded by another session */
		mobj = mobj_with_fobj_alloc(fs->fobj, binh->f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		res = vm_map_pad(&utc->uctx, &va, num_rounded_bytes,
				 prot, VM_FLAG_READONLY,
				 mobj, 0, pad_begin, pad_end);
		mobj_put(mobj);
		if (res)
			goto err;
	} else {
		/* No slice yet: allocate pages and fill them from the store */
		struct fobj *f = fobj_ta_mem_alloc(num_pages);
		struct file *file = NULL;
		uint32_t vm_flags = 0;

		if (!f) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		if (!(flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)) {
			file = binh->f;
			vm_flags |= VM_FLAG_READONLY;
		}

		mobj = mobj_with_fobj_alloc(f, file);
		fobj_put(f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		/* Map kernel-writeable first so the content can be copied in */
		res = vm_map_pad(&utc->uctx, &va, num_rounded_bytes,
				 TEE_MATTR_PRW, vm_flags, mobj, 0,
				 pad_begin, pad_end);
		mobj_put(mobj);
		if (res)
			goto err;
		res = binh_copy_to(binh, va, offs_bytes, num_bytes);
		if (res)
			goto err_unmap_va;
		/* Drop to the final (possibly read-only/executable) prot */
		res = vm_set_prot(&utc->uctx, va, num_rounded_bytes,
				  prot);
		if (res)
			goto err_unmap_va;

		/*
		 * The context currently is active set it again to update
		 * the mapping.
		 */
		tee_mmu_set_ctx(s->ctx);

		if (!(flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)) {
			/* Publish the loaded pages for sharing */
			res = file_add_slice(binh->f, f, offs_pages);
			if (res)
				goto err_unmap_va;
		}
	}

	file_unlock(binh->f);

	reg_pair_from_64(va, &params[2].value.a, &params[2].value.b);
	return TEE_SUCCESS;

err_unmap_va:
	if (vm_unmap(&utc->uctx, va, num_rounded_bytes))
		panic();

	/*
	 * The context currently is active set it again to update
	 * the mapping.
	 */
	tee_mmu_set_ctx(s->ctx);

err:
	if (file_is_locked)
		file_unlock(binh->f);

	return res;
}
521 
522 static TEE_Result system_copy_from_ta_binary(struct system_ctx *ctx,
523 					     uint32_t param_types,
524 					     TEE_Param params[TEE_NUM_PARAMS])
525 {
526 	struct bin_handle *binh = NULL;
527 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
528 					  TEE_PARAM_TYPE_MEMREF_OUTPUT,
529 					  TEE_PARAM_TYPE_NONE,
530 					  TEE_PARAM_TYPE_NONE);
531 
532 	if (exp_pt != param_types)
533 		return TEE_ERROR_BAD_PARAMETERS;
534 
535 	binh = handle_lookup(&ctx->db, params[0].value.a);
536 	if (!binh)
537 		return TEE_ERROR_BAD_PARAMETERS;
538 
539 	return binh_copy_to(binh, (vaddr_t)params[1].memref.buffer,
540 			    params[0].value.b, params[1].memref.size);
541 }
542 
543 static TEE_Result system_set_prot(struct tee_ta_session *s,
544 				  uint32_t param_types,
545 				  TEE_Param params[TEE_NUM_PARAMS])
546 {
547 	const uint32_t accept_flags = PTA_SYSTEM_MAP_FLAG_WRITEABLE |
548 				      PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
549 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
550 					  TEE_PARAM_TYPE_VALUE_INPUT,
551 					  TEE_PARAM_TYPE_NONE,
552 					  TEE_PARAM_TYPE_NONE);
553 	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
554 	uint32_t prot = TEE_MATTR_UR | TEE_MATTR_PR;
555 	TEE_Result res = TEE_SUCCESS;
556 	uint32_t vm_flags = 0;
557 	uint32_t flags = 0;
558 	vaddr_t end_va = 0;
559 	vaddr_t va = 0;
560 	size_t sz = 0;
561 
562 	if (exp_pt != param_types)
563 		return TEE_ERROR_BAD_PARAMETERS;
564 
565 	flags = params[0].value.b;
566 
567 	if ((flags & accept_flags) != flags)
568 		return TEE_ERROR_BAD_PARAMETERS;
569 	if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)
570 		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
571 	if (flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE)
572 		prot |= TEE_MATTR_UX;
573 
574 	va = reg_pair_to_64(params[1].value.a, params[1].value.b);
575 	sz = ROUNDUP(params[0].value.a, SMALL_PAGE_SIZE);
576 
577 	/*
578 	 * The vm_get_flags() and vm_set_prot() are supposed to detect or
579 	 * handle overflow directly or indirectly. However, this function
580 	 * an API function so an extra guard here is in order. If nothing
581 	 * else to make it easier to review the code.
582 	 */
583 	if (ADD_OVERFLOW(va, sz, &end_va))
584 		return TEE_ERROR_BAD_PARAMETERS;
585 
586 	res = vm_get_flags(&utc->uctx, va, sz, &vm_flags);
587 	if (res)
588 		return res;
589 	if (vm_flags & VM_FLAG_PERMANENT)
590 		return TEE_ERROR_ACCESS_DENIED;
591 
592 	/*
593 	 * If the segment is a mapping of a part of a file (vm_flags &
594 	 * VM_FLAG_READONLY) it cannot be made writeable as all mapped
595 	 * files are mapped read-only.
596 	 */
597 	if ((vm_flags & VM_FLAG_READONLY) &&
598 	    (prot & (TEE_MATTR_UW | TEE_MATTR_PW)))
599 		return TEE_ERROR_ACCESS_DENIED;
600 
601 	return vm_set_prot(&utc->uctx, va, sz, prot);
602 }
603 
604 static TEE_Result system_remap(struct tee_ta_session *s, uint32_t param_types,
605 			       TEE_Param params[TEE_NUM_PARAMS])
606 {
607 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
608 					  TEE_PARAM_TYPE_VALUE_INPUT,
609 					  TEE_PARAM_TYPE_VALUE_INOUT,
610 					  TEE_PARAM_TYPE_VALUE_INPUT);
611 	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
612 	TEE_Result res = TEE_SUCCESS;
613 	uint32_t num_bytes = 0;
614 	uint32_t pad_begin = 0;
615 	uint32_t vm_flags = 0;
616 	uint32_t pad_end = 0;
617 	vaddr_t old_va = 0;
618 	vaddr_t new_va = 0;
619 
620 	if (exp_pt != param_types)
621 		return TEE_ERROR_BAD_PARAMETERS;
622 
623 	num_bytes = params[0].value.a;
624 	old_va = reg_pair_to_64(params[1].value.a, params[1].value.b);
625 	new_va = reg_pair_to_64(params[2].value.a, params[2].value.b);
626 	pad_begin = params[3].value.a;
627 	pad_end = params[3].value.b;
628 
629 	res = vm_get_flags(&utc->uctx, old_va, num_bytes, &vm_flags);
630 	if (res)
631 		return res;
632 	if (vm_flags & VM_FLAG_PERMANENT)
633 		return TEE_ERROR_ACCESS_DENIED;
634 
635 	res = vm_remap(&utc->uctx, &new_va, old_va, num_bytes, pad_begin,
636 		       pad_end);
637 	if (!res)
638 		reg_pair_from_64(new_va, &params[2].value.a,
639 				 &params[2].value.b);
640 
641 	return res;
642 }
643 
/*
 * ldelf has the same architecture/register width as the kernel; this
 * selects the mode used by thread_enter_user_mode() below.
 */
#ifdef ARM32
static const bool is_arm32 = true;
#else
static const bool is_arm32;
#endif
650 
/*
 * Enter ldelf's dl_entry function in user mode to perform a dlopen on
 * behalf of the TA. The argument struct is staged on ldelf's user
 * stack, which must be readable and writable from user mode.
 */
static TEE_Result call_ldelf_dlopen(struct user_ta_ctx *utc, TEE_UUID *uuid,
				    uint32_t flags)
{
	uaddr_t usr_stack = utc->ldelf_stack_ptr;
	TEE_Result res = TEE_ERROR_GENERIC;
	struct dl_entry_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;

	/* Callers (system_dlopen()) reject a NULL UUID before this point */
	assert(uuid);

	/* Reserve aligned stack space for the argument struct */
	usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
	arg = (struct dl_entry_arg *)usr_stack;

	res = tee_mmu_check_access_rights(&utc->uctx,
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_WRITE |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)arg, sizeof(*arg));
	if (res) {
		EMSG("ldelf stack is inaccessible!");
		return res;
	}

	memset(arg, 0, sizeof(*arg));
	arg->cmd = LDELF_DL_ENTRY_DLOPEN;
	arg->dlopen.uuid = *uuid;
	arg->dlopen.flags = flags;

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, utc->dl_entry_func,
				     is_arm32, &panicked, &panic_code);
	if (panicked) {
		EMSG("ldelf dl_entry function panicked");
		abort_print_current_ta();
		res = TEE_ERROR_TARGET_DEAD;
	}
	/* On a clean return, propagate ldelf's own result code */
	if (!res)
		res = arg->ret;

	return res;
}
693 
694 static TEE_Result call_ldelf_dlsym(struct user_ta_ctx *utc, TEE_UUID *uuid,
695 				   const char *sym, size_t maxlen, vaddr_t *val)
696 {
697 	uaddr_t usr_stack = utc->ldelf_stack_ptr;
698 	TEE_Result res = TEE_ERROR_GENERIC;
699 	struct dl_entry_arg *arg = NULL;
700 	uint32_t panic_code = 0;
701 	uint32_t panicked = 0;
702 	size_t len = strnlen(sym, maxlen);
703 
704 	if (len == maxlen)
705 		return TEE_ERROR_BAD_PARAMETERS;
706 
707 	usr_stack -= ROUNDUP(sizeof(*arg) + len + 1, STACK_ALIGNMENT);
708 	arg = (struct dl_entry_arg *)usr_stack;
709 
710 	res = tee_mmu_check_access_rights(&utc->uctx,
711 					  TEE_MEMORY_ACCESS_READ |
712 					  TEE_MEMORY_ACCESS_WRITE |
713 					  TEE_MEMORY_ACCESS_ANY_OWNER,
714 					  (uaddr_t)arg, sizeof(*arg) + len + 1);
715 	if (res) {
716 		EMSG("ldelf stack is inaccessible!");
717 		return res;
718 	}
719 
720 	memset(arg, 0, sizeof(*arg));
721 	arg->cmd = LDELF_DL_ENTRY_DLSYM;
722 	arg->dlsym.uuid = *uuid;
723 	memcpy(arg->dlsym.symbol, sym, len);
724 	arg->dlsym.symbol[len] = '\0';
725 
726 	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
727 				     usr_stack, utc->dl_entry_func,
728 				     is_arm32, &panicked, &panic_code);
729 	if (panicked) {
730 		EMSG("ldelf dl_entry function panicked");
731 		abort_print_current_ta();
732 		res = TEE_ERROR_TARGET_DEAD;
733 	}
734 	if (!res) {
735 		res = arg->ret;
736 		if (!res)
737 			*val = arg->dlsym.val;
738 	}
739 
740 	return res;
741 }
742 
743 static TEE_Result system_dlopen(struct tee_ta_session *cs, uint32_t param_types,
744 				TEE_Param params[TEE_NUM_PARAMS])
745 {
746 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
747 					  TEE_PARAM_TYPE_VALUE_INPUT,
748 					  TEE_PARAM_TYPE_NONE,
749 					  TEE_PARAM_TYPE_NONE);
750 	TEE_Result res = TEE_ERROR_GENERIC;
751 	struct tee_ta_session *s = NULL;
752 	struct user_ta_ctx *utc = NULL;
753 	TEE_UUID *uuid = NULL;
754 	uint32_t flags = 0;
755 
756 	if (exp_pt != param_types)
757 		return TEE_ERROR_BAD_PARAMETERS;
758 
759 	uuid = params[0].memref.buffer;
760 	if (!uuid || params[0].memref.size != sizeof(*uuid))
761 		return TEE_ERROR_BAD_PARAMETERS;
762 
763 	flags = params[1].value.a;
764 
765 	utc = to_user_ta_ctx(cs->ctx);
766 
767 	s = tee_ta_pop_current_session();
768 	res = call_ldelf_dlopen(utc, uuid, flags);
769 	tee_ta_push_current_session(s);
770 
771 	return res;
772 }
773 
774 static TEE_Result system_dlsym(struct tee_ta_session *cs, uint32_t param_types,
775 			       TEE_Param params[TEE_NUM_PARAMS])
776 {
777 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
778 					  TEE_PARAM_TYPE_MEMREF_INPUT,
779 					  TEE_PARAM_TYPE_VALUE_OUTPUT,
780 					  TEE_PARAM_TYPE_NONE);
781 	TEE_Result res = TEE_ERROR_GENERIC;
782 	struct tee_ta_session *s = NULL;
783 	struct user_ta_ctx *utc = NULL;
784 	const char *sym = NULL;
785 	TEE_UUID *uuid = NULL;
786 	size_t maxlen = 0;
787 	vaddr_t va = 0;
788 
789 	if (exp_pt != param_types)
790 		return TEE_ERROR_BAD_PARAMETERS;
791 
792 	uuid = params[0].memref.buffer;
793 	if (uuid && params[0].memref.size != sizeof(*uuid))
794 		return TEE_ERROR_BAD_PARAMETERS;
795 
796 	sym = params[1].memref.buffer;
797 	if (!sym)
798 		return TEE_ERROR_BAD_PARAMETERS;
799 	maxlen = params[1].memref.size;
800 
801 	utc = to_user_ta_ctx(cs->ctx);
802 
803 	s = tee_ta_pop_current_session();
804 	res = call_ldelf_dlsym(utc, uuid, sym, maxlen, &va);
805 	tee_ta_push_current_session(s);
806 
807 	if (!res)
808 		reg_pair_from_64(va, &params[2].value.a, &params[2].value.b);
809 
810 	return res;
811 }
812 
813 static TEE_Result system_get_tpm_event_log(uint32_t param_types,
814 					   TEE_Param params[TEE_NUM_PARAMS])
815 {
816 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_OUTPUT,
817 					  TEE_PARAM_TYPE_NONE,
818 					  TEE_PARAM_TYPE_NONE,
819 					  TEE_PARAM_TYPE_NONE);
820 	size_t size = 0;
821 	TEE_Result res = TEE_SUCCESS;
822 
823 	if (exp_pt != param_types)
824 		return TEE_ERROR_BAD_PARAMETERS;
825 
826 	size = params[0].memref.size;
827 	res = tpm_get_event_log(params[0].memref.buffer, &size);
828 	params[0].memref.size = size;
829 
830 	return res;
831 }
832 
833 static TEE_Result open_session(uint32_t param_types __unused,
834 			       TEE_Param params[TEE_NUM_PARAMS] __unused,
835 			       void **sess_ctx)
836 {
837 	struct tee_ta_session *s = NULL;
838 	struct system_ctx *ctx = NULL;
839 
840 	/* Check that we're called from a user TA */
841 	s = tee_ta_get_calling_session();
842 	if (!s)
843 		return TEE_ERROR_ACCESS_DENIED;
844 	if (!is_user_ta_ctx(s->ctx))
845 		return TEE_ERROR_ACCESS_DENIED;
846 
847 	ctx = calloc(1, sizeof(*ctx));
848 	if (!ctx)
849 		return TEE_ERROR_OUT_OF_MEMORY;
850 
851 	*sess_ctx = ctx;
852 
853 	return TEE_SUCCESS;
854 }
855 
856 static void close_session(void *sess_ctx)
857 {
858 	struct system_ctx *ctx = sess_ctx;
859 
860 	handle_db_destroy(&ctx->db, ta_bin_close);
861 	free(ctx);
862 }
863 
/*
 * Dispatch an invoked command to its handler. open_session() has
 * already guaranteed that the calling session belongs to a user TA.
 */
static TEE_Result invoke_command(void *sess_ctx, uint32_t cmd_id,
				 uint32_t param_types,
				 TEE_Param params[TEE_NUM_PARAMS])
{
	struct tee_ta_session *s = tee_ta_get_calling_session();

	switch (cmd_id) {
	case PTA_SYSTEM_ADD_RNG_ENTROPY:
		return system_rng_reseed(s, param_types, params);
	case PTA_SYSTEM_DERIVE_TA_UNIQUE_KEY:
		return system_derive_ta_unique_key(s, param_types, params);
	case PTA_SYSTEM_MAP_ZI:
		return system_map_zi(s, param_types, params);
	case PTA_SYSTEM_UNMAP:
		return system_unmap(s, param_types, params);
	case PTA_SYSTEM_OPEN_TA_BINARY:
		return system_open_ta_binary(sess_ctx, param_types, params);
	case PTA_SYSTEM_CLOSE_TA_BINARY:
		return system_close_ta_binary(sess_ctx, param_types, params);
	case PTA_SYSTEM_MAP_TA_BINARY:
		return system_map_ta_binary(sess_ctx, s, param_types, params);
	case PTA_SYSTEM_COPY_FROM_TA_BINARY:
		return system_copy_from_ta_binary(sess_ctx, param_types,
						  params);
	case PTA_SYSTEM_SET_PROT:
		return system_set_prot(s, param_types, params);
	case PTA_SYSTEM_REMAP:
		return system_remap(s, param_types, params);
	case PTA_SYSTEM_DLOPEN:
		return system_dlopen(s, param_types, params);
	case PTA_SYSTEM_DLSYM:
		return system_dlsym(s, param_types, params);
	case PTA_SYSTEM_GET_TPM_EVENT_LOG:
		return system_get_tpm_event_log(param_types, params);
	default:
		break;
	}

	return TEE_ERROR_NOT_IMPLEMENTED;
}
904 
/*
 * Register the "system" pseudo TA. TA_FLAG_CONCURRENT allows parallel
 * sessions; each keeps its own struct system_ctx.
 */
pseudo_ta_register(.uuid = PTA_SYSTEM_UUID, .name = "system.pta",
		   .flags = PTA_DEFAULT_FLAGS | TA_FLAG_CONCURRENT,
		   .open_session_entry_point = open_session,
		   .close_session_entry_point = close_session,
		   .invoke_command_entry_point = invoke_command);
910