xref: /optee_os/core/pta/system.c (revision 18871ad0cecba19a366008e6a9bf4cf724750094)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2018-2019, Linaro Limited
4  */
5 
6 #include <assert.h>
7 #include <crypto/crypto.h>
8 #include <kernel/handle.h>
9 #include <kernel/huk_subkey.h>
10 #include <kernel/misc.h>
11 #include <kernel/msg_param.h>
12 #include <kernel/pseudo_ta.h>
13 #include <kernel/user_ta.h>
14 #include <kernel/user_ta_store.h>
15 #include <ldelf.h>
16 #include <mm/file.h>
17 #include <mm/fobj.h>
18 #include <mm/tee_mmu.h>
19 #include <pta_system.h>
20 #include <string.h>
21 #include <tee_api_defines_extensions.h>
22 #include <tee_api_defines.h>
23 #include <util.h>
24 #include <kernel/tpm.h>
25 
/*
 * State for one opened TA binary.
 *
 * @op:         the TA store that answered the open request
 * @h:          the store's handle for the opened binary
 * @f:          file object looked up by the store's tag, used for
 *              slice sharing between mappings
 * @offs_bytes: current (sequential) read cursor within the binary
 * @size_bytes: total size of the binary as reported by the store
 */
struct bin_handle {
	const struct user_ta_store_ops *op;
	struct user_ta_store_handle *h;
	struct file *f;
	size_t offs_bytes;
	size_t size_bytes;
};
33 
/*
 * Per-session state: @db maps the integer handles returned by
 * PTA_SYSTEM_OPEN_TA_BINARY to struct bin_handle instances.
 *
 * NOTE(review): @store_op appears unused within this file — confirm
 * whether it can be dropped or is used elsewhere.
 */
struct system_ctx {
	struct handle_db db;
	const struct user_ta_store_ops *store_op;
};
38 
/*
 * Per-source state cookie passed to crypto_rng_add_event() in
 * system_rng_reseed(); presumably a pool/ring index maintained by the
 * RNG core — see crypto_rng_add_event().
 */
static unsigned int system_pnum;
40 
41 static TEE_Result system_rng_reseed(struct tee_ta_session *s __unused,
42 				uint32_t param_types,
43 				TEE_Param params[TEE_NUM_PARAMS])
44 {
45 	size_t entropy_sz;
46 	uint8_t *entropy_input;
47 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
48 					  TEE_PARAM_TYPE_NONE,
49 					  TEE_PARAM_TYPE_NONE,
50 					  TEE_PARAM_TYPE_NONE);
51 
52 	if (exp_pt != param_types)
53 		return TEE_ERROR_BAD_PARAMETERS;
54 	entropy_input = params[0].memref.buffer;
55 	entropy_sz = params[0].memref.size;
56 
57 	if (!entropy_sz || !entropy_input)
58 		return TEE_ERROR_BAD_PARAMETERS;
59 
60 	crypto_rng_add_event(CRYPTO_RNG_SRC_NONSECURE, &system_pnum,
61 			     entropy_input, entropy_sz);
62 	return TEE_SUCCESS;
63 }
64 
/*
 * Derive a key unique to the calling TA from the hardware unique key
 * (HUK). The TA's UUID is always part of the derivation input;
 * params[0] may carry extra caller data to diversify the key.
 *
 * params[0] (memref input):  optional extra data, at most
 *	TA_DERIVED_EXTRA_DATA_MAX_SIZE bytes
 * params[1] (memref output): derived key, size within
 *	[TA_DERIVED_KEY_MIN_SIZE, TA_DERIVED_KEY_MAX_SIZE]
 */
static TEE_Result system_derive_ta_unique_key(struct tee_ta_session *s,
					      uint32_t param_types,
					      TEE_Param params[TEE_NUM_PARAMS])
{
	size_t data_len = sizeof(TEE_UUID);
	TEE_Result res = TEE_ERROR_GENERIC;
	uint8_t *data = NULL;
	uint32_t access_flags = 0;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
					  TEE_PARAM_TYPE_MEMREF_OUTPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);
	struct user_ta_ctx *utc = NULL;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	if (params[0].memref.size > TA_DERIVED_EXTRA_DATA_MAX_SIZE ||
	    params[1].memref.size < TA_DERIVED_KEY_MIN_SIZE ||
	    params[1].memref.size > TA_DERIVED_KEY_MAX_SIZE)
		return TEE_ERROR_BAD_PARAMETERS;

	utc = to_user_ta_ctx(s->ctx);

	/*
	 * The derived key shall not end up in non-secure memory by
	 * mistake.
	 *
	 * Note that we're allowing shared memory as long as it's
	 * secure. This is needed because a TA always uses shared memory
	 * when communicating with another TA.
	 */
	access_flags = TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER |
		       TEE_MEMORY_ACCESS_SECURE;
	res = tee_mmu_check_access_rights(&utc->uctx, access_flags,
					  (uaddr_t)params[1].memref.buffer,
					  params[1].memref.size);
	if (res != TEE_SUCCESS)
		return TEE_ERROR_SECURITY;

	/* Take extra data into account. */
	if (ADD_OVERFLOW(data_len, params[0].memref.size, &data_len))
		return TEE_ERROR_SECURITY;

	data = calloc(data_len, 1);
	if (!data)
		return TEE_ERROR_OUT_OF_MEMORY;

	/* Derivation input is TA UUID || extra data */
	memcpy(data, &s->ctx->uuid, sizeof(TEE_UUID));

	/* Append the user provided data */
	memcpy(data + sizeof(TEE_UUID), params[0].memref.buffer,
	       params[0].memref.size);

	res = huk_subkey_derive(HUK_SUBKEY_UNIQUE_TA, data, data_len,
				params[1].memref.buffer,
				params[1].memref.size);
	free(data);

	return res;
}
126 
/*
 * Map zero-initialized anonymous memory into the calling TA.
 *
 * params[0].value.a:   number of bytes to map
 * params[0].value.b:   flags; only PTA_SYSTEM_MAP_FLAG_SHAREABLE accepted
 * params[1] (inout):   requested/returned VA as a reg pair (0 = any)
 * params[2].value.a/b: pad_begin/pad_end hints for vm_map_pad()
 */
static TEE_Result system_map_zi(struct tee_ta_session *s, uint32_t param_types,
				TEE_Param params[TEE_NUM_PARAMS])
{
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INOUT,
					  TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_NONE);
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
	uint32_t prot = TEE_MATTR_URW | TEE_MATTR_PRW;
	TEE_Result res = TEE_ERROR_GENERIC;
	struct mobj *mobj = NULL;
	uint32_t pad_begin = 0;
	uint32_t vm_flags = 0;
	struct fobj *f = NULL;
	uint32_t pad_end = 0;
	size_t num_bytes = 0;
	vaddr_t va = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;
	if (params[0].value.b & ~PTA_SYSTEM_MAP_FLAG_SHAREABLE)
		return TEE_ERROR_BAD_PARAMETERS;

	if (params[0].value.b & PTA_SYSTEM_MAP_FLAG_SHAREABLE)
		vm_flags |= VM_FLAG_SHAREABLE;

	num_bytes = params[0].value.a;
	va = reg_pair_to_64(params[1].value.a, params[1].value.b);
	pad_begin = params[2].value.a;
	pad_end = params[2].value.b;

	f = fobj_ta_mem_alloc(ROUNDUP_DIV(num_bytes, SMALL_PAGE_SIZE));
	if (!f)
		return TEE_ERROR_OUT_OF_MEMORY;
	/* The mobj takes its own reference on f, so drop ours here */
	mobj = mobj_with_fobj_alloc(f, NULL);
	fobj_put(f);
	if (!mobj)
		return TEE_ERROR_OUT_OF_MEMORY;
	res = vm_map_pad(&utc->uctx, &va, num_bytes, prot, vm_flags,
			 mobj, 0, pad_begin, pad_end);
	mobj_put(mobj);
	if (!res)
		reg_pair_from_64(va, &params[1].value.a, &params[1].value.b);

	return res;
}
173 
174 static TEE_Result system_unmap(struct tee_ta_session *s, uint32_t param_types,
175 			       TEE_Param params[TEE_NUM_PARAMS])
176 {
177 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
178 					  TEE_PARAM_TYPE_VALUE_INPUT,
179 					  TEE_PARAM_TYPE_NONE,
180 					  TEE_PARAM_TYPE_NONE);
181 	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
182 	TEE_Result res = TEE_SUCCESS;
183 	uint32_t vm_flags = 0;
184 	vaddr_t va = 0;
185 	size_t sz = 0;
186 
187 	if (exp_pt != param_types)
188 		return TEE_ERROR_BAD_PARAMETERS;
189 
190 	if (params[0].value.b)
191 		return TEE_ERROR_BAD_PARAMETERS;
192 
193 	va = reg_pair_to_64(params[1].value.a, params[1].value.b);
194 	sz = ROUNDUP(params[0].value.a, SMALL_PAGE_SIZE);
195 
196 	res = vm_get_flags(&utc->uctx, va, sz, &vm_flags);
197 	if (res)
198 		return res;
199 	if (vm_flags & VM_FLAG_PERMANENT)
200 		return TEE_ERROR_ACCESS_DENIED;
201 
202 	return vm_unmap(&to_user_ta_ctx(s->ctx)->uctx, va, sz);
203 }
204 
205 static void ta_bin_close(void *ptr)
206 {
207 	struct bin_handle *binh = ptr;
208 
209 	if (binh) {
210 		if (binh->op && binh->h)
211 			binh->op->close(binh->h);
212 		file_put(binh->f);
213 	}
214 	free(binh);
215 }
216 
217 static TEE_Result system_open_ta_binary(struct system_ctx *ctx,
218 					uint32_t param_types,
219 					TEE_Param params[TEE_NUM_PARAMS])
220 {
221 	TEE_Result res = TEE_SUCCESS;
222 	struct bin_handle *binh = NULL;
223 	int h = 0;
224 	TEE_UUID *uuid = NULL;
225 	uint8_t tag[FILE_TAG_SIZE] = { 0 };
226 	unsigned int tag_len = sizeof(tag);
227 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
228 					  TEE_PARAM_TYPE_VALUE_OUTPUT,
229 					  TEE_PARAM_TYPE_NONE,
230 					  TEE_PARAM_TYPE_NONE);
231 
232 	if (exp_pt != param_types)
233 		return TEE_ERROR_BAD_PARAMETERS;
234 	if (params[0].memref.size != sizeof(*uuid))
235 		return TEE_ERROR_BAD_PARAMETERS;
236 
237 	uuid = params[0].memref.buffer;
238 
239 	binh = calloc(1, sizeof(*binh));
240 	if (!binh)
241 		return TEE_ERROR_OUT_OF_MEMORY;
242 
243 	SCATTERED_ARRAY_FOREACH(binh->op, ta_stores, struct user_ta_store_ops) {
244 		DMSG("Lookup user TA ELF %pUl (%s)",
245 		     (void *)uuid, binh->op->description);
246 
247 		res = binh->op->open(uuid, &binh->h);
248 		DMSG("res=0x%x", res);
249 		if (res != TEE_ERROR_ITEM_NOT_FOUND &&
250 		    res != TEE_ERROR_STORAGE_NOT_AVAILABLE)
251 			break;
252 	}
253 	if (res)
254 		goto err;
255 
256 	res = binh->op->get_size(binh->h, &binh->size_bytes);
257 	if (res)
258 		goto err;
259 	res = binh->op->get_tag(binh->h, tag, &tag_len);
260 	if (res)
261 		goto err;
262 	binh->f = file_get_by_tag(tag, tag_len);
263 	if (!binh->f)
264 		goto err_oom;
265 
266 	h = handle_get(&ctx->db, binh);
267 	if (h < 0)
268 		goto err_oom;
269 	params[0].value.a = h;
270 
271 	return TEE_SUCCESS;
272 err_oom:
273 	res = TEE_ERROR_OUT_OF_MEMORY;
274 err:
275 	ta_bin_close(binh);
276 	return res;
277 }
278 
279 static TEE_Result system_close_ta_binary(struct system_ctx *ctx,
280 					 uint32_t param_types,
281 					 TEE_Param params[TEE_NUM_PARAMS])
282 {
283 	TEE_Result res = TEE_SUCCESS;
284 	struct bin_handle *binh = NULL;
285 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
286 					  TEE_PARAM_TYPE_NONE,
287 					  TEE_PARAM_TYPE_NONE,
288 					  TEE_PARAM_TYPE_NONE);
289 
290 	if (exp_pt != param_types)
291 		return TEE_ERROR_BAD_PARAMETERS;
292 
293 	if (params[0].value.b)
294 		return TEE_ERROR_BAD_PARAMETERS;
295 
296 	binh = handle_put(&ctx->db, params[0].value.a);
297 	if (!binh)
298 		return TEE_ERROR_BAD_PARAMETERS;
299 
300 	if (binh->offs_bytes < binh->size_bytes)
301 		res = binh->op->read(binh->h, NULL,
302 				     binh->size_bytes - binh->offs_bytes);
303 
304 	ta_bin_close(binh);
305 	return res;
306 }
307 
/*
 * Copy @num_bytes of the TA binary backing @binh to user address @va,
 * starting at @offs_bytes within the binary. The store only supports
 * sequential reads, so the read cursor (binh->offs_bytes) may only
 * move forward: skipped-over ranges are consumed with a NULL
 * destination and bytes requested past the end of the binary are
 * zero-filled.
 */
static TEE_Result binh_copy_to(struct bin_handle *binh, vaddr_t va,
			       size_t offs_bytes, size_t num_bytes)
{
	TEE_Result res = TEE_SUCCESS;
	size_t next_offs = 0;

	/* The cursor cannot move backwards */
	if (offs_bytes < binh->offs_bytes)
		return TEE_ERROR_BAD_STATE;

	if (ADD_OVERFLOW(offs_bytes, num_bytes, &next_offs))
		return TEE_ERROR_BAD_PARAMETERS;

	/* Advance the cursor to the requested offset, discarding data */
	if (offs_bytes > binh->offs_bytes) {
		res = binh->op->read(binh->h, NULL,
				     offs_bytes - binh->offs_bytes);
		if (res)
			return res;
		binh->offs_bytes = offs_bytes;
	}

	if (next_offs > binh->size_bytes) {
		/*
		 * NOTE(review): this assumes binh->offs_bytes <=
		 * binh->size_bytes here; if a caller passed an offset
		 * beyond the end of the binary, rb would underflow.
		 * Presumably the skip-read above fails first for such
		 * offsets — confirm with the store implementations.
		 */
		size_t rb = binh->size_bytes - binh->offs_bytes;

		res = binh->op->read(binh->h, (void *)va, rb);
		if (res)
			return res;
		/* Zero-fill the part beyond the end of the binary */
		memset((uint8_t *)va + rb, 0, num_bytes - rb);
		binh->offs_bytes = binh->size_bytes;
	} else {
		res = binh->op->read(binh->h, (void *)va, num_bytes);
		if (res)
			return res;
		binh->offs_bytes = next_offs;
	}

	return TEE_SUCCESS;
}
345 
/*
 * Map (part of) an opened TA binary into the calling TA's address
 * space. Read-only mappings of the same page range are shared between
 * TAs via file slices registered on the binary's file object.
 *
 * params[0].value.a:   binary handle from system_open_ta_binary()
 * params[0].value.b:   PTA_SYSTEM_MAP_FLAG_* flags
 * params[1].value.a:   byte offset into the binary (page aligned)
 * params[1].value.b:   number of bytes to map
 * params[2] (inout):   requested/returned VA as a reg pair
 * params[3].value.a/b: pad_begin/pad_end hints for vm_map_pad()
 */
static TEE_Result system_map_ta_binary(struct system_ctx *ctx,
				       struct tee_ta_session *s,
				       uint32_t param_types,
				       TEE_Param params[TEE_NUM_PARAMS])
{
	const uint32_t accept_flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE |
				      PTA_SYSTEM_MAP_FLAG_WRITEABLE |
				      PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INOUT,
					  TEE_PARAM_TYPE_VALUE_INPUT);
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
	struct bin_handle *binh = NULL;
	uint32_t num_rounded_bytes = 0;
	TEE_Result res = TEE_SUCCESS;
	struct file_slice *fs = NULL;
	bool file_is_locked = false;
	struct mobj *mobj = NULL;
	uint32_t offs_bytes = 0;
	uint32_t offs_pages = 0;
	uint32_t num_bytes = 0;
	uint32_t pad_begin = 0;
	uint32_t pad_end = 0;
	size_t num_pages = 0;
	uint32_t flags = 0;
	uint32_t prot = 0;
	vaddr_t va = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_lookup(&ctx->db, params[0].value.a);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;
	flags = params[0].value.b;
	offs_bytes = params[1].value.a;
	num_bytes = params[1].value.b;
	va = reg_pair_to_64(params[2].value.a, params[2].value.b);
	pad_begin = params[3].value.a;
	pad_end = params[3].value.b;

	/* Only the flags listed in accept_flags are valid */
	if ((flags & accept_flags) != flags)
		return TEE_ERROR_BAD_PARAMETERS;

	/* A shared mapping cannot also be writeable */
	if ((flags & PTA_SYSTEM_MAP_FLAG_SHAREABLE) &&
	    (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	/* W^X: never writeable and executable at the same time */
	if ((flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE) &&
	    (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	if (offs_bytes & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	prot = TEE_MATTR_UR | TEE_MATTR_PR;
	if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)
		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
	if (flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE)
		prot |= TEE_MATTR_UX;

	offs_pages = offs_bytes >> SMALL_PAGE_SHIFT;
	if (ROUNDUP_OVERFLOW(num_bytes, SMALL_PAGE_SIZE, &num_rounded_bytes))
		return TEE_ERROR_BAD_PARAMETERS;
	num_pages = num_rounded_bytes / SMALL_PAGE_SIZE;

	if (!file_trylock(binh->f)) {
		/*
		 * Before we can block on the file lock we must make all
		 * our page tables available for reclaiming in order to
		 * avoid a dead-lock with the other thread (which already
		 * is holding the file lock) mapping lots of memory below.
		 */
		tee_mmu_set_ctx(NULL);
		file_lock(binh->f);
		tee_mmu_set_ctx(s->ctx);
	}
	file_is_locked = true;
	fs = file_find_slice(binh->f, offs_pages);
	if (fs) {
		/* If there's registered slice it has to match */
		if (fs->page_offset != offs_pages ||
		    num_pages > fs->fobj->num_pages) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		/* If there's a slice we must be mapping shareable */
		if (!(flags & PTA_SYSTEM_MAP_FLAG_SHAREABLE)) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		mobj = mobj_with_fobj_alloc(fs->fobj, binh->f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		res = vm_map_pad(&utc->uctx, &va, num_rounded_bytes,
				 prot, VM_FLAG_READONLY,
				 mobj, 0, pad_begin, pad_end);
		mobj_put(mobj);
		if (res)
			goto err;
	} else {
		struct fobj *f = fobj_ta_mem_alloc(num_pages);
		struct file *file = NULL;
		uint32_t vm_flags = 0;

		if (!f) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		if (!(flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)) {
			file = binh->f;
			vm_flags |= VM_FLAG_READONLY;
		}

		/* The mobj takes its own reference on f */
		mobj = mobj_with_fobj_alloc(f, file);
		fobj_put(f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		/*
		 * Map read/write first so the binary content can be
		 * copied in, then restrict to the requested protection
		 * below.
		 */
		res = vm_map_pad(&utc->uctx, &va, num_rounded_bytes,
				 TEE_MATTR_PRW, vm_flags, mobj, 0,
				 pad_begin, pad_end);
		mobj_put(mobj);
		if (res)
			goto err;
		res = binh_copy_to(binh, va, offs_bytes, num_bytes);
		if (res)
			goto err_unmap_va;
		res = vm_set_prot(&utc->uctx, va, num_rounded_bytes,
				  prot);
		if (res)
			goto err_unmap_va;

		/*
		 * The context currently is active set it again to update
		 * the mapping.
		 */
		tee_mmu_set_ctx(s->ctx);

		/* Register the read-only slice so later mappings share it */
		if (!(flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)) {
			res = file_add_slice(binh->f, f, offs_pages);
			if (res)
				goto err_unmap_va;
		}
	}

	file_unlock(binh->f);

	reg_pair_from_64(va, &params[2].value.a, &params[2].value.b);
	return TEE_SUCCESS;

err_unmap_va:
	if (vm_unmap(&utc->uctx, va, num_rounded_bytes))
		panic();

	/*
	 * The context currently is active set it again to update
	 * the mapping.
	 */
	tee_mmu_set_ctx(s->ctx);

err:
	if (file_is_locked)
		file_unlock(binh->f);

	return res;
}
519 
520 static TEE_Result system_copy_from_ta_binary(struct system_ctx *ctx,
521 					     uint32_t param_types,
522 					     TEE_Param params[TEE_NUM_PARAMS])
523 {
524 	struct bin_handle *binh = NULL;
525 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
526 					  TEE_PARAM_TYPE_MEMREF_OUTPUT,
527 					  TEE_PARAM_TYPE_NONE,
528 					  TEE_PARAM_TYPE_NONE);
529 
530 	if (exp_pt != param_types)
531 		return TEE_ERROR_BAD_PARAMETERS;
532 
533 	binh = handle_lookup(&ctx->db, params[0].value.a);
534 	if (!binh)
535 		return TEE_ERROR_BAD_PARAMETERS;
536 
537 	return binh_copy_to(binh, (vaddr_t)params[1].memref.buffer,
538 			    params[0].value.b, params[1].memref.size);
539 }
540 
541 static TEE_Result system_set_prot(struct tee_ta_session *s,
542 				  uint32_t param_types,
543 				  TEE_Param params[TEE_NUM_PARAMS])
544 {
545 	const uint32_t accept_flags = PTA_SYSTEM_MAP_FLAG_WRITEABLE |
546 				      PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
547 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
548 					  TEE_PARAM_TYPE_VALUE_INPUT,
549 					  TEE_PARAM_TYPE_NONE,
550 					  TEE_PARAM_TYPE_NONE);
551 	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
552 	uint32_t prot = TEE_MATTR_UR | TEE_MATTR_PR;
553 	TEE_Result res = TEE_SUCCESS;
554 	uint32_t vm_flags = 0;
555 	uint32_t flags = 0;
556 	vaddr_t end_va = 0;
557 	vaddr_t va = 0;
558 	size_t sz = 0;
559 
560 	if (exp_pt != param_types)
561 		return TEE_ERROR_BAD_PARAMETERS;
562 
563 	flags = params[0].value.b;
564 
565 	if ((flags & accept_flags) != flags)
566 		return TEE_ERROR_BAD_PARAMETERS;
567 	if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)
568 		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
569 	if (flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE)
570 		prot |= TEE_MATTR_UX;
571 
572 	va = reg_pair_to_64(params[1].value.a, params[1].value.b);
573 	sz = ROUNDUP(params[0].value.a, SMALL_PAGE_SIZE);
574 
575 	/*
576 	 * The vm_get_flags() and vm_set_prot() are supposed to detect or
577 	 * handle overflow directly or indirectly. However, this function
578 	 * an API function so an extra guard here is in order. If nothing
579 	 * else to make it easier to review the code.
580 	 */
581 	if (ADD_OVERFLOW(va, sz, &end_va))
582 		return TEE_ERROR_BAD_PARAMETERS;
583 
584 	res = vm_get_flags(&utc->uctx, va, sz, &vm_flags);
585 	if (res)
586 		return res;
587 	if (vm_flags & VM_FLAG_PERMANENT)
588 		return TEE_ERROR_ACCESS_DENIED;
589 
590 	/*
591 	 * If the segment is a mapping of a part of a file (vm_flags &
592 	 * VM_FLAG_READONLY) it cannot be made writeable as all mapped
593 	 * files are mapped read-only.
594 	 */
595 	if ((vm_flags & VM_FLAG_READONLY) &&
596 	    (prot & (TEE_MATTR_UW | TEE_MATTR_PW)))
597 		return TEE_ERROR_ACCESS_DENIED;
598 
599 	return vm_set_prot(&utc->uctx, va, sz, prot);
600 }
601 
602 static TEE_Result system_remap(struct tee_ta_session *s, uint32_t param_types,
603 			       TEE_Param params[TEE_NUM_PARAMS])
604 {
605 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
606 					  TEE_PARAM_TYPE_VALUE_INPUT,
607 					  TEE_PARAM_TYPE_VALUE_INOUT,
608 					  TEE_PARAM_TYPE_VALUE_INPUT);
609 	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
610 	TEE_Result res = TEE_SUCCESS;
611 	uint32_t num_bytes = 0;
612 	uint32_t pad_begin = 0;
613 	uint32_t vm_flags = 0;
614 	uint32_t pad_end = 0;
615 	vaddr_t old_va = 0;
616 	vaddr_t new_va = 0;
617 
618 	if (exp_pt != param_types)
619 		return TEE_ERROR_BAD_PARAMETERS;
620 
621 	num_bytes = params[0].value.a;
622 	old_va = reg_pair_to_64(params[1].value.a, params[1].value.b);
623 	new_va = reg_pair_to_64(params[2].value.a, params[2].value.b);
624 	pad_begin = params[3].value.a;
625 	pad_end = params[3].value.b;
626 
627 	res = vm_get_flags(&utc->uctx, old_va, num_bytes, &vm_flags);
628 	if (res)
629 		return res;
630 	if (vm_flags & VM_FLAG_PERMANENT)
631 		return TEE_ERROR_ACCESS_DENIED;
632 
633 	res = vm_remap(&utc->uctx, &new_va, old_va, num_bytes, pad_begin,
634 		       pad_end);
635 	if (!res)
636 		reg_pair_from_64(new_va, &params[2].value.a,
637 				 &params[2].value.b);
638 
639 	return res;
640 }
641 
/*
 * ldelf has the same architecture/register width as the kernel, so the
 * kernel's build configuration tells which entry convention to use
 * when entering ldelf in thread_enter_user_mode() below.
 */
#ifdef ARM32
static const bool is_arm32 = true;
#else
static const bool is_arm32;
#endif
648 
/*
 * Enter ldelf's dl_entry function to load the library identified by
 * @uuid into @utc's address space with dlopen() semantics.
 *
 * The argument struct is carved out of ldelf's user-mode stack; after
 * returning from user mode the result is read back from arg->ret.
 */
static TEE_Result call_ldelf_dlopen(struct user_ta_ctx *utc, TEE_UUID *uuid,
				    uint32_t flags)
{
	uaddr_t usr_stack = utc->ldelf_stack_ptr;
	TEE_Result res = TEE_ERROR_GENERIC;
	struct dl_entry_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;

	/* Callers (system_dlopen()) reject a NULL UUID before this point */
	assert(uuid);

	/* Reserve aligned stack space for the argument struct */
	usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
	arg = (struct dl_entry_arg *)usr_stack;

	/* The ldelf stack lives in user space; verify before writing */
	res = tee_mmu_check_access_rights(&utc->uctx,
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_WRITE |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)arg, sizeof(*arg));
	if (res) {
		EMSG("ldelf stack is inaccessible!");
		return res;
	}

	memset(arg, 0, sizeof(*arg));
	arg->cmd = LDELF_DL_ENTRY_DLOPEN;
	arg->dlopen.uuid = *uuid;
	arg->dlopen.flags = flags;

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, utc->dl_entry_func,
				     is_arm32, &panicked, &panic_code);
	if (panicked) {
		EMSG("ldelf dl_entry function panicked");
		abort_print_current_ta();
		res = TEE_ERROR_TARGET_DEAD;
	}
	/* On a clean return the dlopen status is in arg->ret */
	if (!res)
		res = arg->ret;

	return res;
}
691 
692 static TEE_Result call_ldelf_dlsym(struct user_ta_ctx *utc, TEE_UUID *uuid,
693 				   const char *sym, size_t maxlen, vaddr_t *val)
694 {
695 	uaddr_t usr_stack = utc->ldelf_stack_ptr;
696 	TEE_Result res = TEE_ERROR_GENERIC;
697 	struct dl_entry_arg *arg = NULL;
698 	uint32_t panic_code = 0;
699 	uint32_t panicked = 0;
700 	size_t len = strnlen(sym, maxlen);
701 
702 	if (len == maxlen)
703 		return TEE_ERROR_BAD_PARAMETERS;
704 
705 	usr_stack -= ROUNDUP(sizeof(*arg) + len + 1, STACK_ALIGNMENT);
706 	arg = (struct dl_entry_arg *)usr_stack;
707 
708 	res = tee_mmu_check_access_rights(&utc->uctx,
709 					  TEE_MEMORY_ACCESS_READ |
710 					  TEE_MEMORY_ACCESS_WRITE |
711 					  TEE_MEMORY_ACCESS_ANY_OWNER,
712 					  (uaddr_t)arg, sizeof(*arg) + len + 1);
713 	if (res) {
714 		EMSG("ldelf stack is inaccessible!");
715 		return res;
716 	}
717 
718 	memset(arg, 0, sizeof(*arg));
719 	arg->cmd = LDELF_DL_ENTRY_DLSYM;
720 	arg->dlsym.uuid = *uuid;
721 	memcpy(arg->dlsym.symbol, sym, len);
722 	arg->dlsym.symbol[len] = '\0';
723 
724 	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
725 				     usr_stack, utc->dl_entry_func,
726 				     is_arm32, &panicked, &panic_code);
727 	if (panicked) {
728 		EMSG("ldelf dl_entry function panicked");
729 		abort_print_current_ta();
730 		res = TEE_ERROR_TARGET_DEAD;
731 	}
732 	if (!res) {
733 		res = arg->ret;
734 		if (!res)
735 			*val = arg->dlsym.val;
736 	}
737 
738 	return res;
739 }
740 
741 static TEE_Result system_dlopen(struct tee_ta_session *cs, uint32_t param_types,
742 				TEE_Param params[TEE_NUM_PARAMS])
743 {
744 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
745 					  TEE_PARAM_TYPE_VALUE_INPUT,
746 					  TEE_PARAM_TYPE_NONE,
747 					  TEE_PARAM_TYPE_NONE);
748 	TEE_Result res = TEE_ERROR_GENERIC;
749 	struct tee_ta_session *s = NULL;
750 	struct user_ta_ctx *utc = NULL;
751 	TEE_UUID *uuid = NULL;
752 	uint32_t flags = 0;
753 
754 	if (exp_pt != param_types)
755 		return TEE_ERROR_BAD_PARAMETERS;
756 
757 	uuid = params[0].memref.buffer;
758 	if (!uuid || params[0].memref.size != sizeof(*uuid))
759 		return TEE_ERROR_BAD_PARAMETERS;
760 
761 	flags = params[1].value.a;
762 
763 	utc = to_user_ta_ctx(cs->ctx);
764 
765 	s = tee_ta_pop_current_session();
766 	res = call_ldelf_dlopen(utc, uuid, flags);
767 	tee_ta_push_current_session(s);
768 
769 	return res;
770 }
771 
772 static TEE_Result system_dlsym(struct tee_ta_session *cs, uint32_t param_types,
773 			       TEE_Param params[TEE_NUM_PARAMS])
774 {
775 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
776 					  TEE_PARAM_TYPE_MEMREF_INPUT,
777 					  TEE_PARAM_TYPE_VALUE_OUTPUT,
778 					  TEE_PARAM_TYPE_NONE);
779 	TEE_Result res = TEE_ERROR_GENERIC;
780 	struct tee_ta_session *s = NULL;
781 	struct user_ta_ctx *utc = NULL;
782 	const char *sym = NULL;
783 	TEE_UUID *uuid = NULL;
784 	size_t maxlen = 0;
785 	vaddr_t va = 0;
786 
787 	if (exp_pt != param_types)
788 		return TEE_ERROR_BAD_PARAMETERS;
789 
790 	uuid = params[0].memref.buffer;
791 	if (uuid && params[0].memref.size != sizeof(*uuid))
792 		return TEE_ERROR_BAD_PARAMETERS;
793 
794 	sym = params[1].memref.buffer;
795 	if (!sym)
796 		return TEE_ERROR_BAD_PARAMETERS;
797 	maxlen = params[1].memref.size;
798 
799 	utc = to_user_ta_ctx(cs->ctx);
800 
801 	s = tee_ta_pop_current_session();
802 	res = call_ldelf_dlsym(utc, uuid, sym, maxlen, &va);
803 	tee_ta_push_current_session(s);
804 
805 	if (!res)
806 		reg_pair_from_64(va, &params[2].value.a, &params[2].value.b);
807 
808 	return res;
809 }
810 
811 static TEE_Result system_get_tpm_event_log(uint32_t param_types,
812 					   TEE_Param params[TEE_NUM_PARAMS])
813 {
814 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_OUTPUT,
815 					  TEE_PARAM_TYPE_NONE,
816 					  TEE_PARAM_TYPE_NONE,
817 					  TEE_PARAM_TYPE_NONE);
818 	size_t size = 0;
819 	TEE_Result res = TEE_SUCCESS;
820 
821 	if (exp_pt != param_types)
822 		return TEE_ERROR_BAD_PARAMETERS;
823 
824 	size = params[0].memref.size;
825 	res = tpm_get_event_log(params[0].memref.buffer, &size);
826 	params[0].memref.size = size;
827 
828 	return res;
829 }
830 
831 static TEE_Result open_session(uint32_t param_types __unused,
832 			       TEE_Param params[TEE_NUM_PARAMS] __unused,
833 			       void **sess_ctx)
834 {
835 	struct tee_ta_session *s = NULL;
836 	struct system_ctx *ctx = NULL;
837 
838 	/* Check that we're called from a user TA */
839 	s = tee_ta_get_calling_session();
840 	if (!s)
841 		return TEE_ERROR_ACCESS_DENIED;
842 	if (!is_user_ta_ctx(s->ctx))
843 		return TEE_ERROR_ACCESS_DENIED;
844 
845 	ctx = calloc(1, sizeof(*ctx));
846 	if (!ctx)
847 		return TEE_ERROR_OUT_OF_MEMORY;
848 
849 	*sess_ctx = ctx;
850 
851 	return TEE_SUCCESS;
852 }
853 
/*
 * Session teardown: close every TA binary handle still open in the
 * session's handle database, then free the session context.
 */
static void close_session(void *sess_ctx)
{
	struct system_ctx *ctx = sess_ctx;

	handle_db_destroy(&ctx->db, ta_bin_close);
	free(ctx);
}
861 
862 static TEE_Result invoke_command(void *sess_ctx, uint32_t cmd_id,
863 				 uint32_t param_types,
864 				 TEE_Param params[TEE_NUM_PARAMS])
865 {
866 	struct tee_ta_session *s = tee_ta_get_calling_session();
867 
868 	switch (cmd_id) {
869 	case PTA_SYSTEM_ADD_RNG_ENTROPY:
870 		return system_rng_reseed(s, param_types, params);
871 	case PTA_SYSTEM_DERIVE_TA_UNIQUE_KEY:
872 		return system_derive_ta_unique_key(s, param_types, params);
873 	case PTA_SYSTEM_MAP_ZI:
874 		return system_map_zi(s, param_types, params);
875 	case PTA_SYSTEM_UNMAP:
876 		return system_unmap(s, param_types, params);
877 	case PTA_SYSTEM_OPEN_TA_BINARY:
878 		return system_open_ta_binary(sess_ctx, param_types, params);
879 	case PTA_SYSTEM_CLOSE_TA_BINARY:
880 		return system_close_ta_binary(sess_ctx, param_types, params);
881 	case PTA_SYSTEM_MAP_TA_BINARY:
882 		return system_map_ta_binary(sess_ctx, s, param_types, params);
883 	case PTA_SYSTEM_COPY_FROM_TA_BINARY:
884 		return system_copy_from_ta_binary(sess_ctx, param_types,
885 						  params);
886 	case PTA_SYSTEM_SET_PROT:
887 		return system_set_prot(s, param_types, params);
888 	case PTA_SYSTEM_REMAP:
889 		return system_remap(s, param_types, params);
890 	case PTA_SYSTEM_DLOPEN:
891 		return system_dlopen(s, param_types, params);
892 	case PTA_SYSTEM_DLSYM:
893 		return system_dlsym(s, param_types, params);
894 	case PTA_SYSTEM_GET_TPM_EVENT_LOG:
895 		return system_get_tpm_event_log(param_types, params);
896 	default:
897 		break;
898 	}
899 
900 	return TEE_ERROR_NOT_IMPLEMENTED;
901 }
902 
/*
 * Register the system PTA. TA_FLAG_CONCURRENT allows multiple TA
 * sessions to invoke this PTA simultaneously.
 */
pseudo_ta_register(.uuid = PTA_SYSTEM_UUID, .name = "system.pta",
		   .flags = PTA_DEFAULT_FLAGS | TA_FLAG_CONCURRENT,
		   .open_session_entry_point = open_session,
		   .close_session_entry_point = close_session,
		   .invoke_command_entry_point = invoke_command);
908