xref: /optee_os/core/pta/system.c (revision c2020b9d076931e13f808f879f12fd1dc0bc4d05)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018-2019, Linaro Limited
 */

#include <assert.h>
#include <crypto/crypto.h>
#include <kernel/handle.h>
#include <kernel/huk_subkey.h>
#include <kernel/misc.h>
#include <kernel/msg_param.h>
#include <kernel/pseudo_ta.h>
#include <kernel/tpm.h>
#include <kernel/user_ta.h>
#include <kernel/user_ta_store.h>
#include <ldelf.h>
#include <mm/file.h>
#include <mm/fobj.h>
#include <mm/tee_mmu.h>
#include <pta_system.h>
#include <string.h>
#include <tee_api_defines.h>
#include <tee_api_defines_extensions.h>
#include <util.h>

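/*
 * State for an open TA binary: the TA store it was found in, the store
 * handle, the backing struct file and how far into the binary we have
 * read so far.
 */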
struct bin_handle {
	const struct user_ta_store_ops *op;
	struct user_ta_store_handle *h;
	struct file *f;
	size_t offs_bytes;
	size_t size_bytes;
};

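/* Per-session context, holds the database of open TA binary handles */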
struct system_ctx {
	struct handle_db db;
	const struct user_ta_store_ops *store_op;
};

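/* Source state passed to crypto_rng_add_event() when entropy is added */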
static unsigned int system_pnum;

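/*
 * Handle PTA_SYSTEM_ADD_RNG_ENTROPY: feed a caller-supplied buffer into
 * the RNG as a CRYPTO_RNG_SRC_NONSECURE entropy event.
 */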
static TEE_Result system_rng_reseed(struct tee_ta_session *s __unused,
				uint32_t param_types,
				TEE_Param params[TEE_NUM_PARAMS])
{
	size_t entropy_sz;
	uint8_t *entropy_input;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;
	entropy_input = params[0].memref.buffer;
	entropy_sz = params[0].memref.size;

	if (!entropy_sz || !entropy_input)
		return TEE_ERROR_BAD_PARAMETERS;

	crypto_rng_add_event(CRYPTO_RNG_SRC_NONSECURE, &system_pnum,
			     entropy_input, entropy_sz);
	return TEE_SUCCESS;
}

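/*
 * Handle PTA_SYSTEM_DERIVE_TA_UNIQUE_KEY: derive a key unique to the
 * calling TA from the hardware unique key. The derivation input is the
 * TA UUID concatenated with optional caller-supplied extra data, and the
 * output buffer must be writeable secure memory.
 */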
static TEE_Result system_derive_ta_unique_key(struct tee_ta_session *s,
					      uint32_t param_types,
					      TEE_Param params[TEE_NUM_PARAMS])
{
	size_t data_len = sizeof(TEE_UUID);
	TEE_Result res = TEE_ERROR_GENERIC;
	uint8_t *data = NULL;
	uint32_t access_flags = 0;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
					  TEE_PARAM_TYPE_MEMREF_OUTPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);
	struct user_ta_ctx *utc = NULL;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	if (params[0].memref.size > TA_DERIVED_EXTRA_DATA_MAX_SIZE ||
	    params[1].memref.size < TA_DERIVED_KEY_MIN_SIZE ||
	    params[1].memref.size > TA_DERIVED_KEY_MAX_SIZE)
		return TEE_ERROR_BAD_PARAMETERS;

	utc = to_user_ta_ctx(s->ctx);

	/*
	 * The derived key shall not end up in non-secure memory by
	 * mistake.
	 *
	 * Note that we're allowing shared memory as long as it's
	 * secure. This is needed because a TA always uses shared memory
	 * when communicating with another TA.
	 */
	access_flags = TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER |
		       TEE_MEMORY_ACCESS_SECURE;
	res = tee_mmu_check_access_rights(&utc->uctx, access_flags,
					  (uaddr_t)params[1].memref.buffer,
					  params[1].memref.size);
	if (res != TEE_SUCCESS)
		return TEE_ERROR_SECURITY;

	/* Take extra data into account. */
	if (ADD_OVERFLOW(data_len, params[0].memref.size, &data_len))
		return TEE_ERROR_SECURITY;

	data = calloc(data_len, 1);
	if (!data)
		return TEE_ERROR_OUT_OF_MEMORY;

	memcpy(data, &s->ctx->uuid, sizeof(TEE_UUID));

	/* Append the user provided data */
	memcpy(data + sizeof(TEE_UUID), params[0].memref.buffer,
	       params[0].memref.size);

	res = huk_subkey_derive(HUK_SUBKEY_UNIQUE_TA, data, data_len,
				params[1].memref.buffer,
				params[1].memref.size);
	free(data);

	return res;
}

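/*
 * Handle PTA_SYSTEM_MAP_ZI: map zero-initialized anonymous memory into
 * the calling TA, optionally as shareable, at a requested (or freely
 * chosen) virtual address with optional padding before and after the
 * region.
 */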
static TEE_Result system_map_zi(struct tee_ta_session *s, uint32_t param_types,
				TEE_Param params[TEE_NUM_PARAMS])
{
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INOUT,
					  TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_NONE);
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
	uint32_t prot = TEE_MATTR_URW | TEE_MATTR_PRW;
	TEE_Result res = TEE_ERROR_GENERIC;
	struct mobj *mobj = NULL;
	uint32_t pad_begin = 0;
	uint32_t vm_flags = 0;
	struct fobj *f = NULL;
	uint32_t pad_end = 0;
	size_t num_bytes = 0;
	vaddr_t va = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;
	if (params[0].value.b & ~PTA_SYSTEM_MAP_FLAG_SHAREABLE)
		return TEE_ERROR_BAD_PARAMETERS;

	if (params[0].value.b & PTA_SYSTEM_MAP_FLAG_SHAREABLE)
		vm_flags |= VM_FLAG_SHAREABLE;

	num_bytes = params[0].value.a;
	va = reg_pair_to_64(params[1].value.a, params[1].value.b);
	pad_begin = params[2].value.a;
	pad_end = params[2].value.b;

	f = fobj_ta_mem_alloc(ROUNDUP(num_bytes, SMALL_PAGE_SIZE) /
			      SMALL_PAGE_SIZE);
	if (!f)
		return TEE_ERROR_OUT_OF_MEMORY;
	mobj = mobj_with_fobj_alloc(f, NULL);
	fobj_put(f);
	if (!mobj)
		return TEE_ERROR_OUT_OF_MEMORY;
	res = vm_map_pad(&utc->uctx, &va, num_bytes, prot, vm_flags,
			 mobj, 0, pad_begin, pad_end);
	mobj_put(mobj);
	if (!res)
		reg_pair_from_64(va, &params[1].value.a, &params[1].value.b);

	return res;
}

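/*
 * Handle PTA_SYSTEM_UNMAP: unmap a previously mapped region, unless the
 * region is part of a permanent mapping.
 */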
static TEE_Result system_unmap(struct tee_ta_session *s, uint32_t param_types,
			       TEE_Param params[TEE_NUM_PARAMS])
{
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
	TEE_Result res = TEE_SUCCESS;
	uint32_t vm_flags = 0;
	vaddr_t va = 0;
	size_t sz = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	if (params[0].value.b)
		return TEE_ERROR_BAD_PARAMETERS;

	va = reg_pair_to_64(params[1].value.a, params[1].value.b);
	sz = ROUNDUP(params[0].value.a, SMALL_PAGE_SIZE);

	res = vm_get_flags(&utc->uctx, va, sz, &vm_flags);
	if (res)
		return res;
	if (vm_flags & VM_FLAG_PERMANENT)
		return TEE_ERROR_ACCESS_DENIED;

	return vm_unmap(&to_user_ta_ctx(s->ctx)->uctx, va, sz);
}

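/* Release a TA binary handle: close the store handle and drop the file */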
static void ta_bin_close(void *ptr)
{
	struct bin_handle *binh = ptr;

	if (binh) {
		if (binh->op && binh->h)
			binh->op->close(binh->h);
		file_put(binh->f);
	}
	free(binh);
}

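/*
 * Handle PTA_SYSTEM_OPEN_TA_BINARY: look up a TA ELF by UUID in the
 * registered TA stores, record its size and file tag, and return a
 * handle for use with the map/copy/close commands below.
 */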
static TEE_Result system_open_ta_binary(struct system_ctx *ctx,
					uint32_t param_types,
					TEE_Param params[TEE_NUM_PARAMS])
{
	TEE_Result res = TEE_SUCCESS;
	struct bin_handle *binh = NULL;
	int h = 0;
	TEE_UUID *uuid = NULL;
	uint8_t tag[FILE_TAG_SIZE] = { 0 };
	unsigned int tag_len = sizeof(tag);
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
					  TEE_PARAM_TYPE_VALUE_OUTPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;
	if (params[0].memref.size != sizeof(*uuid))
		return TEE_ERROR_BAD_PARAMETERS;

	uuid = params[0].memref.buffer;

	binh = calloc(1, sizeof(*binh));
	if (!binh)
		return TEE_ERROR_OUT_OF_MEMORY;

	SCATTERED_ARRAY_FOREACH(binh->op, ta_stores, struct user_ta_store_ops) {
		DMSG("Lookup user TA ELF %pUl (%s)",
		     (void *)uuid, binh->op->description);

		res = binh->op->open(uuid, &binh->h);
		DMSG("res=0x%x", res);
		if (res != TEE_ERROR_ITEM_NOT_FOUND &&
		    res != TEE_ERROR_STORAGE_NOT_AVAILABLE)
			break;
	}
	if (res)
		goto err;

	res = binh->op->get_size(binh->h, &binh->size_bytes);
	if (res)
		goto err;
	res = binh->op->get_tag(binh->h, tag, &tag_len);
	if (res)
		goto err;
	binh->f = file_get_by_tag(tag, tag_len);
	if (!binh->f)
		goto err_oom;

	h = handle_get(&ctx->db, binh);
	if (h < 0)
		goto err_oom;
	params[1].value.a = h;

	return TEE_SUCCESS;
err_oom:
	res = TEE_ERROR_OUT_OF_MEMORY;
err:
	ta_bin_close(binh);
	return res;
}

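/*
 * Handle PTA_SYSTEM_CLOSE_TA_BINARY: release a handle returned by
 * PTA_SYSTEM_OPEN_TA_BINARY. Any unread remainder of the binary is read
 * (and discarded) first so the TA store sees the complete image.
 */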
static TEE_Result system_close_ta_binary(struct system_ctx *ctx,
					 uint32_t param_types,
					 TEE_Param params[TEE_NUM_PARAMS])
{
	TEE_Result res = TEE_SUCCESS;
	struct bin_handle *binh = NULL;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	if (params[0].value.b)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_put(&ctx->db, params[0].value.a);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;

	if (binh->offs_bytes < binh->size_bytes)
		res = binh->op->read(binh->h, NULL,
				     binh->size_bytes - binh->offs_bytes);

	ta_bin_close(binh);
	return res;
}

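/*
 * Copy @num_bytes at offset @offs_bytes of the TA binary into the buffer
 * at @va. The TA stores only read forwards, so any gap up to @offs_bytes
 * is skipped first and anything beyond the end of the binary is
 * zero-filled.
 */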
static TEE_Result binh_copy_to(struct bin_handle *binh, vaddr_t va,
			       size_t offs_bytes, size_t num_bytes)
{
	TEE_Result res = TEE_SUCCESS;
	size_t l = num_bytes;

	if (offs_bytes < binh->offs_bytes)
		return TEE_ERROR_BAD_STATE;
	if (offs_bytes > binh->offs_bytes) {
		res = binh->op->read(binh->h, NULL,
				     offs_bytes - binh->offs_bytes);
		if (res)
			return res;
		binh->offs_bytes = offs_bytes;
	}

	if (binh->offs_bytes + l > binh->size_bytes) {
		size_t rb = binh->size_bytes - binh->offs_bytes;

		res = binh->op->read(binh->h, (void *)va, rb);
		if (res)
			return res;
		memset((uint8_t *)va + rb, 0, l - rb);
		binh->offs_bytes = binh->size_bytes;
	} else {
		res = binh->op->read(binh->h, (void *)va, l);
		if (res)
			return res;
		binh->offs_bytes += l;
	}

	return TEE_SUCCESS;
}

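/*
 * Handle PTA_SYSTEM_MAP_TA_BINARY: map a page-aligned range of an opened
 * TA binary into the calling TA. Shareable read-only mappings reuse a
 * registered file slice when one exists; otherwise fresh pages are
 * allocated, filled from the binary and given the requested protection.
 */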
static TEE_Result system_map_ta_binary(struct system_ctx *ctx,
				       struct tee_ta_session *s,
				       uint32_t param_types,
				       TEE_Param params[TEE_NUM_PARAMS])
{
	const uint32_t accept_flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE |
				      PTA_SYSTEM_MAP_FLAG_WRITEABLE |
				      PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INOUT,
					  TEE_PARAM_TYPE_VALUE_INPUT);
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
	struct bin_handle *binh = NULL;
	TEE_Result res = TEE_SUCCESS;
	struct file_slice *fs = NULL;
	bool file_is_locked = false;
	struct mobj *mobj = NULL;
	uint32_t offs_bytes = 0;
	uint32_t offs_pages = 0;
	uint32_t num_bytes = 0;
	uint32_t pad_begin = 0;
	uint32_t pad_end = 0;
	size_t num_pages = 0;
	uint32_t flags = 0;
	uint32_t prot = 0;
	vaddr_t va = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_lookup(&ctx->db, params[0].value.a);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;
	flags = params[0].value.b;
	offs_bytes = params[1].value.a;
	num_bytes = params[1].value.b;
	va = reg_pair_to_64(params[2].value.a, params[2].value.b);
	pad_begin = params[3].value.a;
	pad_end = params[3].value.b;

	if ((flags & accept_flags) != flags)
		return TEE_ERROR_BAD_PARAMETERS;

	if ((flags & PTA_SYSTEM_MAP_FLAG_SHAREABLE) &&
	    (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	if ((flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE) &&
	    (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	if (offs_bytes & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	prot = TEE_MATTR_UR | TEE_MATTR_PR;
	if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)
		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
	if (flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE)
		prot |= TEE_MATTR_UX;

	offs_pages = offs_bytes >> SMALL_PAGE_SHIFT;
	num_pages = ROUNDUP(num_bytes, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;

	if (!file_trylock(binh->f)) {
		/*
		 * Before we can block on the file lock we must make all
		 * our page tables available for reclaiming. This avoids
		 * a deadlock with the other thread (which already holds
		 * the file lock) when it maps lots of memory below.
		 */
		tee_mmu_set_ctx(NULL);
		file_lock(binh->f);
		tee_mmu_set_ctx(s->ctx);
	}
	file_is_locked = true;
	fs = file_find_slice(binh->f, offs_pages);
	if (fs) {
		/* If there's a registered slice it has to match */
		if (fs->page_offset != offs_pages ||
		    num_pages > fs->fobj->num_pages) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		/* If there's a slice we must be mapping shareable */
		if (!(flags & PTA_SYSTEM_MAP_FLAG_SHAREABLE)) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		mobj = mobj_with_fobj_alloc(fs->fobj, binh->f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		res = vm_map_pad(&utc->uctx, &va, num_pages * SMALL_PAGE_SIZE,
				 prot, VM_FLAG_READONLY,
				 mobj, 0, pad_begin, pad_end);
		mobj_put(mobj);
		if (res)
			goto err;
	} else {
		struct fobj *f = fobj_ta_mem_alloc(num_pages);
		struct file *file = NULL;
		uint32_t vm_flags = 0;

		if (!f) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		if (!(flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)) {
			file = binh->f;
			vm_flags |= VM_FLAG_READONLY;
		}

		mobj = mobj_with_fobj_alloc(f, file);
		fobj_put(f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		res = vm_map_pad(&utc->uctx, &va, num_pages * SMALL_PAGE_SIZE,
				 TEE_MATTR_PRW, vm_flags, mobj, 0,
				 pad_begin, pad_end);
		mobj_put(mobj);
		if (res)
			goto err;
		res = binh_copy_to(binh, va, offs_bytes, num_bytes);
		if (res)
			goto err_unmap_va;
		res = vm_set_prot(&utc->uctx, va, num_pages * SMALL_PAGE_SIZE,
				  prot);
		if (res)
			goto err_unmap_va;

		/*
		 * The context is currently active, set it again to
		 * update the mapping.
		 */
		tee_mmu_set_ctx(s->ctx);

		if (!(flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)) {
			res = file_add_slice(binh->f, f, offs_pages);
			if (res)
				goto err_unmap_va;
		}
	}

	file_unlock(binh->f);

	reg_pair_from_64(va, &params[2].value.a, &params[2].value.b);
	return TEE_SUCCESS;

err_unmap_va:
	if (vm_unmap(&utc->uctx, va, num_pages * SMALL_PAGE_SIZE))
		panic();

	/*
	 * The context is currently active, set it again to update the
	 * mapping.
	 */
	tee_mmu_set_ctx(s->ctx);

err:
	if (file_is_locked)
		file_unlock(binh->f);

	return res;
}

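/*
 * Handle PTA_SYSTEM_COPY_FROM_TA_BINARY: copy a range of an opened TA
 * binary into a caller-supplied output buffer.
 */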
static TEE_Result system_copy_from_ta_binary(struct system_ctx *ctx,
					     uint32_t param_types,
					     TEE_Param params[TEE_NUM_PARAMS])
{
	struct bin_handle *binh = NULL;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_MEMREF_OUTPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_lookup(&ctx->db, params[0].value.a);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;

	return binh_copy_to(binh, (vaddr_t)params[1].memref.buffer,
			    params[0].value.b, params[1].memref.size);
}

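/*
 * Handle PTA_SYSTEM_SET_PROT: change the protection of an existing
 * mapping. Permanent mappings cannot be changed and read-only file
 * mappings cannot be made writeable.
 */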
static TEE_Result system_set_prot(struct tee_ta_session *s,
				  uint32_t param_types,
				  TEE_Param params[TEE_NUM_PARAMS])
{
	const uint32_t accept_flags = PTA_SYSTEM_MAP_FLAG_WRITEABLE |
				      PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
	uint32_t prot = TEE_MATTR_UR | TEE_MATTR_PR;
	TEE_Result res = TEE_SUCCESS;
	uint32_t vm_flags = 0;
	uint32_t flags = 0;
	vaddr_t va = 0;
	size_t sz = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	flags = params[0].value.b;

	if ((flags & accept_flags) != flags)
		return TEE_ERROR_BAD_PARAMETERS;
	if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)
		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
	if (flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE)
		prot |= TEE_MATTR_UX;

	va = reg_pair_to_64(params[1].value.a, params[1].value.b);
	sz = ROUNDUP(params[0].value.a, SMALL_PAGE_SIZE);

	res = vm_get_flags(&utc->uctx, va, sz, &vm_flags);
	if (res)
		return res;
	if (vm_flags & VM_FLAG_PERMANENT)
		return TEE_ERROR_ACCESS_DENIED;

	/*
	 * If the segment is a mapping of a part of a file (vm_flags &
	 * VM_FLAG_READONLY) it cannot be made writeable as all mapped
	 * files are mapped read-only.
	 */
	if ((vm_flags & VM_FLAG_READONLY) &&
	    (prot & (TEE_MATTR_UW | TEE_MATTR_PW)))
		return TEE_ERROR_ACCESS_DENIED;

	return vm_set_prot(&utc->uctx, va, sz, prot);
}

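/*
 * Handle PTA_SYSTEM_REMAP: move an existing, non-permanent mapping to a
 * new virtual address, with optional padding around the new location.
 */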
static TEE_Result system_remap(struct tee_ta_session *s, uint32_t param_types,
			       TEE_Param params[TEE_NUM_PARAMS])
{
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INOUT,
					  TEE_PARAM_TYPE_VALUE_INPUT);
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
	TEE_Result res = TEE_SUCCESS;
	uint32_t num_bytes = 0;
	uint32_t pad_begin = 0;
	uint32_t vm_flags = 0;
	uint32_t pad_end = 0;
	vaddr_t old_va = 0;
	vaddr_t new_va = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	num_bytes = params[0].value.a;
	old_va = reg_pair_to_64(params[1].value.a, params[1].value.b);
	new_va = reg_pair_to_64(params[2].value.a, params[2].value.b);
	pad_begin = params[3].value.a;
	pad_end = params[3].value.b;

	res = vm_get_flags(&utc->uctx, old_va, num_bytes, &vm_flags);
	if (res)
		return res;
	if (vm_flags & VM_FLAG_PERMANENT)
		return TEE_ERROR_ACCESS_DENIED;

	res = vm_remap(&utc->uctx, &new_va, old_va, num_bytes, pad_begin,
		       pad_end);
	if (!res)
		reg_pair_from_64(new_va, &params[2].value.a,
				 &params[2].value.b);

	return res;
}

/* ldelf has the same architecture/register width as the kernel */
#ifdef ARM32
static const bool is_arm32 = true;
#else
static const bool is_arm32;
#endif

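/*
 * Enter ldelf's dl_entry function in user mode to dlopen() the TA ELF
 * identified by @uuid. The argument struct is placed on the ldelf stack
 * just below the saved stack pointer.
 */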
static TEE_Result call_ldelf_dlopen(struct user_ta_ctx *utc, TEE_UUID *uuid,
				    uint32_t flags)
{
	uaddr_t usr_stack = utc->ldelf_stack_ptr;
	TEE_Result res = TEE_ERROR_GENERIC;
	struct dl_entry_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;

	assert(uuid);

	usr_stack -= ROUNDUP(sizeof(*arg), STACK_ALIGNMENT);
	arg = (struct dl_entry_arg *)usr_stack;

	res = tee_mmu_check_access_rights(&utc->uctx,
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_WRITE |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)arg, sizeof(*arg));
	if (res) {
		EMSG("ldelf stack is inaccessible!");
		return res;
	}

	memset(arg, 0, sizeof(*arg));
	arg->cmd = LDELF_DL_ENTRY_DLOPEN;
	arg->dlopen.uuid = *uuid;
	arg->dlopen.flags = flags;

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, utc->dl_entry_func,
				     is_arm32, &panicked, &panic_code);
	if (panicked) {
		EMSG("ldelf dl_entry function panicked");
		abort_print_current_ta();
		res = TEE_ERROR_TARGET_DEAD;
	}
	if (!res)
		res = arg->ret;

	return res;
}

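/*
 * Enter ldelf's dl_entry function in user mode to resolve @sym (which
 * must be NUL-terminated within @maxlen bytes) in the ELF identified by
 * @uuid, returning the symbol address in @val.
 */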
static TEE_Result call_ldelf_dlsym(struct user_ta_ctx *utc, TEE_UUID *uuid,
				   const char *sym, size_t maxlen, vaddr_t *val)
{
	uaddr_t usr_stack = utc->ldelf_stack_ptr;
	TEE_Result res = TEE_ERROR_GENERIC;
	struct dl_entry_arg *arg = NULL;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	size_t len = strnlen(sym, maxlen);

	if (len == maxlen)
		return TEE_ERROR_BAD_PARAMETERS;

	usr_stack -= ROUNDUP(sizeof(*arg) + len + 1, STACK_ALIGNMENT);
	arg = (struct dl_entry_arg *)usr_stack;

	res = tee_mmu_check_access_rights(&utc->uctx,
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_WRITE |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)arg, sizeof(*arg) + len + 1);
	if (res) {
		EMSG("ldelf stack is inaccessible!");
		return res;
	}

	memset(arg, 0, sizeof(*arg));
	arg->cmd = LDELF_DL_ENTRY_DLSYM;
	arg->dlsym.uuid = *uuid;
	memcpy(arg->dlsym.symbol, sym, len);
	arg->dlsym.symbol[len] = '\0';

	res = thread_enter_user_mode((vaddr_t)arg, 0, 0, 0,
				     usr_stack, utc->dl_entry_func,
				     is_arm32, &panicked, &panic_code);
	if (panicked) {
		EMSG("ldelf dl_entry function panicked");
		abort_print_current_ta();
		res = TEE_ERROR_TARGET_DEAD;
	}
	if (!res) {
		res = arg->ret;
		if (!res)
			*val = arg->dlsym.val;
	}

	return res;
}

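/*
 * Handle PTA_SYSTEM_DLOPEN: dynamically load another TA ELF on behalf of
 * the calling TA by invoking ldelf. The PTA session is popped while
 * ldelf runs so the calling TA is the current session.
 */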
static TEE_Result system_dlopen(struct tee_ta_session *cs, uint32_t param_types,
				TEE_Param params[TEE_NUM_PARAMS])
{
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
					  TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);
	TEE_Result res = TEE_ERROR_GENERIC;
	struct tee_ta_session *s = NULL;
	struct user_ta_ctx *utc = NULL;
	TEE_UUID *uuid = NULL;
	uint32_t flags = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	uuid = params[0].memref.buffer;
	if (!uuid || params[0].memref.size != sizeof(*uuid))
		return TEE_ERROR_BAD_PARAMETERS;

	flags = params[1].value.a;

	utc = to_user_ta_ctx(cs->ctx);

	s = tee_ta_pop_current_session();
	res = call_ldelf_dlopen(utc, uuid, flags);
	tee_ta_push_current_session(s);

	return res;
}

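/*
 * Handle PTA_SYSTEM_DLSYM: resolve a symbol by name in a TA ELF
 * previously loaded with PTA_SYSTEM_DLOPEN and return its address.
 */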
static TEE_Result system_dlsym(struct tee_ta_session *cs, uint32_t param_types,
			       TEE_Param params[TEE_NUM_PARAMS])
{
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
					  TEE_PARAM_TYPE_MEMREF_INPUT,
					  TEE_PARAM_TYPE_VALUE_OUTPUT,
					  TEE_PARAM_TYPE_NONE);
	TEE_Result res = TEE_ERROR_GENERIC;
	struct tee_ta_session *s = NULL;
	struct user_ta_ctx *utc = NULL;
	const char *sym = NULL;
	TEE_UUID *uuid = NULL;
	size_t maxlen = 0;
	vaddr_t va = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	uuid = params[0].memref.buffer;
	if (uuid && params[0].memref.size != sizeof(*uuid))
		return TEE_ERROR_BAD_PARAMETERS;

	sym = params[1].memref.buffer;
	if (!sym)
		return TEE_ERROR_BAD_PARAMETERS;
	maxlen = params[1].memref.size;

	utc = to_user_ta_ctx(cs->ctx);

	s = tee_ta_pop_current_session();
	res = call_ldelf_dlsym(utc, uuid, sym, maxlen, &va);
	tee_ta_push_current_session(s);

	if (!res)
		reg_pair_from_64(va, &params[2].value.a, &params[2].value.b);

	return res;
}

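/*
 * Handle PTA_SYSTEM_GET_TPM_EVENT_LOG: copy the TPM event log into the
 * caller's buffer and report the size back.
 */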
static TEE_Result system_get_tpm_event_log(uint32_t param_types,
					   TEE_Param params[TEE_NUM_PARAMS])
{
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_OUTPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);
	size_t size = 0;
	TEE_Result res = TEE_SUCCESS;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	size = params[0].memref.size;
	res = tpm_get_event_log(params[0].memref.buffer, &size);
	params[0].memref.size = size;

	return res;
}

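/*
 * Only user TAs may open a session to this PTA. A per-session context
 * holding the TA binary handle database is allocated here and freed in
 * close_session().
 */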
static TEE_Result open_session(uint32_t param_types __unused,
			       TEE_Param params[TEE_NUM_PARAMS] __unused,
			       void **sess_ctx)
{
	struct tee_ta_session *s = NULL;
	struct system_ctx *ctx = NULL;

	/* Check that we're called from a user TA */
	s = tee_ta_get_calling_session();
	if (!s)
		return TEE_ERROR_ACCESS_DENIED;
	if (!is_user_ta_ctx(s->ctx))
		return TEE_ERROR_ACCESS_DENIED;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx)
		return TEE_ERROR_OUT_OF_MEMORY;

	*sess_ctx = ctx;

	return TEE_SUCCESS;
}

static void close_session(void *sess_ctx)
{
	struct system_ctx *ctx = sess_ctx;

	handle_db_destroy(&ctx->db, ta_bin_close);
	free(ctx);
}

static TEE_Result invoke_command(void *sess_ctx, uint32_t cmd_id,
				 uint32_t param_types,
				 TEE_Param params[TEE_NUM_PARAMS])
{
	struct tee_ta_session *s = tee_ta_get_calling_session();

	switch (cmd_id) {
	case PTA_SYSTEM_ADD_RNG_ENTROPY:
		return system_rng_reseed(s, param_types, params);
	case PTA_SYSTEM_DERIVE_TA_UNIQUE_KEY:
		return system_derive_ta_unique_key(s, param_types, params);
	case PTA_SYSTEM_MAP_ZI:
		return system_map_zi(s, param_types, params);
	case PTA_SYSTEM_UNMAP:
		return system_unmap(s, param_types, params);
	case PTA_SYSTEM_OPEN_TA_BINARY:
		return system_open_ta_binary(sess_ctx, param_types, params);
	case PTA_SYSTEM_CLOSE_TA_BINARY:
		return system_close_ta_binary(sess_ctx, param_types, params);
	case PTA_SYSTEM_MAP_TA_BINARY:
		return system_map_ta_binary(sess_ctx, s, param_types, params);
	case PTA_SYSTEM_COPY_FROM_TA_BINARY:
		return system_copy_from_ta_binary(sess_ctx, param_types,
						  params);
	case PTA_SYSTEM_SET_PROT:
		return system_set_prot(s, param_types, params);
	case PTA_SYSTEM_REMAP:
		return system_remap(s, param_types, params);
	case PTA_SYSTEM_DLOPEN:
		return system_dlopen(s, param_types, params);
	case PTA_SYSTEM_DLSYM:
		return system_dlsym(s, param_types, params);
	case PTA_SYSTEM_GET_TPM_EVENT_LOG:
		return system_get_tpm_event_log(param_types, params);
	default:
		break;
	}

	return TEE_ERROR_NOT_IMPLEMENTED;
}

pseudo_ta_register(.uuid = PTA_SYSTEM_UUID, .name = "system.pta",
		   .flags = PTA_DEFAULT_FLAGS | TA_FLAG_CONCURRENT,
		   .open_session_entry_point = open_session,
		   .close_session_entry_point = close_session,
		   .invoke_command_entry_point = invoke_command);