// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020, Arm Limited
 */

#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/stmm_sp.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <malloc.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee/entry_std.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc_storage.h>
#include <trace.h>
#include <types_ext.h>
#include <user_ta_header.h>
#include <utee_types.h>
#include <util.h>

#if defined(CFG_TA_STATS)
#define MAX_DUMP_SESS_NUM	(16)
struct tee_ta_dump_stats {
	TEE_UUID uuid;
	uint32_t panicked;	/* True if TA has panicked */
	uint32_t sess_num;	/* Number of opened sessions */
	struct malloc_stats heap;
};

struct tee_ta_dump_ctx {
	TEE_UUID uuid;
	uint32_t panicked;
	bool is_user_ta;
	uint32_t sess_num;
	uint32_t sess_id[MAX_DUMP_SESS_NUM];
};
#endif

/* This mutex protects the critical section in tee_ta_init_session */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
/* This condvar is used when waiting for a TA context to become initialized */
struct condvar tee_ta_init_cv = CONDVAR_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static short int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available. */
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}

static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}
#endif
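
/*
 * Illustrative sketch of how the single-instance lock is used: it is
 * recursive per thread, and is always taken and released with
 * tee_ta_mutex held, e.g.
 *
 *	mutex_lock(&tee_ta_mutex);
 *	lock_single_instance();		// may sleep on tee_ta_cv
 *	...
 *	unlock_single_instance();	// wakes a waiter once count == 0
 *	mutex_unlock(&tee_ta_mutex);
 *
 * tee_ta_try_set_busy() and tee_ta_clear_busy() below pair these calls
 * for TAs flagged TA_FLAG_SINGLE_INSTANCE.
 */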

struct tee_ta_session *__noprof to_ta_session(struct ts_session *sess)
{
	assert(is_ta_ctx(sess->ctx) || is_stmm_ctx(sess->ctx));
	return container_of(sess, struct tee_ta_session, ts_sess);
}

static struct tee_ta_ctx *ts_to_ta_ctx(struct ts_ctx *ctx)
{
	if (is_ta_ctx(ctx))
		return to_ta_ctx(ctx);

	if (is_stmm_ctx(ctx))
		return &(to_stmm_ctx(ctx)->ta_ctx);

	panic("bad context");
}

static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	if (ctx->flags & TA_FLAG_CONCURRENT)
		return true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We're holding the single-instance lock and the
			 * TA is busy. Since waiting now would only cause
			 * a deadlock, we release the lock and return
			 * false.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, we're free to
		 * wait for the TA to become available.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either it's already true or we should set it to true */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	if (ctx->flags & TA_FLAG_CONCURRENT)
		return;

	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		unlock_single_instance();

	mutex_unlock(&tee_ta_mutex);
}

static void dec_session_ref_count(struct tee_ta_session *s)
{
	assert(s->ref_count > 0);
	s->ref_count--;
	if (s->ref_count == 1)
		condvar_signal(&s->refc_cv);
}
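
/*
 * Note, spelled out for clarity: refc_cv is signalled when ref_count
 * drops to 1 rather than 0 because tee_ta_unlink_session() below waits
 * while itself holding the last remaining reference:
 *
 *	while (s->ref_count != 1)
 *		condvar_wait(&s->refc_cv, &tee_ta_mutex);
 */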

void tee_ta_put_session(struct tee_ta_session *s)
{
	mutex_lock(&tee_ta_mutex);

	if (s->lock_thread == thread_get_id()) {
		s->lock_thread = THREAD_ID_INVALID;
		condvar_signal(&s->lock_cv);
	}
	dec_session_ref_count(s);

	mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *found = NULL;

	TAILQ_FOREACH(s, open_sessions, link) {
		if (s->id == id) {
			found = s;
			break;
		}
	}

	return found;
}

struct tee_ta_session *tee_ta_find_session(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;

	mutex_lock(&tee_ta_mutex);

	s = tee_ta_find_session_nolock(id, open_sessions);

	mutex_unlock(&tee_ta_mutex);

	return s;
}

struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	mutex_lock(&tee_ta_mutex);

	while (true) {
		s = tee_ta_find_session_nolock(id, open_sessions);
		if (!s)
			break;
		if (s->unlink) {
			s = NULL;
			break;
		}
		s->ref_count++;
		if (!exclusive)
			break;

		assert(s->lock_thread != thread_get_id());

		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
			condvar_wait(&s->lock_cv, &tee_ta_mutex);

		if (s->unlink) {
			dec_session_ref_count(s);
			s = NULL;
			break;
		}

		s->lock_thread = thread_get_id();
		break;
	}

	mutex_unlock(&tee_ta_mutex);
	return s;
}
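
/*
 * Typical pairing (illustrative): an exclusive get locks the session to
 * the calling thread until it is put back, e.g.
 *
 *	s = tee_ta_get_session(id, true, open_sessions);
 *	if (!s)
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 *	...use the session exclusively...
 *	tee_ta_put_session(s);	// drops the lock and the reference
 */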

static void tee_ta_unlink_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	mutex_lock(&tee_ta_mutex);

	assert(s->ref_count >= 1);
	assert(s->lock_thread == thread_get_id());
	assert(!s->unlink);

	s->unlink = true;
	condvar_broadcast(&s->lock_cv);

	while (s->ref_count != 1)
		condvar_wait(&s->refc_cv, &tee_ta_mutex);

	TAILQ_REMOVE(open_sessions, s, link);

	mutex_unlock(&tee_ta_mutex);
}

static void destroy_session(struct tee_ta_session *s,
			    struct tee_ta_session_head *open_sessions)
{
#if defined(CFG_FTRACE_SUPPORT)
	if (s->ts_sess.ctx && s->ts_sess.ctx->ops->dump_ftrace) {
		ts_push_current_session(&s->ts_sess);
		s->ts_sess.fbuf = NULL;
		s->ts_sess.ctx->ops->dump_ftrace(s->ts_sess.ctx);
		ts_pop_current_session();
	}
#endif

	tee_ta_unlink_session(s, open_sessions);
#if defined(CFG_TA_GPROF_SUPPORT)
	free(s->ts_sess.sbuf);
#endif
	free(s);
}

static void destroy_context(struct tee_ta_ctx *ctx)
{
	DMSG("Destroy TA ctx (0x%" PRIxVA ")",  (vaddr_t)ctx);

	condvar_destroy(&ctx->busy_cv);
	ctx->ts_ctx.ops->destroy(&ctx->ts_ctx);
}
/*
 * tee_ta_context_find - Find a TA context based on a UUID (input)
 * Returns a pointer to the context, or NULL if not found
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx;

	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (memcmp(&ctx->ts_ctx.uuid, uuid, sizeof(TEE_UUID)) == 0)
			return ctx;
	}

	return NULL;
}

/* Check if the requester (client ID) matches the session's initial client */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
	if (id == KERN_IDENTITY)
		return TEE_SUCCESS;

	if (id == NSAPP_IDENTITY) {
		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
			DMSG("nsec tries to hijack TA session");
			return TEE_ERROR_ACCESS_DENIED;
		}
		return TEE_SUCCESS;
	}

	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
		DMSG("client id mismatch");
		return TEE_ERROR_ACCESS_DENIED;
	}
	return TEE_SUCCESS;
}
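
/*
 * Note (assumption, for clarity): KERN_IDENTITY and NSAPP_IDENTITY are
 * sentinel pointer values defined in the kernel headers, so they are
 * matched by address above and never dereferenced; only a real
 * TEE_Identity reaches the memcmp().
 */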

/*
 * Check if invocation parameters match TA properties
 *
 * @sess - current session handle
 * @param - already identified memory references hold a valid 'mobj'.
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy).
 * - Only SDP flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
			 struct tee_ta_param *param __unused)
{
	/*
	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
	 * are rejected at OP-TEE core entry. Hence, here all TAs have the
	 * same permissions regarding memory reference parameters.
	 */
	return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
			 struct tee_ta_param *param)
{
	int n;

	/*
	 * When CFG_SECURE_DATA_PATH is enabled, OP-TEE entry allows SHM and
	 * SDP memory references. Only TAs flagged SDP can access SDP memory.
	 */
	if (sess->ts_sess.ctx &&
	    ts_to_ta_ctx(sess->ts_sess.ctx)->flags & TA_FLAG_SECURE_DATA_PATH)
		return true;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		struct param_mem *mem = &param->u[n].mem;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!mem->size)
			continue;
		if (mobj_is_sdp_mem(mem->mobj))
			return false;
	}
	return true;
}
#endif
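
/*
 * Example (illustrative): a TA built without TA_FLAG_SECURE_DATA_PATH
 * that is passed a non-empty SDP memref in any of the four parameters
 * is rejected here, so the caller sees TEE_ERROR_BAD_PARAMETERS from
 * tee_ta_open_session()/tee_ta_invoke_command() below.
 */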

static void set_invoke_timeout(struct tee_ta_session *sess,
				      uint32_t cancel_req_to)
{
	TEE_Time current_time;
	TEE_Time cancel_time;

	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
		goto infinite;

	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		goto infinite;

	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
			 &cancel_time.seconds))
		goto infinite;

	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
	if (cancel_time.millis > 1000) {
		if (ADD_OVERFLOW(cancel_time.seconds, 1,
				 &cancel_time.seconds))
			goto infinite;

		cancel_time.millis -= 1000;
	}

	sess->cancel_time = cancel_time;
	return;

infinite:
	sess->cancel_time.seconds = UINT32_MAX;
	sess->cancel_time.millis = UINT32_MAX;
}
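
/*
 * Worked example (illustrative): cancel_req_to = 2500 ms with
 * current_time = { .seconds = 10, .millis = 800 } gives
 *   cancel_time.seconds = 10 + 2500 / 1000 = 12
 *   cancel_time.millis  = 800 + 2500 % 1000 = 1300  (> 1000, carry)
 * and after the carry: cancel_time = { .seconds = 13, .millis = 300 }.
 */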

/*-----------------------------------------------------------------------------
 * Close a Trusted Application and free available resources
 *---------------------------------------------------------------------------*/
TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
				struct tee_ta_session_head *open_sessions,
				const TEE_Identity *clnt_id)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool keep_alive = false;

	DMSG("csess 0x%" PRIxVA " id %u",
	     (vaddr_t)csess, csess ? csess->id : UINT_MAX);

	if (!csess)
		return TEE_ERROR_ITEM_NOT_FOUND;

	sess = tee_ta_get_session(csess->id, true, open_sessions);

	if (!sess) {
		EMSG("session 0x%" PRIxVA " to be removed is not found",
		     (vaddr_t)csess);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
		tee_ta_put_session(sess);
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
	}

	DMSG("Destroy session");

	ts_ctx = sess->ts_sess.ctx;
	if (!ts_ctx) {
		destroy_session(sess, open_sessions);
		return TEE_SUCCESS;
	}

	ctx = ts_to_ta_ctx(ts_ctx);
	if (ctx->panicked) {
		destroy_session(sess, open_sessions);
	} else {
		tee_ta_set_busy(ctx);
		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
		ts_ctx->ops->enter_close_session(&sess->ts_sess);
		destroy_session(sess, open_sessions);
		tee_ta_clear_busy(ctx);
	}

	mutex_lock(&tee_ta_mutex);

	if (ctx->ref_count <= 0)
		panic();

	ctx->ref_count--;
	keep_alive = (ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) &&
			(ctx->flags & TA_FLAG_SINGLE_INSTANCE);
	if (!ctx->ref_count && (ctx->panicked || !keep_alive)) {
		if (!ctx->is_releasing) {
			TAILQ_REMOVE(&tee_ctxes, ctx, link);
			ctx->is_releasing = true;
		}
		mutex_unlock(&tee_ta_mutex);

		destroy_context(ctx);
	} else
		mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}
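
/*
 * Note: a single-instance TA built with TA_FLAG_INSTANCE_KEEP_ALIVE
 * survives the close of its last session unless it has panicked; any
 * other context is destroyed once its reference count drops to zero.
 */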

static TEE_Result tee_ta_init_session_with_context(struct tee_ta_session *s,
						   const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx = NULL;

	while (true) {
		ctx = tee_ta_context_find(uuid);
		if (!ctx)
			return TEE_ERROR_ITEM_NOT_FOUND;

		if (!is_user_ta_ctx(&ctx->ts_ctx) ||
		    !to_user_ta_ctx(&ctx->ts_ctx)->uctx.is_initializing)
			break;
		/*
		 * The context is still initializing; wait here until it's
		 * fully initialized. Note that we're searching for the
		 * context again since it may have been removed while we
		 * were sleeping.
		 */
		condvar_wait(&tee_ta_init_cv, &tee_ta_mutex);
	}

	/*
	 * If the TA isn't single-instance it should be loaded as a new
	 * instance instead of doing anything with this instance.
	 * So tell the caller that we didn't find the TA, and the caller
	 * will load a new instance.
	 */
	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * The TA is single-instance; if it isn't multi-session we can't
	 * create another session unless its reference count is zero.
	 */
	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
		return TEE_ERROR_BUSY;

	DMSG("Re-open TA %pUl", (void *)&ctx->ts_ctx.uuid);

	ctx->ref_count++;
	s->ts_sess.ctx = &ctx->ts_ctx;
	s->ts_sess.handle_scall = s->ts_sess.ctx->ops->handle_scall;
	return TEE_SUCCESS;
}

static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *last = NULL;
	uint32_t saved = 0;
	uint32_t id = 1;

	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
	if (last) {
		/* This value is less likely to be already used */
		id = last->id + 1;
		if (!id)
			id++; /* 0 is not valid */
	}

	saved = id;
	do {
		if (!tee_ta_find_session_nolock(id, open_sessions))
			return id;
		id++;
		if (!id)
			id++;
	} while (id != saved);

	return 0;
}
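
/*
 * Example (illustrative): if the last open session has id == UINT32_MAX,
 * the candidate wraps to 0 and is bumped to 1; the do/while then probes
 * ids until a free one is found, returning 0 only if all 2^32 - 1 valid
 * ids are in use.
 */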

static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
				struct tee_ta_session_head *open_sessions,
				const TEE_UUID *uuid,
				struct tee_ta_session **sess)
{
	TEE_Result res;
	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

	*err = TEE_ORIGIN_TEE;
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->cancel_mask = true;
	condvar_init(&s->refc_cv);
	condvar_init(&s->lock_cv);
	s->lock_thread = THREAD_ID_INVALID;
	s->ref_count = 1;

	mutex_lock(&tee_ta_mutex);
	s->id = new_session_id(open_sessions);
	if (!s->id) {
		res = TEE_ERROR_OVERFLOW;
		goto err_mutex_unlock;
	}

	TAILQ_INSERT_TAIL(open_sessions, s, link);

	/* Look for already loaded TA */
	res = tee_ta_init_session_with_context(s, uuid);
	mutex_unlock(&tee_ta_mutex);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for secure partition */
	res = stmm_init_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for pseudo TA */
	res = tee_ta_init_pseudo_ta_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for user TA */
	res = tee_ta_init_user_ta_session(uuid, s);

out:
	if (!res) {
		*sess = s;
		return TEE_SUCCESS;
	}

	mutex_lock(&tee_ta_mutex);
	TAILQ_REMOVE(open_sessions, s, link);
err_mutex_unlock:
	mutex_unlock(&tee_ta_mutex);
	free(s);
	return res;
}

static void release_ta_ctx(struct tee_ta_ctx *ctx)
{
	bool was_releasing = false;

	mutex_lock(&tee_ta_mutex);
	was_releasing = ctx->is_releasing;
	ctx->is_releasing = true;
	if (!was_releasing) {
		DMSG("Releasing panicked TA ctx");
		TAILQ_REMOVE(&tee_ctxes, ctx, link);
	}
	mutex_unlock(&tee_ta_mutex);

	if (!was_releasing)
		ctx->ts_ctx.ops->release_state(&ctx->ts_ctx);
}

TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
			       struct tee_ta_session **sess,
			       struct tee_ta_session_head *open_sessions,
			       const TEE_UUID *uuid,
			       const TEE_Identity *clnt_id,
			       uint32_t cancel_req_to,
			       struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_session *s = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool panicked = false;
	bool was_busy = false;

	res = tee_ta_init_session(err, open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("init session failed 0x%x", res);
		return res;
	}

	if (!check_params(s, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ts_ctx = s->ts_sess.ctx;
	ctx = ts_to_ta_ctx(ts_ctx);

	if (tee_ta_try_set_busy(ctx)) {
		if (!ctx->panicked) {
			/* Save identity of the owner of the session */
			s->clnt_id = *clnt_id;
			s->param = param;
			set_invoke_timeout(s, cancel_req_to);
			res = ts_ctx->ops->enter_open_session(&s->ts_sess);
			s->param = NULL;
		}

		panicked = ctx->panicked;
		if (panicked) {
			release_ta_ctx(ctx);
			res = TEE_ERROR_TARGET_DEAD;
		}

		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
		was_busy = true;
	}

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors
	 * reported by the TA; if the TA panicked or the instance was busy
	 * the origin is the TEE itself.
	 */
	if (panicked || was_busy)
		*err = TEE_ORIGIN_TEE;
	else
		*err = s->err_origin;

	tee_ta_put_session(s);
	if (panicked || res != TEE_SUCCESS)
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

	if (!res)
		*sess = s;
	else
		EMSG("Failed. Return error 0x%x", res);

	return res;
}

TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id,
				 uint32_t cancel_req_to, uint32_t cmd,
				 struct tee_ta_param *param)
{
	struct tee_ta_ctx *ta_ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	TEE_Result res = TEE_SUCCESS;
	bool panicked = false;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	if (!check_params(sess, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ts_ctx = sess->ts_sess.ctx;
	ta_ctx = ts_to_ta_ctx(ts_ctx);

	tee_ta_set_busy(ta_ctx);

	if (!ta_ctx->panicked) {
		sess->param = param;
		set_invoke_timeout(sess, cancel_req_to);
		res = ts_ctx->ops->enter_invoke_cmd(&sess->ts_sess, cmd);
		sess->param = NULL;
	}

	panicked = ta_ctx->panicked;
	if (panicked) {
		release_ta_ctx(ta_ctx);
		res = TEE_ERROR_TARGET_DEAD;
	}

	tee_ta_clear_busy(ta_ctx);

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors
	 * reported by the TA; if the TA panicked the origin is the TEE
	 * itself.
	 */
	if (panicked)
		*err = TEE_ORIGIN_TEE;
	else
		*err = sess->err_origin;

	/* A short buffer is not treated as an effective error case */
	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
		DMSG("Error: %x of %d", res, *err);

	return res;
}

#if defined(CFG_TA_STATS)
static TEE_Result dump_ta_memstats(struct tee_ta_session *s,
				   struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool panicked = false;

	ts_ctx = s->ts_sess.ctx;
	if (!ts_ctx)
		return TEE_ERROR_ITEM_NOT_FOUND;

	if (is_user_ta_ctx(ts_ctx) &&
	    to_user_ta_ctx(ts_ctx)->uctx.is_initializing)
		return TEE_ERROR_BAD_STATE;

	ctx = ts_to_ta_ctx(ts_ctx);

	if (tee_ta_try_set_busy(ctx)) {
		if (!ctx->panicked) {
			s->param = param;
			set_invoke_timeout(s, TEE_TIMEOUT_INFINITE);
			res = ts_ctx->ops->dump_mem_stats(&s->ts_sess);
			s->param = NULL;
		}

		panicked = ctx->panicked;
		if (panicked) {
			release_ta_ctx(ctx);
			res = TEE_ERROR_TARGET_DEAD;
		}

		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
	}

	return res;
}

static void init_dump_ctx(struct tee_ta_dump_ctx *dump_ctx)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_ctx *ctx = NULL;
	unsigned int n = 0;

	nsec_sessions_list_head(&open_sessions);
	/*
	 * Scan all sessions opened from the non-secure side by searching
	 * through all available TA instances and, for each context,
	 * scanning all opened sessions.
	 */
	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		unsigned int cnt = 0;

		if (!is_user_ta_ctx(&ctx->ts_ctx))
			continue;

		memcpy(&dump_ctx[n].uuid, &ctx->ts_ctx.uuid,
		       sizeof(ctx->ts_ctx.uuid));
		dump_ctx[n].panicked = ctx->panicked;
		dump_ctx[n].is_user_ta = is_user_ta_ctx(&ctx->ts_ctx);
		TAILQ_FOREACH(sess, open_sessions, link) {
			if (sess->ts_sess.ctx == &ctx->ts_ctx) {
				if (cnt == MAX_DUMP_SESS_NUM)
					break;

				dump_ctx[n].sess_id[cnt] = sess->id;
				cnt++;
			}
		}

		dump_ctx[n].sess_num = cnt;
		n++;
	}
}

static TEE_Result dump_ta_stats(struct tee_ta_dump_ctx *dump_ctx,
				struct tee_ta_dump_stats *dump_stats,
				size_t ta_count)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_param param = { };
	unsigned int i = 0;
	unsigned int j = 0;

	nsec_sessions_list_head(&open_sessions);

	for (i = 0; i < ta_count; i++) {
		struct tee_ta_dump_stats *stats = &dump_stats[i];

		memcpy(&stats->uuid, &dump_ctx[i].uuid,
		       sizeof(dump_ctx[i].uuid));
		stats->panicked = dump_ctx[i].panicked;
		stats->sess_num = dump_ctx[i].sess_num;

		/* Find a session from dump context */
		for (j = 0, sess = NULL; j < dump_ctx[i].sess_num && !sess; j++)
			sess = tee_ta_get_session(dump_ctx[i].sess_id[j], true,
						  open_sessions);

		if (!sess)
			continue;
		/* If a session exists, get its heap stats */
		memset(&param, 0, sizeof(struct tee_ta_param));
		param.types = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_NONE);
		res = dump_ta_memstats(sess, &param);
		if (res == TEE_SUCCESS) {
			stats->heap.allocated = param.u[0].val.a;
			stats->heap.max_allocated = param.u[0].val.b;
			stats->heap.size = param.u[1].val.a;
			stats->heap.num_alloc_fail = param.u[1].val.b;
			stats->heap.biggest_alloc_fail = param.u[2].val.a;
			stats->heap.biggest_alloc_fail_used = param.u[2].val.b;
		} else {
			memset(&stats->heap, 0, sizeof(stats->heap));
		}
		tee_ta_put_session(sess);
	}

	return TEE_SUCCESS;
}

TEE_Result tee_ta_instance_stats(void *buf, size_t *buf_size)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_dump_stats *dump_stats = NULL;
	struct tee_ta_dump_ctx *dump_ctx = NULL;
	struct tee_ta_ctx *ctx = NULL;
	size_t sz = 0;
	size_t ta_count = 0;

	if (!buf_size)
		return TEE_ERROR_BAD_PARAMETERS;

	mutex_lock(&tee_ta_mutex);

	/* Go through all available TAs and compute the required buffer size. */
	TAILQ_FOREACH(ctx, &tee_ctxes, link)
		if (is_user_ta_ctx(&ctx->ts_ctx))
			ta_count++;

	sz = sizeof(struct tee_ta_dump_stats) * ta_count;
	if (!sz) {
		/* sz == 0 means there is no user TA; return no item found. */
		res = TEE_ERROR_ITEM_NOT_FOUND;
	} else if (!buf || *buf_size < sz) {
		/*
		 * A NULL buf or a passed size smaller than the actual
		 * size means the caller is querying the buffer size;
		 * update *buf_size.
		 */
		*buf_size = sz;
		res = TEE_ERROR_SHORT_BUFFER;
	} else if (!IS_ALIGNED_WITH_TYPE(buf, uint32_t)) {
		DMSG("Data alignment");
		res = TEE_ERROR_BAD_PARAMETERS;
	} else {
		dump_stats = (struct tee_ta_dump_stats *)buf;
		dump_ctx = malloc(sizeof(struct tee_ta_dump_ctx) * ta_count);
		if (!dump_ctx)
			res = TEE_ERROR_OUT_OF_MEMORY;
		else
			init_dump_ctx(dump_ctx);
	}
	mutex_unlock(&tee_ta_mutex);

	if (res != TEE_SUCCESS)
		return res;

	/* Dump user TA stats by iterating dump_ctx[] */
	res = dump_ta_stats(dump_ctx, dump_stats, ta_count);
	if (res == TEE_SUCCESS)
		*buf_size = sz;

	free(dump_ctx);
	return res;
}
#endif

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id)
{
	*err = TEE_ORIGIN_TEE;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	sess->cancel = true;
	return TEE_SUCCESS;
}

bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}
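
/*
 * Precedence (illustrative): a masked session never reports
 * cancellation; an explicit cancel request wins over the timeout; and a
 * cancel_time of { UINT32_MAX, UINT32_MAX } as set by
 * set_invoke_timeout() means no timeout is armed.
 */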

#if defined(CFG_TA_GPROF_SUPPORT)
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
	struct ts_session *s = ts_get_current_session();
	struct user_ta_ctx *utc = NULL;
	struct sample_buf *sbuf = NULL;
	TEE_Result res = 0;
	size_t idx = 0;

	sbuf = s->sbuf;
	if (!sbuf || !sbuf->enabled)
		return; /* PC sampling is not enabled */

	idx = (((uint64_t)pc - sbuf->offset)/2 * sbuf->scale)/65536;
	if (idx < sbuf->nsamples) {
		utc = to_user_ta_ctx(s->ctx);
		res = vm_check_access_rights(&utc->uctx,
					     TEE_MEMORY_ACCESS_READ |
					     TEE_MEMORY_ACCESS_WRITE |
					     TEE_MEMORY_ACCESS_ANY_OWNER,
					     (uaddr_t)&sbuf->samples[idx],
					     sizeof(*sbuf->samples));
		if (res != TEE_SUCCESS)
			return;
		sbuf->samples[idx]++;
	}
	sbuf->count++;
}
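
/*
 * Worked example of the histogram scaling above (illustrative): with
 * sbuf->offset = 0x1000, sbuf->scale = 65536 (1:1) and pc = 0x1010,
 * idx = ((0x1010 - 0x1000) / 2 * 65536) / 65536 = 8, i.e. one sample
 * counter per two bytes of text.
 */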

static void gprof_update_session_utime(bool suspend, struct ts_session *s,
				       uint64_t now)
{
	struct sample_buf *sbuf = s->sbuf;

	if (!sbuf)
		return;

	if (suspend) {
		assert(sbuf->usr_entered);
		sbuf->usr += now - sbuf->usr_entered;
		sbuf->usr_entered = 0;
	} else {
		assert(!sbuf->usr_entered);
		if (!now)
			now++; /* 0 is reserved */
		sbuf->usr_entered = now;
	}
}

/*
 * Update user-mode CPU time for the current session
 * @suspend: true if session is being suspended (leaving user mode), false if
 * it is resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
	struct ts_session *s = ts_get_current_session();
	uint64_t now = barrier_read_counter_timer();

	gprof_update_session_utime(suspend, s, now);
}

void tee_ta_update_session_utime_suspend(void)
{
	tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
	tee_ta_update_session_utime(false);
}
#endif

#if defined(CFG_FTRACE_SUPPORT)
static void ftrace_update_times(bool suspend)
{
	struct ts_session *s = ts_get_current_session_may_fail();
	struct ftrace_buf *fbuf = NULL;
	uint64_t now = 0;
	uint32_t i = 0;

	if (!s)
		return;

	now = barrier_read_counter_timer();

	fbuf = s->fbuf;
	if (!fbuf)
		return;

	if (suspend) {
		fbuf->suspend_time = now;
	} else {
		for (i = 0; i <= fbuf->ret_idx; i++)
			fbuf->begin_time[i] += now - fbuf->suspend_time;
	}
}
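
/*
 * Note, spelled out for clarity: on resume, every pending function-entry
 * timestamp is shifted forward by the time spent suspended, so the
 * suspended interval does not inflate the recorded ftrace durations.
 */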

void tee_ta_ftrace_update_times_suspend(void)
{
	ftrace_update_times(true);
}

void tee_ta_ftrace_update_times_resume(void)
{
	ftrace_update_times(false);
}
#endif

bool __noprof is_ta_ctx(struct ts_ctx *ctx)
{
	return is_user_ta_ctx(ctx) || is_pseudo_ta_ctx(ctx);
}