xref: /optee_os/core/kernel/tee_ta_manager.c (revision 79f8990d9d28539864d8f97f9f1cb32e289e595f)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020, Arm Limited
 */

#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/stmm_sp.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <malloc.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <pta_stats.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee/entry_std.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc_storage.h>
#include <trace.h>
#include <types_ext.h>
#include <user_ta_header.h>
#include <utee_types.h>
#include <util.h>

#if defined(CFG_TA_STATS)
#define MAX_DUMP_SESS_NUM	(16)

struct tee_ta_dump_ctx {
	TEE_UUID uuid;
	uint32_t panicked;
	bool is_user_ta;
	uint32_t sess_num;
	uint32_t sess_id[MAX_DUMP_SESS_NUM];
};
#endif

/* This mutex protects the critical section in tee_ta_init_session */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
/* This condvar is used when waiting for a TA context to become initialized */
struct condvar tee_ta_init_cv = CONDVAR_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static short int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

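/*
 * The single-instance lock serializes sessions towards TAs flagged
 * TA_FLAG_SINGLE_INSTANCE. It is recursive for the owning thread and is
 * built on tee_ta_mutex and tee_ta_cv. With
 * CFG_CONCURRENT_SINGLE_INSTANCE_TA the helpers below compile to no-ops
 * and such TAs may be entered concurrently.
 */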
#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available. */
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}

static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}
#endif

struct tee_ta_session *__noprof to_ta_session(struct ts_session *sess)
{
	assert(is_ta_ctx(sess->ctx) || is_stmm_ctx(sess->ctx));
	return container_of(sess, struct tee_ta_session, ts_sess);
}

static struct tee_ta_ctx *ts_to_ta_ctx(struct ts_ctx *ctx)
{
	if (is_ta_ctx(ctx))
		return to_ta_ctx(ctx);

	if (is_stmm_ctx(ctx))
		return &(to_stmm_ctx(ctx)->ta_ctx);

	panic("bad context");
}

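/*
 * Serialize entry into a TA instance that isn't flagged
 * TA_FLAG_CONCURRENT. Returns false only when the calling thread already
 * holds the single-instance lock and the instance is busy: blocking in
 * that situation could deadlock against the thread owning the instance,
 * so the caller must back off instead.
 */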
static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	if (ctx->flags & TA_FLAG_CONCURRENT)
		return true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We're holding the single-instance lock and the
			 * TA is busy. Waiting now would only cause a
			 * deadlock, so release the lock and return false.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, we're free to
		 * wait for the TA to become available.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either it's already true or we should set it to true */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	if (ctx->flags & TA_FLAG_CONCURRENT)
		return;

	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		unlock_single_instance();

	mutex_unlock(&tee_ta_mutex);
}

static void dec_session_ref_count(struct tee_ta_session *s)
{
	assert(s->ref_count > 0);
	s->ref_count--;
	if (s->ref_count == 1)
		condvar_signal(&s->refc_cv);
}

void tee_ta_put_session(struct tee_ta_session *s)
{
	mutex_lock(&tee_ta_mutex);

	if (s->lock_thread == thread_get_id()) {
		s->lock_thread = THREAD_ID_INVALID;
		condvar_signal(&s->lock_cv);
	}
	dec_session_ref_count(s);

	mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *found = NULL;

	TAILQ_FOREACH(s, open_sessions, link) {
		if (s->id == id) {
			found = s;
			break;
		}
	}

	return found;
}

struct tee_ta_session *tee_ta_find_session(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;

	mutex_lock(&tee_ta_mutex);

	s = tee_ta_find_session_nolock(id, open_sessions);

	mutex_unlock(&tee_ta_mutex);

	return s;
}

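/*
 * Look up a session by ID and take a reference on it. With @exclusive the
 * calling thread also becomes the session lock owner, sleeping on lock_cv
 * until the lock is free. A session already marked for unlinking is
 * treated as nonexistent.
 */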
struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	mutex_lock(&tee_ta_mutex);

	while (true) {
		s = tee_ta_find_session_nolock(id, open_sessions);
		if (!s)
			break;
		if (s->unlink) {
			s = NULL;
			break;
		}
		s->ref_count++;
		if (!exclusive)
			break;

		assert(s->lock_thread != thread_get_id());

		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
			condvar_wait(&s->lock_cv, &tee_ta_mutex);

		if (s->unlink) {
			dec_session_ref_count(s);
			s = NULL;
			break;
		}

		s->lock_thread = thread_get_id();
		break;
	}

	mutex_unlock(&tee_ta_mutex);
	return s;
}

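/*
 * Mark the session as going away and wake all waiters. The caller must
 * own the session lock; the function blocks until the caller's reference
 * is the only one left (ref_count == 1) before removing the session from
 * the open-sessions list.
 */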
static void tee_ta_unlink_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	mutex_lock(&tee_ta_mutex);

	assert(s->ref_count >= 1);
	assert(s->lock_thread == thread_get_id());
	assert(!s->unlink);

	s->unlink = true;
	condvar_broadcast(&s->lock_cv);

	while (s->ref_count != 1)
		condvar_wait(&s->refc_cv, &tee_ta_mutex);

	TAILQ_REMOVE(open_sessions, s, link);

	mutex_unlock(&tee_ta_mutex);
}

static void destroy_session(struct tee_ta_session *s,
			    struct tee_ta_session_head *open_sessions)
{
#if defined(CFG_FTRACE_SUPPORT)
	if (s->ts_sess.ctx && s->ts_sess.ctx->ops->dump_ftrace) {
		ts_push_current_session(&s->ts_sess);
		s->ts_sess.fbuf = NULL;
		s->ts_sess.ctx->ops->dump_ftrace(s->ts_sess.ctx);
		ts_pop_current_session();
	}
#endif

	tee_ta_unlink_session(s, open_sessions);
#if defined(CFG_TA_GPROF_SUPPORT)
	free(s->ts_sess.sbuf);
#endif
	free(s);
}

static void destroy_context(struct tee_ta_ctx *ctx)
{
	DMSG("Destroy TA ctx (0x%" PRIxVA ")",  (vaddr_t)ctx);

	condvar_destroy(&ctx->busy_cv);
	ctx->ts_ctx.ops->destroy(&ctx->ts_ctx);
}

/*
 * tee_ta_context_find - Find a TA context based on a UUID (input)
 * Returns a pointer to the TA context, or NULL if none is found
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx;

	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (memcmp(&ctx->ts_ctx.uuid, uuid, sizeof(TEE_UUID)) == 0)
			return ctx;
	}

	return NULL;
}

/* Check if the requester (client ID) matches the session's initial client */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
	if (id == KERN_IDENTITY)
		return TEE_SUCCESS;

	if (id == NSAPP_IDENTITY) {
		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
			DMSG("nsec tries to hijack TA session");
			return TEE_ERROR_ACCESS_DENIED;
		}
		return TEE_SUCCESS;
	}

	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
		DMSG("client id mismatch");
		return TEE_ERROR_ACCESS_DENIED;
	}
	return TEE_SUCCESS;
}

/*
 * Check if invocation parameters match TA properties
 *
 * @sess - current session handle
 * @param - already identified memory references hold a valid 'mobj'.
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy)
 * - Only SDP flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
			 struct tee_ta_param *param __unused)
{
	/*
	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
	 * are rejected at OP-TEE core entry. Hence here all TAs have the
	 * same permissions regarding memory reference parameters.
	 */
	return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
			 struct tee_ta_param *param)
{
	int n;

	/*
	 * When CFG_SECURE_DATA_PATH is enabled, OP-TEE entry allows SHM and
	 * SDP memory references. Only TAs flagged SDP can access SDP memory.
	 */
	if (sess->ts_sess.ctx &&
	    ts_to_ta_ctx(sess->ts_sess.ctx)->flags & TA_FLAG_SECURE_DATA_PATH)
		return true;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		struct param_mem *mem = &param->u[n].mem;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!mem->size)
			continue;
		if (mobj_is_sdp_mem(mem->mobj))
			return false;
	}
	return true;
}
#endif

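/*
 * Convert a relative cancellation timeout in milliseconds into an
 * absolute deadline in system time. Illustrative example:
 * cancel_req_to = 2500 at current time { 10 s, 800 ms } gives a deadline
 * of { 13 s, 300 ms }. TEE_TIMEOUT_INFINITE, a failed time read or an
 * overflowing addition all map to the { UINT32_MAX, UINT32_MAX }
 * sentinel, meaning "never cancel on timeout".
 */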
static void set_invoke_timeout(struct tee_ta_session *sess,
				      uint32_t cancel_req_to)
{
	TEE_Time current_time;
	TEE_Time cancel_time;

	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
		goto infinite;

	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		goto infinite;

	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
			 &cancel_time.seconds))
		goto infinite;

	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
	if (cancel_time.millis > 1000) {
		if (ADD_OVERFLOW(cancel_time.seconds, 1,
				 &cancel_time.seconds))
			goto infinite;

		cancel_time.millis -= 1000;
	}

	sess->cancel_time = cancel_time;
	return;

infinite:
	sess->cancel_time.seconds = UINT32_MAX;
	sess->cancel_time.millis = UINT32_MAX;
}

450 
451 /*-----------------------------------------------------------------------------
452  * Close a Trusted Application and free available resources
453  *---------------------------------------------------------------------------*/
454 TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
455 				struct tee_ta_session_head *open_sessions,
456 				const TEE_Identity *clnt_id)
457 {
458 	struct tee_ta_session *sess = NULL;
459 	struct tee_ta_ctx *ctx = NULL;
460 	struct ts_ctx *ts_ctx = NULL;
461 	bool keep_alive = false;
462 
463 	DMSG("csess 0x%" PRIxVA " id %u",
464 	     (vaddr_t)csess, csess ? csess->id : UINT_MAX);
465 
466 	if (!csess)
467 		return TEE_ERROR_ITEM_NOT_FOUND;
468 
469 	sess = tee_ta_get_session(csess->id, true, open_sessions);
470 
471 	if (!sess) {
472 		EMSG("session 0x%" PRIxVA " to be removed is not found",
473 		     (vaddr_t)csess);
474 		return TEE_ERROR_ITEM_NOT_FOUND;
475 	}
476 
477 	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
478 		tee_ta_put_session(sess);
479 		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
480 	}
481 
482 	DMSG("Destroy session");
483 
484 	ts_ctx = sess->ts_sess.ctx;
485 	if (!ts_ctx) {
486 		destroy_session(sess, open_sessions);
487 		return TEE_SUCCESS;
488 	}
489 
490 	ctx = ts_to_ta_ctx(ts_ctx);
491 	if (ctx->panicked) {
492 		destroy_session(sess, open_sessions);
493 	} else {
494 		tee_ta_set_busy(ctx);
495 		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
496 		ts_ctx->ops->enter_close_session(&sess->ts_sess);
497 		destroy_session(sess, open_sessions);
498 		tee_ta_clear_busy(ctx);
499 	}
500 
501 	mutex_lock(&tee_ta_mutex);
502 
503 	if (ctx->ref_count <= 0)
504 		panic();
505 
506 	ctx->ref_count--;
507 	keep_alive = (ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) &&
508 			(ctx->flags & TA_FLAG_SINGLE_INSTANCE);
509 	if (!ctx->ref_count && (ctx->panicked || !keep_alive)) {
510 		if (!ctx->is_releasing) {
511 			TAILQ_REMOVE(&tee_ctxes, ctx, link);
512 			ctx->is_releasing = true;
513 		}
514 		mutex_unlock(&tee_ta_mutex);
515 
516 		destroy_context(ctx);
517 	} else
518 		mutex_unlock(&tee_ta_mutex);
519 
520 	return TEE_SUCCESS;
521 }
522 
static TEE_Result tee_ta_init_session_with_context(struct tee_ta_session *s,
						   const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx = NULL;

	while (true) {
		ctx = tee_ta_context_find(uuid);
		if (!ctx)
			return TEE_ERROR_ITEM_NOT_FOUND;

		if (!is_user_ta_ctx(&ctx->ts_ctx) ||
		    !to_user_ta_ctx(&ctx->ts_ctx)->uctx.is_initializing)
			break;
		/*
		 * Context is still initializing, wait here until it's
		 * fully initialized. Note that we're searching for the
		 * context again since it may have been removed while we
		 * were sleeping.
		 */
		condvar_wait(&tee_ta_init_cv, &tee_ta_mutex);
	}

	/*
	 * If the TA isn't single instance it should be loaded as a new
	 * instance instead of doing anything with this instance.
	 * So tell the caller that we didn't find the TA and the
	 * caller will load a new instance.
	 */
	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * The TA is single instance. If it isn't multi session we
	 * can't create another session unless its reference count is zero.
	 */
	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
		return TEE_ERROR_BUSY;

	DMSG("Re-open TA %pUl", (void *)&ctx->ts_ctx.uuid);

	ctx->ref_count++;
	s->ts_sess.ctx = &ctx->ts_ctx;
	s->ts_sess.handle_scall = s->ts_sess.ctx->ops->handle_scall;
	return TEE_SUCCESS;
}

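/*
 * Allocate a session ID that isn't currently in use. Starting from the
 * last session's ID + 1 makes a clash unlikely; the loop then scans the
 * full 32-bit space, wrapping around and skipping 0, and returns 0
 * (invalid) only if every other value is taken.
 */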
static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *last = NULL;
	uint32_t saved = 0;
	uint32_t id = 1;

	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
	if (last) {
		/* This value is less likely to be already used */
		id = last->id + 1;
		if (!id)
			id++; /* 0 is not valid */
	}

	saved = id;
	do {
		if (!tee_ta_find_session_nolock(id, open_sessions))
			return id;
		id++;
		if (!id)
			id++;
	} while (id != saved);

	return 0;
}

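/*
 * Allocate a session, give it a unique ID and bind it to a TA context.
 * Lookup order: an already loaded (single-instance) TA, then a
 * stand-alone MM secure partition, then a pseudo TA and finally a user TA
 * loaded from storage.
 */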
static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
				struct tee_ta_session_head *open_sessions,
				const TEE_UUID *uuid,
				struct tee_ta_session **sess)
{
	TEE_Result res;
	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

	*err = TEE_ORIGIN_TEE;
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->cancel_mask = true;
	condvar_init(&s->refc_cv);
	condvar_init(&s->lock_cv);
	s->lock_thread = THREAD_ID_INVALID;
	s->ref_count = 1;

	mutex_lock(&tee_ta_mutex);
	s->id = new_session_id(open_sessions);
	if (!s->id) {
		res = TEE_ERROR_OVERFLOW;
		goto err_mutex_unlock;
	}

	TAILQ_INSERT_TAIL(open_sessions, s, link);

	/* Look for already loaded TA */
	res = tee_ta_init_session_with_context(s, uuid);
	mutex_unlock(&tee_ta_mutex);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for secure partition */
	res = stmm_init_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for pseudo TA */
	res = tee_ta_init_pseudo_ta_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for user TA */
	res = tee_ta_init_user_ta_session(uuid, s);

out:
	if (!res) {
		*sess = s;
		return TEE_SUCCESS;
	}

	mutex_lock(&tee_ta_mutex);
	TAILQ_REMOVE(open_sessions, s, link);
err_mutex_unlock:
	mutex_unlock(&tee_ta_mutex);
	free(s);
	return res;
}

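/*
 * Called when a TA has panicked: the first caller to get here detaches
 * the context from the global list and lets it release what can be
 * released early. The context itself is destroyed later, when the last
 * session reference is dropped in tee_ta_close_session().
 */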
static void release_ta_ctx(struct tee_ta_ctx *ctx)
{
	bool was_releasing = false;

	mutex_lock(&tee_ta_mutex);
	was_releasing = ctx->is_releasing;
	ctx->is_releasing = true;
	if (!was_releasing) {
		DMSG("Releasing panicked TA ctx");
		TAILQ_REMOVE(&tee_ctxes, ctx, link);
	}
	mutex_unlock(&tee_ta_mutex);

	if (!was_releasing)
		ctx->ts_ctx.ops->release_state(&ctx->ts_ctx);
}

TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
			       struct tee_ta_session **sess,
			       struct tee_ta_session_head *open_sessions,
			       const TEE_UUID *uuid,
			       const TEE_Identity *clnt_id,
			       uint32_t cancel_req_to,
			       struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_session *s = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool panicked = false;
	bool was_busy = false;

	res = tee_ta_init_session(err, open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("init session failed 0x%x", res);
		return res;
	}

	if (!check_params(s, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ts_ctx = s->ts_sess.ctx;
	ctx = ts_to_ta_ctx(ts_ctx);

	if (tee_ta_try_set_busy(ctx)) {
		if (!ctx->panicked) {
			/* Save identity of the owner of the session */
			s->clnt_id = *clnt_id;
			s->param = param;
			set_invoke_timeout(s, cancel_req_to);
			res = ts_ctx->ops->enter_open_session(&s->ts_sess);
			s->param = NULL;
		}

		panicked = ctx->panicked;
		if (panicked) {
			release_ta_ctx(ctx);
			res = TEE_ERROR_TARGET_DEAD;
		}

		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
		was_busy = true;
	}

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors;
	 * a panicked or busy TA instance is reported with TEE_ORIGIN_TEE.
	 */
	if (panicked || was_busy)
		*err = TEE_ORIGIN_TEE;
	else
		*err = s->err_origin;

	tee_ta_put_session(s);
	if (panicked || res != TEE_SUCCESS)
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

	if (!res)
		*sess = s;
	else
		EMSG("Failed for TA %pUl. Return error %#"PRIx32, uuid, res);

	return res;
}

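/*
 * Enter the TA to handle an invoke-command request. Unlike
 * tee_ta_open_session(), which reports TEE_ERROR_BUSY when entering
 * would deadlock, this path uses the blocking tee_ta_set_busy() and thus
 * panics in that situation, which is not expected to happen here.
 */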
TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id,
				 uint32_t cancel_req_to, uint32_t cmd,
				 struct tee_ta_param *param)
{
	struct tee_ta_ctx *ta_ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	TEE_Result res = TEE_SUCCESS;
	bool panicked = false;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	if (!check_params(sess, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ts_ctx = sess->ts_sess.ctx;
	ta_ctx = ts_to_ta_ctx(ts_ctx);

	tee_ta_set_busy(ta_ctx);

	if (!ta_ctx->panicked) {
		sess->param = param;
		set_invoke_timeout(sess, cancel_req_to);
		res = ts_ctx->ops->enter_invoke_cmd(&sess->ts_sess, cmd);
		sess->param = NULL;
	}

	panicked = ta_ctx->panicked;
	if (panicked) {
		release_ta_ctx(ta_ctx);
		res = TEE_ERROR_TARGET_DEAD;
	}

	tee_ta_clear_busy(ta_ctx);

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors;
	 * a panicked TA is reported with TEE_ORIGIN_TEE.
	 */
	if (panicked)
		*err = TEE_ORIGIN_TEE;
	else
		*err = sess->err_origin;

	/* A short buffer is not treated as an actual error case */
	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
		DMSG("Error: %x of %d", res, *err);

	return res;
}

#if defined(CFG_TA_STATS)
static TEE_Result dump_ta_memstats(struct tee_ta_session *s,
				   struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool panicked = false;

	ts_ctx = s->ts_sess.ctx;
	if (!ts_ctx)
		return TEE_ERROR_ITEM_NOT_FOUND;

	if (is_user_ta_ctx(ts_ctx) &&
	    to_user_ta_ctx(ts_ctx)->uctx.is_initializing)
		return TEE_ERROR_BAD_STATE;

	ctx = ts_to_ta_ctx(ts_ctx);

	if (tee_ta_try_set_busy(ctx)) {
		if (!ctx->panicked) {
			s->param = param;
			set_invoke_timeout(s, TEE_TIMEOUT_INFINITE);
			res = ts_ctx->ops->dump_mem_stats(&s->ts_sess);
			s->param = NULL;
		}

		panicked = ctx->panicked;
		if (panicked) {
			release_ta_ctx(ctx);
			res = TEE_ERROR_TARGET_DEAD;
		}

		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
	}

	return res;
}

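/*
 * Snapshot, with tee_ta_mutex held by the caller, which user TA contexts
 * exist and which non-secure sessions are attached to each of them,
 * capped at MAX_DUMP_SESS_NUM sessions per context, so that the stats
 * can be collected afterwards without holding the mutex.
 */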
static void init_dump_ctx(struct tee_ta_dump_ctx *dump_ctx)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_ctx *ctx = NULL;
	unsigned int n = 0;

	nsec_sessions_list_head(&open_sessions);
	/*
	 * Scan all sessions opened from the non-secure side by searching
	 * through all available TA instances and, for each context, all
	 * opened sessions.
	 */
	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		unsigned int cnt = 0;

		if (!is_user_ta_ctx(&ctx->ts_ctx))
			continue;

		memcpy(&dump_ctx[n].uuid, &ctx->ts_ctx.uuid,
		       sizeof(ctx->ts_ctx.uuid));
		dump_ctx[n].panicked = ctx->panicked;
		dump_ctx[n].is_user_ta = is_user_ta_ctx(&ctx->ts_ctx);
		TAILQ_FOREACH(sess, open_sessions, link) {
			if (sess->ts_sess.ctx == &ctx->ts_ctx) {
				if (cnt == MAX_DUMP_SESS_NUM)
					break;

				dump_ctx[n].sess_id[cnt] = sess->id;
				cnt++;
			}
		}

		dump_ctx[n].sess_num = cnt;
		n++;
	}
}

static TEE_Result dump_ta_stats(struct tee_ta_dump_ctx *dump_ctx,
				struct pta_stats_ta *dump_stats,
				size_t ta_count)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_param param = { };
	unsigned int i = 0;
	unsigned int j = 0;

	nsec_sessions_list_head(&open_sessions);

	for (i = 0; i < ta_count; i++) {
		struct pta_stats_ta *stats = &dump_stats[i];

		memcpy(&stats->uuid, &dump_ctx[i].uuid,
		       sizeof(dump_ctx[i].uuid));
		stats->panicked = dump_ctx[i].panicked;
		stats->sess_num = dump_ctx[i].sess_num;

		/* Find a session from the dump context */
		for (j = 0, sess = NULL; j < dump_ctx[i].sess_num && !sess; j++)
			sess = tee_ta_get_session(dump_ctx[i].sess_id[j], true,
						  open_sessions);

		if (!sess)
			continue;
		/* If the session exists, get its heap stats */
		memset(&param, 0, sizeof(struct tee_ta_param));
		param.types = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_NONE);
		res = dump_ta_memstats(sess, &param);
		if (res == TEE_SUCCESS) {
			stats->heap.allocated = param.u[0].val.a;
			stats->heap.max_allocated = param.u[0].val.b;
			stats->heap.size = param.u[1].val.a;
			stats->heap.num_alloc_fail = param.u[1].val.b;
			stats->heap.biggest_alloc_fail = param.u[2].val.a;
			stats->heap.biggest_alloc_fail_used = param.u[2].val.b;
		} else {
			memset(&stats->heap, 0, sizeof(stats->heap));
		}
		tee_ta_put_session(sess);
	}

	return TEE_SUCCESS;
}

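/*
 * Fill @buf with one struct pta_stats_ta per live user TA context, for
 * use by the stats pseudo TA. When called with a NULL @buf, or one that
 * is too small, the required size is reported in *buf_size together with
 * TEE_ERROR_SHORT_BUFFER so callers can size their buffer first.
 */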
TEE_Result tee_ta_instance_stats(void *buf, size_t *buf_size)
{
	TEE_Result res = TEE_SUCCESS;
	struct pta_stats_ta *dump_stats = NULL;
	struct tee_ta_dump_ctx *dump_ctx = NULL;
	struct tee_ta_ctx *ctx = NULL;
	size_t sz = 0;
	size_t ta_count = 0;

	if (!buf_size)
		return TEE_ERROR_BAD_PARAMETERS;

	mutex_lock(&tee_ta_mutex);

	/* Go through all available TAs and compute the required buffer size */
	TAILQ_FOREACH(ctx, &tee_ctxes, link)
		if (is_user_ta_ctx(&ctx->ts_ctx))
			ta_count++;

	sz = sizeof(struct pta_stats_ta) * ta_count;
	if (!sz) {
		/* sz == 0 means there is no user TA, return item not found */
		res = TEE_ERROR_ITEM_NOT_FOUND;
	} else if (!buf || *buf_size < sz) {
		/*
		 * A NULL buf, or a passed size smaller than the actual
		 * size, means the caller is querying the required buffer
		 * size, so update *buf_size.
		 */
		*buf_size = sz;
		res = TEE_ERROR_SHORT_BUFFER;
	} else if (!IS_ALIGNED_WITH_TYPE(buf, uint32_t)) {
		DMSG("Data alignment");
		res = TEE_ERROR_BAD_PARAMETERS;
	} else {
		dump_stats = (struct pta_stats_ta *)buf;
		dump_ctx = malloc(sizeof(struct tee_ta_dump_ctx) * ta_count);
		if (!dump_ctx)
			res = TEE_ERROR_OUT_OF_MEMORY;
		else
			init_dump_ctx(dump_ctx);
	}
	mutex_unlock(&tee_ta_mutex);

	if (res != TEE_SUCCESS)
		return res;

	/* Dump user TA stats by iterating over dump_ctx[] */
	res = dump_ta_stats(dump_ctx, dump_stats, ta_count);
	if (res == TEE_SUCCESS)
		*buf_size = sz;

	free(dump_ctx);
	return res;
}
#endif

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id)
{
	*err = TEE_ORIGIN_TEE;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	sess->cancel = true;
	return TEE_SUCCESS;
}

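/*
 * A session counts as cancelled when cancellation is unmasked and either
 * an explicit cancel request is pending or the deadline set by
 * set_invoke_timeout() has passed. UINT32_MAX seconds encodes "no
 * deadline".
 */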
bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}

#if defined(CFG_TA_GPROF_SUPPORT)
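/*
 * PC sampling for TA gprof support, in the style of the classic profil()
 * interface: the sampled PC is mapped onto a histogram bucket as
 * idx = ((pc - offset) / 2) * scale / 65536, so with scale == 65536
 * (an illustrative value) each bucket covers two bytes of text. The
 * user-space sample buffer is checked for writability before updating.
 */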
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
	struct ts_session *s = ts_get_current_session();
	struct user_ta_ctx *utc = NULL;
	struct sample_buf *sbuf = NULL;
	TEE_Result res = 0;
	size_t idx = 0;

	sbuf = s->sbuf;
	if (!sbuf || !sbuf->enabled)
		return; /* PC sampling is not enabled */

	idx = (((uint64_t)pc - sbuf->offset)/2 * sbuf->scale)/65536;
	if (idx < sbuf->nsamples) {
		utc = to_user_ta_ctx(s->ctx);
		res = vm_check_access_rights(&utc->uctx,
					     TEE_MEMORY_ACCESS_READ |
					     TEE_MEMORY_ACCESS_WRITE |
					     TEE_MEMORY_ACCESS_ANY_OWNER,
					     (uaddr_t)&sbuf->samples[idx],
					     sizeof(*sbuf->samples));
		if (res != TEE_SUCCESS)
			return;
		sbuf->samples[idx]++;
	}
	sbuf->count++;
}

static void gprof_update_session_utime(bool suspend, struct ts_session *s,
				       uint64_t now)
{
	struct sample_buf *sbuf = s->sbuf;

	if (!sbuf)
		return;

	if (suspend) {
		assert(sbuf->usr_entered);
		sbuf->usr += now - sbuf->usr_entered;
		sbuf->usr_entered = 0;
	} else {
		assert(!sbuf->usr_entered);
		if (!now)
			now++; /* 0 is reserved */
		sbuf->usr_entered = now;
	}
}

/*
 * Update user-mode CPU time for the current session
 * @suspend: true if session is being suspended (leaving user mode), false if
 * it is resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
	struct ts_session *s = ts_get_current_session();
	uint64_t now = barrier_read_counter_timer();

	gprof_update_session_utime(suspend, s, now);
}

void tee_ta_update_session_utime_suspend(void)
{
	tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
	tee_ta_update_session_utime(false);
}
#endif

#if defined(CFG_FTRACE_SUPPORT)
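/*
 * Keep ftrace timestamps consistent across suspend/resume of a session:
 * suspend records the current counter value and resume shifts every
 * begin_time[] entry still on the return stack forward by the time spent
 * suspended, so that reported durations exclude suspended time.
 */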
static void ftrace_update_times(bool suspend)
{
	struct ts_session *s = ts_get_current_session_may_fail();
	struct ftrace_buf *fbuf = NULL;
	uint64_t now = 0;
	uint32_t i = 0;

	if (!s)
		return;

	now = barrier_read_counter_timer();

	fbuf = s->fbuf;
	if (!fbuf)
		return;

	if (suspend) {
		fbuf->suspend_time = now;
	} else {
		for (i = 0; i <= fbuf->ret_idx; i++)
			fbuf->begin_time[i] += now - fbuf->suspend_time;
	}
}

void tee_ta_ftrace_update_times_suspend(void)
{
	ftrace_update_times(true);
}

void tee_ta_ftrace_update_times_resume(void)
{
	ftrace_update_times(false);
}
#endif

bool __noprof is_ta_ctx(struct ts_ctx *ctx)
{
	return is_user_ta_ctx(ctx) || is_pseudo_ta_ctx(ctx);
}
1137