xref: /optee_os/core/kernel/tee_ta_manager.c (revision 983d02116743476904b68d52ca432d0f79c38c43)
1 /*
2  * Copyright (c) 2014, STMicroelectronics International N.V.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <types_ext.h>
29 #include <stdbool.h>
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <string.h>
33 #include <arm.h>
34 #include <assert.h>
35 #include <kernel/mutex.h>
36 #include <kernel/panic.h>
37 #include <kernel/static_ta.h>
38 #include <kernel/tee_common.h>
39 #include <kernel/tee_misc.h>
40 #include <kernel/tee_ta_manager.h>
41 #include <kernel/tee_time.h>
42 #include <kernel/thread.h>
43 #include <kernel/user_ta.h>
44 #include <mm/core_mmu.h>
45 #include <mm/core_memprot.h>
46 #include <mm/tee_mmu.h>
47 #include <tee/tee_svc_cryp.h>
48 #include <tee/tee_obj.h>
49 #include <tee/tee_svc_storage.h>
50 #include <tee_api_types.h>
51 #include <trace.h>
52 #include <utee_types.h>
53 #include <util.h>
54 
55 /* This mutex protects the TA context and session lists and the single-instance/busy state below */
56 struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
57 static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
58 static int tee_ta_single_instance_thread = THREAD_ID_INVALID;
59 static size_t tee_ta_single_instance_count;
60 struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);
61 
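/*
 * Single-instance TA serialization: a thread must hold this global,
 * recursive "single instance lock" while it has a single-instance TA
 * marked busy.  tee_ta_single_instance_thread identifies the owner
 * (THREAD_ID_INVALID when free) and tee_ta_single_instance_count is
 * the recursion depth, which allows one single-instance TA to call
 * into another from the same thread.  Both variables are protected by
 * tee_ta_mutex.
 */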
62 static void lock_single_instance(void)
63 {
64 	/* Requires tee_ta_mutex to be held */
65 	if (tee_ta_single_instance_thread != thread_get_id()) {
66 		/* Wait until the single-instance lock is available. */
67 		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
68 			condvar_wait(&tee_ta_cv, &tee_ta_mutex);
69 
70 		tee_ta_single_instance_thread = thread_get_id();
71 		assert(tee_ta_single_instance_count == 0);
72 	}
73 
74 	tee_ta_single_instance_count++;
75 }
76 
77 static void unlock_single_instance(void)
78 {
79 	/* Requires tee_ta_mutex to be held */
80 	assert(tee_ta_single_instance_thread == thread_get_id());
81 	assert(tee_ta_single_instance_count > 0);
82 
83 	tee_ta_single_instance_count--;
84 	if (tee_ta_single_instance_count == 0) {
85 		tee_ta_single_instance_thread = THREAD_ID_INVALID;
86 		condvar_signal(&tee_ta_cv);
87 	}
88 }
89 
90 static bool has_single_instance_lock(void)
91 {
92 	/* Requires tee_ta_mutex to be held */
93 	return tee_ta_single_instance_thread == thread_get_id();
94 }
95 
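/*
 * Mark a TA context as busy before entering it.  Waiting for a busy
 * context while holding the single-instance lock could deadlock: the
 * thread owning the busy flag may itself be waiting for the
 * single-instance lock we hold.  In that case false is returned and
 * the caller reports TEE_ERROR_BUSY instead of blocking.
 */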
96 static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
97 {
98 	bool rc = true;
99 
100 	mutex_lock(&tee_ta_mutex);
101 
102 	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
103 		lock_single_instance();
104 
105 	if (has_single_instance_lock()) {
106 		if (ctx->busy) {
107 			/*
108 			 * We're holding the single-instance lock and the
109 			 * TA is busy. Waiting now would only deadlock, so
110 			 * release the lock and return false.
111 			 */
112 			rc = false;
113 			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
114 				unlock_single_instance();
115 		}
116 	} else {
117 		/*
118 		 * We're not holding the single-instance lock, so we're free
119 		 * to wait for the TA to become available.
120 		 */
121 		while (ctx->busy)
122 			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
123 	}
124 
125 	/* Either it's already true or we should set it to true */
126 	ctx->busy = true;
127 
128 	mutex_unlock(&tee_ta_mutex);
129 	return rc;
130 }
131 
132 static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
133 {
134 	if (!tee_ta_try_set_busy(ctx))
135 		panic();
136 }
137 
138 static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
139 {
140 	mutex_lock(&tee_ta_mutex);
141 
142 	assert(ctx->busy);
143 	ctx->busy = false;
144 	condvar_signal(&ctx->busy_cv);
145 
146 	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
147 		unlock_single_instance();
148 
149 	mutex_unlock(&tee_ta_mutex);
150 }
151 
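/*
 * Session reference counting: ref_count counts the threads currently
 * referencing the session (tee_ta_get_session() increments it,
 * tee_ta_put_session() decrements it).  A thread closing the session
 * holds the last reference and waits in tee_ta_unlink_session() until
 * ref_count drops to 1, which is why refc_cv is signalled at 1 rather
 * than at 0.
 */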
152 static void dec_session_ref_count(struct tee_ta_session *s)
153 {
154 	assert(s->ref_count > 0);
155 	s->ref_count--;
156 	if (s->ref_count == 1)
157 		condvar_signal(&s->refc_cv);
158 }
159 
160 void tee_ta_put_session(struct tee_ta_session *s)
161 {
162 	mutex_lock(&tee_ta_mutex);
163 
164 	if (s->lock_thread == thread_get_id()) {
165 		s->lock_thread = THREAD_ID_INVALID;
166 		condvar_signal(&s->lock_cv);
167 	}
168 	dec_session_ref_count(s);
169 
170 	mutex_unlock(&tee_ta_mutex);
171 }
172 
173 static struct tee_ta_session *find_session(uint32_t id,
174 			struct tee_ta_session_head *open_sessions)
175 {
176 	struct tee_ta_session *s;
177 
178 	TAILQ_FOREACH(s, open_sessions, link) {
179 		if ((vaddr_t)s == id)
180 			return s;
181 	}
182 	return NULL;
183 }
184 
185 struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
186 			struct tee_ta_session_head *open_sessions)
187 {
188 	struct tee_ta_session *s;
189 
190 	mutex_lock(&tee_ta_mutex);
191 
192 	while (true) {
193 		s = find_session(id, open_sessions);
194 		if (!s)
195 			break;
196 		if (s->unlink) {
197 			s = NULL;
198 			break;
199 		}
200 		s->ref_count++;
201 		if (!exclusive)
202 			break;
203 
204 		assert(s->lock_thread != thread_get_id());
205 
206 		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
207 			condvar_wait(&s->lock_cv, &tee_ta_mutex);
208 
209 		if (s->unlink) {
210 			dec_session_ref_count(s);
211 			s = NULL;
212 			break;
213 		}
214 
215 		s->lock_thread = thread_get_id();
216 		break;
217 	}
218 
219 	mutex_unlock(&tee_ta_mutex);
220 	return s;
221 }
222 
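/*
 * Mark a session for removal and take it off the open session list.
 * Setting s->unlink makes concurrent tee_ta_get_session() calls fail,
 * and lock_cv is broadcast to wake any thread waiting to lock the
 * session.  The function then waits until the calling thread holds the
 * only remaining reference before unlinking the session.
 */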
223 static void tee_ta_unlink_session(struct tee_ta_session *s,
224 			struct tee_ta_session_head *open_sessions)
225 {
226 	mutex_lock(&tee_ta_mutex);
227 
228 	assert(s->ref_count >= 1);
229 	assert(s->lock_thread == thread_get_id());
230 	assert(!s->unlink);
231 
232 	s->unlink = true;
233 	condvar_broadcast(&s->lock_cv);
234 
235 	while (s->ref_count != 1)
236 		condvar_wait(&s->refc_cv, &tee_ta_mutex);
237 
238 	TAILQ_REMOVE(open_sessions, s, link);
239 
240 	mutex_unlock(&tee_ta_mutex);
241 }
242 
243 /*
244  * tee_ta_context_find - Find a loaded TA context based on a UUID (input)
245  * Returns a pointer to the context, or NULL if it isn't loaded
246  */
247 static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
248 {
249 	struct tee_ta_ctx *ctx;
250 
251 	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
252 		if (memcmp(&ctx->uuid, uuid, sizeof(TEE_UUID)) == 0)
253 			return ctx;
254 	}
255 
256 	return NULL;
257 }
258 
259 /* Check that the requester (client ID) matches the session's initial client */
260 static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
261 {
262 	if (id == KERN_IDENTITY)
263 		return TEE_SUCCESS;
264 
265 	if (id == NSAPP_IDENTITY) {
266 		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
267 			DMSG("nsec tries to hijack TA session");
268 			return TEE_ERROR_ACCESS_DENIED;
269 		}
270 		return TEE_SUCCESS;
271 	}
272 
273 	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
274 		DMSG("client id mismatch");
275 		return TEE_ERROR_ACCESS_DENIED;
276 	}
277 	return TEE_SUCCESS;
278 }
279 
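/*
 * Compute the absolute system time at which the invocation should be
 * considered cancelled.  cancel_req_to is a relative timeout in
 * milliseconds; TEE_TIMEOUT_INFINITE (or a failure to read the system
 * time) leaves cancel_time at { UINT32_MAX, UINT32_MAX }, meaning no
 * deadline.  For example, a 1500 ms timeout requested at 10 s 800 ms
 * yields a cancel_time of 12 s 300 ms once the millisecond carry is
 * applied.
 */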
280 static void set_invoke_timeout(struct tee_ta_session *sess,
281 				      uint32_t cancel_req_to)
282 {
283 	TEE_Time current_time;
284 	TEE_Time cancel_time = { UINT32_MAX, UINT32_MAX };
285 
286 	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
287 		goto out;
288 
289 	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
290 		goto out;
291 
292 	/* Check that it doesn't wrap */
293 	if (current_time.seconds + (cancel_req_to / 1000) >=
294 	    current_time.seconds) {
295 		cancel_time.seconds =
296 		    current_time.seconds + cancel_req_to / 1000;
297 		cancel_time.millis = current_time.millis + cancel_req_to % 1000;
298 		if (cancel_time.millis >= 1000) {
299 			cancel_time.seconds++;
300 			cancel_time.millis -= 1000;
301 		}
302 	}
303 
304 out:
305 	sess->cancel_time = cancel_time;
306 }
307 
308 /*-----------------------------------------------------------------------------
309  * Close a session towards a Trusted Application and free up resources
310  *---------------------------------------------------------------------------*/
311 TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
312 				struct tee_ta_session_head *open_sessions,
313 				const TEE_Identity *clnt_id)
314 {
315 	struct tee_ta_session *sess;
316 	struct tee_ta_ctx *ctx;
317 
318 	DMSG("tee_ta_close_session(0x%" PRIxVA ")",  (vaddr_t)csess);
319 
320 	if (!csess)
321 		return TEE_ERROR_ITEM_NOT_FOUND;
322 
323 	sess = tee_ta_get_session((vaddr_t)csess, true, open_sessions);
324 
325 	if (!sess) {
326 		EMSG("session 0x%" PRIxVA " to be removed is not found",
327 		     (vaddr_t)csess);
328 		return TEE_ERROR_ITEM_NOT_FOUND;
329 	}
330 
331 	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
332 		tee_ta_put_session(sess);
333 		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
334 	}
335 
336 	ctx = sess->ctx;
337 	DMSG("   ... Destroy session");
338 
339 	tee_ta_set_busy(ctx);
340 
341 	if (!ctx->panicked) {
342 		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
343 		ctx->ops->enter_close_session(sess);
344 	}
345 
346 	tee_ta_unlink_session(sess, open_sessions);
347 	free(sess);
348 
349 	tee_ta_clear_busy(ctx);
350 
351 	mutex_lock(&tee_ta_mutex);
352 
353 	if (ctx->ref_count <= 0)
354 		panic();
355 
356 	ctx->ref_count--;
357 	if (!ctx->ref_count && !(ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE)) {
358 		DMSG("   ... Destroy TA ctx");
359 
360 		TAILQ_REMOVE(&tee_ctxes, ctx, link);
361 		mutex_unlock(&tee_ta_mutex);
362 
363 		condvar_destroy(&ctx->busy_cv);
364 
365 		ctx->ops->destroy(ctx);
366 	} else
367 		mutex_unlock(&tee_ta_mutex);
368 
369 	return TEE_SUCCESS;
370 }
371 
372 static TEE_Result tee_ta_init_session_with_context(struct tee_ta_ctx *ctx,
373 			struct tee_ta_session *s)
374 {
375 	/*
376 	 * If the TA isn't single instance it should be loaded as a new
377 	 * instance instead of doing anything with this instance.
378 	 * So tell the caller that we didn't find the TA and the
379 	 * caller will load a new instance.
380 	 */
381 	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
382 		return TEE_ERROR_ITEM_NOT_FOUND;
383 
384 	/*
385 	 * The TA is single instance. If it isn't multi session we
386 	 * can't create another session unless it's the first
387 	 * new session towards a keepAlive TA.
388 	 */
389 
390 	if (((ctx->flags & TA_FLAG_MULTI_SESSION) == 0) &&
391 	    !(((ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) != 0) &&
392 	      (ctx->ref_count == 0)))
393 		return TEE_ERROR_BUSY;
394 
395 	DMSG("   ... Re-open TA %pUl", (void *)&ctx->uuid);
396 
397 	ctx->ref_count++;
398 	s->ctx = ctx;
399 	return TEE_SUCCESS;
400 }
401 
402 
403 
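/*
 * Create a session towards the TA identified by uuid.  The new session
 * is inserted into the open session list first, then a matching
 * context is looked for in this order: an already loaded context
 * (reused only for single-instance TAs), a static TA, and finally a
 * user TA loaded from normal world.  On failure the session is
 * unlinked and freed again.
 */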
404 static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
405 				struct tee_ta_session_head *open_sessions,
406 				const TEE_UUID *uuid,
407 				struct tee_ta_session **sess)
408 {
409 	TEE_Result res;
410 	struct tee_ta_ctx *ctx;
411 	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));
412 
413 	*err = TEE_ORIGIN_TEE;
414 	if (!s)
415 		return TEE_ERROR_OUT_OF_MEMORY;
416 
417 	s->cancel_mask = true;
418 	condvar_init(&s->refc_cv);
419 	condvar_init(&s->lock_cv);
420 	s->lock_thread = THREAD_ID_INVALID;
421 	s->ref_count = 1;
422 
423 
424 	/*
425 	 * We take the global TA mutex here and hold it while doing
426 	 * RPC to load the TA. This big critical section should be broken
427 	 * down into smaller pieces.
428 	 */
429 
430 
431 	mutex_lock(&tee_ta_mutex);
432 	TAILQ_INSERT_TAIL(open_sessions, s, link);
433 
434 	/* Look for already loaded TA */
435 	ctx = tee_ta_context_find(uuid);
436 	if (ctx) {
437 		res = tee_ta_init_session_with_context(ctx, s);
438 		if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
439 			goto out;
440 	}
441 
442 	/* Look for static TA */
443 	res = tee_ta_init_static_ta_session(uuid, s);
444 	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
445 		goto out;
446 
447 	/* Look for user TA */
448 	res = tee_ta_init_user_ta_session(uuid, s);
449 
450 out:
451 	if (res == TEE_SUCCESS) {
452 		*sess = s;
453 	} else {
454 		TAILQ_REMOVE(open_sessions, s, link);
455 		free(s);
456 	}
457 	mutex_unlock(&tee_ta_mutex);
458 	return res;
459 }
460 
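/*
 * Open a session towards the TA identified by uuid and run the TA's
 * open-session entry point.
 *
 * Illustrative sketch of a caller (error handling omitted; uuid,
 * clnt_id, cmd, param and open_sessions are assumed to be set up by
 * the caller, which in practice is the TEE core entry code):
 *
 *	TEE_Result res;
 *	TEE_ErrorOrigin eo;
 *	struct tee_ta_session *s = NULL;
 *
 *	res = tee_ta_open_session(&eo, &s, open_sessions, uuid, clnt_id,
 *				  TEE_TIMEOUT_INFINITE, param);
 *	res = tee_ta_invoke_command(&eo, s, clnt_id, TEE_TIMEOUT_INFINITE,
 *				    cmd, param);
 *	res = tee_ta_close_session(s, open_sessions, clnt_id);
 *
 * Real callers additionally look the session up with
 * tee_ta_get_session() and release it with tee_ta_put_session()
 * around each invocation to keep a reference to the session.
 */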
461 TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
462 			       struct tee_ta_session **sess,
463 			       struct tee_ta_session_head *open_sessions,
464 			       const TEE_UUID *uuid,
465 			       const TEE_Identity *clnt_id,
466 			       uint32_t cancel_req_to,
467 			       struct tee_ta_param *param)
468 {
469 	TEE_Result res;
470 	struct tee_ta_session *s = NULL;
471 	struct tee_ta_ctx *ctx;
472 	bool panicked;
473 	bool was_busy = false;
474 
475 	res = tee_ta_init_session(err, open_sessions, uuid, &s);
476 	if (res != TEE_SUCCESS) {
477 		DMSG("init session failed 0x%x", res);
478 		return res;
479 	}
480 
481 	ctx = s->ctx;
482 
483 	if (ctx->panicked) {
484 		DMSG("panicked, call tee_ta_close_session()");
485 		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
486 		*err = TEE_ORIGIN_TEE;
487 		return TEE_ERROR_TARGET_DEAD;
488 	}
489 
490 	*sess = s;
491 	/* Save identity of the owner of the session */
492 	s->clnt_id = *clnt_id;
493 
494 	res = tee_ta_verify_param(s, param);
495 	if (res == TEE_SUCCESS) {
496 		if (tee_ta_try_set_busy(ctx)) {
497 			set_invoke_timeout(s, cancel_req_to);
498 			res = ctx->ops->enter_open_session(s, param, err);
499 			tee_ta_clear_busy(ctx);
500 		} else {
501 			/* Deadlock avoided */
502 			res = TEE_ERROR_BUSY;
503 			was_busy = true;
504 		}
505 	}
506 
507 	panicked = ctx->panicked;
508 
509 	tee_ta_put_session(s);
510 	if (panicked || (res != TEE_SUCCESS))
511 		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
512 
513 	/*
514 	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors;
515 	 * TA panics and deadlock avoidance (busy) are reported as TEE_ORIGIN_TEE.
516 	 */
517 	if (panicked || was_busy)
518 		*err = TEE_ORIGIN_TEE;
519 	else
520 		*err = TEE_ORIGIN_TRUSTED_APP;
521 
522 	if (res != TEE_SUCCESS)
523 		EMSG("Failed. Return error 0x%x", res);
524 
525 	return res;
526 }
527 
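/*
 * Invoke a command in an open session.  The TA context is marked busy
 * for the duration of the call and the cancellation deadline is armed
 * from cancel_req_to before entering the TA.  A panicked TA is
 * reported as TEE_ERROR_TARGET_DEAD with origin TEE_ORIGIN_TEE.
 */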
528 TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
529 				 struct tee_ta_session *sess,
530 				 const TEE_Identity *clnt_id,
531 				 uint32_t cancel_req_to, uint32_t cmd,
532 				 struct tee_ta_param *param)
533 {
534 	TEE_Result res;
535 
536 	if (check_client(sess, clnt_id) != TEE_SUCCESS)
537 		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
538 
539 	if (sess->ctx->panicked) {
540 		DMSG("   Panicked !");
541 		*err = TEE_ORIGIN_TEE;
542 		return TEE_ERROR_TARGET_DEAD;
543 	}
544 
545 	tee_ta_set_busy(sess->ctx);
546 
547 	res = tee_ta_verify_param(sess, param);
548 	if (res != TEE_SUCCESS) {
549 		*err = TEE_ORIGIN_TEE;
550 		goto function_exit;
551 	}
552 
553 	set_invoke_timeout(sess, cancel_req_to);
554 	res = sess->ctx->ops->enter_invoke_cmd(sess, cmd, param, err);
555 
556 	if (sess->ctx->panicked) {
557 		*err = TEE_ORIGIN_TEE;
558 		res = TEE_ERROR_TARGET_DEAD;
559 	}
560 
561 function_exit:
562 	tee_ta_clear_busy(sess->ctx);
563 	if (res != TEE_SUCCESS)
564 		DMSG("  => Error: %x of %d\n", res, *err);
565 	return res;
566 }
567 
568 TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
569 				 struct tee_ta_session *sess,
570 				 const TEE_Identity *clnt_id)
571 {
572 	*err = TEE_ORIGIN_TEE;
573 
574 	if (check_client(sess, clnt_id) != TEE_SUCCESS)
575 		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
576 
577 	sess->cancel = true;
578 	return TEE_SUCCESS;
579 }
580 
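/*
 * A request is cancelled when cancellations are unmasked and either an
 * explicit cancel request has been received or the deadline armed by
 * set_invoke_timeout() has passed.  cancel_time.seconds == UINT32_MAX
 * means that no deadline is armed.
 */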
581 bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
582 {
583 	TEE_Time current_time;
584 
585 	if (s->cancel_mask)
586 		return false;
587 
588 	if (s->cancel)
589 		return true;
590 
591 	if (s->cancel_time.seconds == UINT32_MAX)
592 		return false;
593 
594 	if (curr_time != NULL)
595 		current_time = *curr_time;
596 	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
597 		return false;
598 
599 	if (current_time.seconds > s->cancel_time.seconds ||
600 	    (current_time.seconds == s->cancel_time.seconds &&
601 	     current_time.millis >= s->cancel_time.millis)) {
602 		return true;
603 	}
604 
605 	return false;
606 }
607 
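/*
 * Select the MMU context matching the topmost user TA session on this
 * thread's session stack.  Static TAs run with the core mapping, so a
 * static TA session at the top of the stack is skipped in favour of
 * the session below it.  Finally, sanity check that a user mapping is
 * active exactly when the selected context has one.
 */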
608 static void update_current_ctx(struct thread_specific_data *tsd)
609 {
610 	struct tee_ta_ctx *ctx = NULL;
611 	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);
612 
613 	if (s) {
614 		if (is_static_ta_ctx(s->ctx))
615 			s = TAILQ_NEXT(s, link_tsd);
616 
617 		if (s)
618 			ctx = s->ctx;
619 	}
620 
621 	if (tsd->ctx != ctx)
622 		tee_mmu_set_ctx(ctx);
623 	/*
624 	 * If ctx->mmu == NULL we must not have a user mapping active;
625 	 * if ctx->mmu != NULL we must have a user mapping active.
626 	 */
627 	if (((ctx && is_user_ta_ctx(ctx) ?
628 			to_user_ta_ctx(ctx)->mmu : NULL) == NULL) ==
629 					core_mmu_user_mapping_is_active())
630 		panic("unexpected active mapping");
631 }
632 
633 void tee_ta_push_current_session(struct tee_ta_session *sess)
634 {
635 	struct thread_specific_data *tsd = thread_get_tsd();
636 
637 	TAILQ_INSERT_HEAD(&tsd->sess_stack, sess, link_tsd);
638 	update_current_ctx(tsd);
639 }
640 
641 struct tee_ta_session *tee_ta_pop_current_session(void)
642 {
643 	struct thread_specific_data *tsd = thread_get_tsd();
644 	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);
645 
646 	if (s) {
647 		TAILQ_REMOVE(&tsd->sess_stack, s, link_tsd);
648 		update_current_ctx(tsd);
649 	}
650 	return s;
651 }
652 
653 TEE_Result tee_ta_get_current_session(struct tee_ta_session **sess)
654 {
655 	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);
656 
657 	if (!s)
658 		return TEE_ERROR_BAD_STATE;
659 	*sess = s;
660 	return TEE_SUCCESS;
661 }
662 
663 struct tee_ta_session *tee_ta_get_calling_session(void)
664 {
665 	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);
666 
667 	if (s)
668 		s = TAILQ_NEXT(s, link_tsd);
669 	return s;
670 }
671 
672 TEE_Result tee_ta_get_client_id(TEE_Identity *id)
673 {
674 	TEE_Result res;
675 	struct tee_ta_session *sess;
676 
677 	res = tee_ta_get_current_session(&sess);
678 	if (res != TEE_SUCCESS)
679 		return res;
680 
681 	if (id == NULL)
682 		return TEE_ERROR_BAD_PARAMETERS;
683 
684 	*id = sess->clnt_id;
685 	return TEE_SUCCESS;
686 }
687 
688 /*
689  * dump_state - Display TA state as an error log.
690  */
691 static void dump_state(struct tee_ta_ctx *ctx)
692 {
693 	struct tee_ta_session *s = NULL;
694 	bool active __maybe_unused;
695 
696 	active = ((tee_ta_get_current_session(&s) == TEE_SUCCESS) &&
697 		  s && s->ctx == ctx);
698 
699 	EMSG_RAW("Status of TA %pUl (%p) %s", (void *)&ctx->uuid, (void *)ctx,
700 		active ? "(active)" : "");
701 	ctx->ops->dump_state(ctx);
702 }
703 
704 void tee_ta_dump_current(void)
705 {
706 	struct tee_ta_session *s = NULL;
707 
708 	if (tee_ta_get_current_session(&s) != TEE_SUCCESS) {
709 		EMSG("no valid session found, cannot log TA status");
710 		return;
711 	}
712 
713 	dump_state(s->ctx);
714 }
715