xref: /optee_os/core/kernel/tee_ta_manager.c (revision ef4bc451c262f007562867ea4e5f4ca9f26459fd)
1 /*
2  * Copyright (c) 2014, STMicroelectronics International N.V.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <types_ext.h>
29 #include <stdbool.h>
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <string.h>
33 #include <arm.h>
34 #include <assert.h>
35 #include <kernel/mutex.h>
36 #include <kernel/panic.h>
37 #include <kernel/static_ta.h>
38 #include <kernel/tee_common.h>
39 #include <kernel/tee_misc.h>
40 #include <kernel/tee_ta_manager.h>
41 #include <kernel/tee_time.h>
42 #include <kernel/thread.h>
43 #include <kernel/user_ta.h>
44 #include <mm/core_mmu.h>
45 #include <mm/core_memprot.h>
46 #include <mm/tee_mmu.h>
47 #include <tee/tee_svc_cryp.h>
48 #include <tee/tee_obj.h>
49 #include <tee/tee_svc_storage.h>
50 #include <tee_api_types.h>
51 #include <trace.h>
52 #include <utee_types.h>
53 #include <util.h>
54 
55 /* Protects the TA context list, the session lists and tee_ta_init_session() */
56 struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
57 static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
58 static int tee_ta_single_instance_thread = THREAD_ID_INVALID;
59 static size_t tee_ta_single_instance_count;
60 struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);
61 
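/*
 * lock/unlock_single_instance() implement a per-thread, counted lock
 * built on tee_ta_mutex and tee_ta_cv. The current holder is recorded
 * in tee_ta_single_instance_thread and may take the lock recursively
 * (tee_ta_single_instance_count). It serializes sessions towards
 * single-instance TAs.
 */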
62 static void lock_single_instance(void)
63 {
64 	/* Requires tee_ta_mutex to be held */
65 	if (tee_ta_single_instance_thread != thread_get_id()) {
66 		/* Wait until the single-instance lock is available. */
67 		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
68 			condvar_wait(&tee_ta_cv, &tee_ta_mutex);
69 
70 		tee_ta_single_instance_thread = thread_get_id();
71 		assert(tee_ta_single_instance_count == 0);
72 	}
73 
74 	tee_ta_single_instance_count++;
75 }
76 
77 static void unlock_single_instance(void)
78 {
79 	/* Requires tee_ta_mutex to be held */
80 	assert(tee_ta_single_instance_thread == thread_get_id());
81 	assert(tee_ta_single_instance_count > 0);
82 
83 	tee_ta_single_instance_count--;
84 	if (tee_ta_single_instance_count == 0) {
85 		tee_ta_single_instance_thread = THREAD_ID_INVALID;
86 		condvar_signal(&tee_ta_cv);
87 	}
88 }
89 
90 static bool has_single_instance_lock(void)
91 {
92 	/* Requires tee_ta_mutex to be held */
93 	return tee_ta_single_instance_thread == thread_get_id();
94 }
95 
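/*
 * tee_ta_try_set_busy - Try to mark a TA context as busy
 * Single-instance TAs also take the single-instance lock. If this
 * thread already holds the single-instance lock and the context is
 * busy, waiting would deadlock, so false is returned and the caller
 * has to back off instead.
 */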
96 static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
97 {
98 	bool rc = true;
99 
100 	mutex_lock(&tee_ta_mutex);
101 
102 	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
103 		lock_single_instance();
104 
105 	if (has_single_instance_lock()) {
106 		if (ctx->busy) {
107 			/*
108 			 * We're holding the single-instance lock and the
109 			 * TA is busy. Waiting now would only cause a
110 			 * deadlock, so we release the lock and return false.
111 			 */
112 			rc = false;
113 			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
114 				unlock_single_instance();
115 		}
116 	} else {
117 		/*
118 		 * We're not holding the single-instance lock, so we're free to
119 		 * wait for the TA to become available.
120 		 */
121 		while (ctx->busy)
122 			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
123 	}
124 
125 	/* Either it's already true or we should set it to true */
126 	ctx->busy = true;
127 
128 	mutex_unlock(&tee_ta_mutex);
129 	return rc;
130 }
131 
132 static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
133 {
134 	if (!tee_ta_try_set_busy(ctx))
135 		panic();
136 }
137 
138 static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
139 {
140 	mutex_lock(&tee_ta_mutex);
141 
142 	assert(ctx->busy);
143 	ctx->busy = false;
144 	condvar_signal(&ctx->busy_cv);
145 
146 	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
147 		unlock_single_instance();
148 
149 	mutex_unlock(&tee_ta_mutex);
150 }
151 
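/*
 * dec_session_ref_count - Drop one reference to a session
 * When only the unlinking thread's reference remains (ref_count == 1)
 * refc_cv is signaled so that tee_ta_unlink_session() can proceed.
 */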
152 static void dec_session_ref_count(struct tee_ta_session *s)
153 {
154 	assert(s->ref_count > 0);
155 	s->ref_count--;
156 	if (s->ref_count == 1)
157 		condvar_signal(&s->refc_cv);
158 }
159 
160 void tee_ta_put_session(struct tee_ta_session *s)
161 {
162 	mutex_lock(&tee_ta_mutex);
163 
164 	if (s->lock_thread == thread_get_id()) {
165 		s->lock_thread = THREAD_ID_INVALID;
166 		condvar_signal(&s->lock_cv);
167 	}
168 	dec_session_ref_count(s);
169 
170 	mutex_unlock(&tee_ta_mutex);
171 }
172 
173 static struct tee_ta_session *find_session(uint32_t id,
174 			struct tee_ta_session_head *open_sessions)
175 {
176 	struct tee_ta_session *s;
177 
178 	TAILQ_FOREACH(s, open_sessions, link) {
179 		if ((vaddr_t)s == id)
180 			return s;
181 	}
182 	return NULL;
183 }
184 
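/*
 * tee_ta_get_session - Look up a session by id and take a reference
 * The id is the session pointer value (see find_session()). With
 * exclusive set the per-session lock is also taken and held until
 * tee_ta_put_session() is called from the same thread. Returns NULL
 * if the session isn't found or is being unlinked.
 */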
185 struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
186 			struct tee_ta_session_head *open_sessions)
187 {
188 	struct tee_ta_session *s;
189 
190 	mutex_lock(&tee_ta_mutex);
191 
192 	while (true) {
193 		s = find_session(id, open_sessions);
194 		if (!s)
195 			break;
196 		if (s->unlink) {
197 			s = NULL;
198 			break;
199 		}
200 		s->ref_count++;
201 		if (!exclusive)
202 			break;
203 
204 		assert(s->lock_thread != thread_get_id());
205 
206 		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
207 			condvar_wait(&s->lock_cv, &tee_ta_mutex);
208 
209 		if (s->unlink) {
210 			dec_session_ref_count(s);
211 			s = NULL;
212 			break;
213 		}
214 
215 		s->lock_thread = thread_get_id();
216 		break;
217 	}
218 
219 	mutex_unlock(&tee_ta_mutex);
220 	return s;
221 }
222 
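/*
 * tee_ta_unlink_session - Detach a session from the open sessions list
 * The caller must hold the session's exclusive lock. Other waiters are
 * woken and the function blocks until the caller's reference is the
 * only one left before removing the session from the list.
 */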
223 static void tee_ta_unlink_session(struct tee_ta_session *s,
224 			struct tee_ta_session_head *open_sessions)
225 {
226 	mutex_lock(&tee_ta_mutex);
227 
228 	assert(s->ref_count >= 1);
229 	assert(s->lock_thread == thread_get_id());
230 	assert(!s->unlink);
231 
232 	s->unlink = true;
233 	condvar_broadcast(&s->lock_cv);
234 
235 	while (s->ref_count != 1)
236 		condvar_wait(&s->refc_cv, &tee_ta_mutex);
237 
238 	TAILQ_REMOVE(open_sessions, s, link);
239 
240 	mutex_unlock(&tee_ta_mutex);
241 }
242 
243 /*
244  * tee_ta_context_find - Find a loaded TA context based on a UUID (input)
245  * Returns a pointer to the context, or NULL if it isn't found
246  */
247 static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
248 {
249 	struct tee_ta_ctx *ctx;
250 
251 	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
252 		if (memcmp(&ctx->uuid, uuid, sizeof(TEE_UUID)) == 0)
253 			return ctx;
254 	}
255 
256 	return NULL;
257 }
258 
259 /* Check that the requester (client ID) matches the client that opened the session */
260 static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
261 {
262 	if (id == KERN_IDENTITY)
263 		return TEE_SUCCESS;
264 
265 	if (id == NSAPP_IDENTITY) {
266 		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
267 			DMSG("nsec tries to hijack TA session");
268 			return TEE_ERROR_ACCESS_DENIED;
269 		}
270 		return TEE_SUCCESS;
271 	}
272 
273 	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
274 		DMSG("client id mismatch");
275 		return TEE_ERROR_ACCESS_DENIED;
276 	}
277 	return TEE_SUCCESS;
278 }
279 
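/*
 * set_invoke_timeout - Compute the absolute cancellation deadline
 * cancel_req_to is a relative timeout in milliseconds. With
 * TEE_TIMEOUT_INFINITE the deadline stays at { UINT32_MAX, UINT32_MAX },
 * which tee_ta_session_is_cancelled() treats as "never".
 */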
280 static void set_invoke_timeout(struct tee_ta_session *sess,
281 				      uint32_t cancel_req_to)
282 {
283 	TEE_Time current_time;
284 	TEE_Time cancel_time = { UINT32_MAX, UINT32_MAX };
285 
286 	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
287 		goto out;
288 
289 	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
290 		goto out;
291 
292 	/* If the addition would wrap, keep the infinite cancel time */
293 	if (current_time.seconds + (cancel_req_to / 1000) >=
294 	    current_time.seconds) {
295 		cancel_time.seconds =
296 		    current_time.seconds + cancel_req_to / 1000;
297 		cancel_time.millis = current_time.millis + cancel_req_to % 1000;
298 		if (cancel_time.millis >= 1000) {
299 			cancel_time.seconds++;
300 			cancel_time.millis -= 1000;
301 		}
302 	}
303 
304 out:
305 	sess->cancel_time = cancel_time;
306 }
307 
308 /*-----------------------------------------------------------------------------
309  * Close a session towards a Trusted Application and free its resources
310  *---------------------------------------------------------------------------*/
311 TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
312 				struct tee_ta_session_head *open_sessions,
313 				const TEE_Identity *clnt_id)
314 {
315 	struct tee_ta_session *sess;
316 	struct tee_ta_ctx *ctx;
317 
318 	DMSG("tee_ta_close_session(0x%" PRIxVA ")",  (vaddr_t)csess);
319 
320 	if (!csess)
321 		return TEE_ERROR_ITEM_NOT_FOUND;
322 
323 	sess = tee_ta_get_session((vaddr_t)csess, true, open_sessions);
324 
325 	if (!sess) {
326 		EMSG("session 0x%" PRIxVA " to be removed is not found",
327 		     (vaddr_t)csess);
328 		return TEE_ERROR_ITEM_NOT_FOUND;
329 	}
330 
331 	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
332 		tee_ta_put_session(sess);
333 		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
334 	}
335 
336 	ctx = sess->ctx;
337 	DMSG("   ... Destroy session");
338 
339 	tee_ta_set_busy(ctx);
340 
341 	if (!ctx->panicked) {
342 		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
343 		ctx->ops->enter_close_session(sess);
344 	}
345 
346 	tee_ta_unlink_session(sess, open_sessions);
347 	free(sess);
348 
349 	tee_ta_clear_busy(ctx);
350 
351 	mutex_lock(&tee_ta_mutex);
352 
353 	if (ctx->ref_count <= 0)
354 		panic();
355 
356 	ctx->ref_count--;
357 	if (!ctx->ref_count && !(ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE)) {
358 		DMSG("   ... Destroy TA ctx");
359 
360 		TAILQ_REMOVE(&tee_ctxes, ctx, link);
361 		mutex_unlock(&tee_ta_mutex);
362 
363 		condvar_destroy(&ctx->busy_cv);
364 
365 		pgt_flush_ctx(ctx);
366 		ctx->ops->destroy(ctx);
367 	} else
368 		mutex_unlock(&tee_ta_mutex);
369 
370 	return TEE_SUCCESS;
371 }
372 
373 static TEE_Result tee_ta_init_session_with_context(struct tee_ta_ctx *ctx,
374 			struct tee_ta_session *s)
375 {
376 	/*
377 	 * If the TA isn't single-instance it should be loaded as a new
378 	 * instance instead of reusing this one. So tell the caller
379 	 * that we didn't find the TA, and the caller will then load
380 	 * a new instance.
381 	 */
382 	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
383 		return TEE_ERROR_ITEM_NOT_FOUND;
384 
385 	/*
386 	 * The TA is single-instance; if it isn't multi-session we
387 	 * can't create another session unless it's the first
388 	 * new session towards a keepAlive TA.
389 	 */
390 
391 	if (((ctx->flags & TA_FLAG_MULTI_SESSION) == 0) &&
392 	    !(((ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) != 0) &&
393 	      (ctx->ref_count == 0)))
394 		return TEE_ERROR_BUSY;
395 
396 	DMSG("   ... Re-open TA %pUl", (void *)&ctx->uuid);
397 
398 	ctx->ref_count++;
399 	s->ctx = ctx;
400 	return TEE_SUCCESS;
401 }
402 
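/*
 * tee_ta_init_session - Create a session and bind it to a TA context
 * An already loaded single-instance context is reused when possible,
 * otherwise a static TA and finally a user TA is tried. The session is
 * inserted into open_sessions before the TA is loaded and removed
 * again on failure.
 */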
405 static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
406 				struct tee_ta_session_head *open_sessions,
407 				const TEE_UUID *uuid,
408 				struct tee_ta_session **sess)
409 {
410 	TEE_Result res;
411 	struct tee_ta_ctx *ctx;
412 	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));
413 
414 	*err = TEE_ORIGIN_TEE;
415 	if (!s)
416 		return TEE_ERROR_OUT_OF_MEMORY;
417 
418 	s->cancel_mask = true;
419 	condvar_init(&s->refc_cv);
420 	condvar_init(&s->lock_cv);
421 	s->lock_thread = THREAD_ID_INVALID;
422 	s->ref_count = 1;
423 
424 
425 	/*
426 	 * We take the global TA mutex here and hold it while doing
427 	 * RPC to load the TA. This big critical section should be broken
428 	 * down into smaller pieces.
429 	 */
430 
431 
432 	mutex_lock(&tee_ta_mutex);
433 	TAILQ_INSERT_TAIL(open_sessions, s, link);
434 
435 	/* Look for already loaded TA */
436 	ctx = tee_ta_context_find(uuid);
437 	if (ctx) {
438 		res = tee_ta_init_session_with_context(ctx, s);
439 		if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
440 			goto out;
441 	}
442 
443 	/* Look for static TA */
444 	res = tee_ta_init_static_ta_session(uuid, s);
445 	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
446 		goto out;
447 
448 	/* Look for user TA */
449 	res = tee_ta_init_user_ta_session(uuid, s);
450 
451 out:
452 	if (res == TEE_SUCCESS) {
453 		*sess = s;
454 	} else {
455 		TAILQ_REMOVE(open_sessions, s, link);
456 		free(s);
457 	}
458 	mutex_unlock(&tee_ta_mutex);
459 	return res;
460 }
461 
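/*
 * tee_ta_open_session - Open a session towards the TA matching uuid
 * On success *sess refers to the new session; on failure the session
 * is closed again before returning. If waiting for a busy instance
 * would deadlock, TEE_ERROR_BUSY is returned with the error origin set
 * to TEE_ORIGIN_TEE.
 */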
462 TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
463 			       struct tee_ta_session **sess,
464 			       struct tee_ta_session_head *open_sessions,
465 			       const TEE_UUID *uuid,
466 			       const TEE_Identity *clnt_id,
467 			       uint32_t cancel_req_to,
468 			       struct tee_ta_param *param)
469 {
470 	TEE_Result res;
471 	struct tee_ta_session *s = NULL;
472 	struct tee_ta_ctx *ctx;
473 	bool panicked;
474 	bool was_busy = false;
475 
476 	res = tee_ta_init_session(err, open_sessions, uuid, &s);
477 	if (res != TEE_SUCCESS) {
478 		DMSG("init session failed 0x%x", res);
479 		return res;
480 	}
481 
482 	ctx = s->ctx;
483 
484 	if (ctx->panicked) {
485 		DMSG("panicked, call tee_ta_close_session()");
486 		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
487 		*err = TEE_ORIGIN_TEE;
488 		return TEE_ERROR_TARGET_DEAD;
489 	}
490 
491 	*sess = s;
492 	/* Save identity of the owner of the session */
493 	s->clnt_id = *clnt_id;
494 
495 	res = tee_ta_verify_param(s, param);
496 	if (res == TEE_SUCCESS) {
497 		if (tee_ta_try_set_busy(ctx)) {
498 			set_invoke_timeout(s, cancel_req_to);
499 			res = ctx->ops->enter_open_session(s, param, err);
500 			tee_ta_clear_busy(ctx);
501 		} else {
502 			/* Deadlock avoided */
503 			res = TEE_ERROR_BUSY;
504 			was_busy = true;
505 		}
506 	}
507 
508 	panicked = ctx->panicked;
509 
510 	tee_ta_put_session(s);
511 	if (panicked || (res != TEE_SUCCESS))
512 		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
513 
514 	/*
515 	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors;
516 	 * panics and the busy case are attributed to the TEE.
517 	 */
518 	if (panicked || was_busy)
519 		*err = TEE_ORIGIN_TEE;
520 	else
521 		*err = TEE_ORIGIN_TRUSTED_APP;
522 
523 	if (res != TEE_SUCCESS)
524 		EMSG("Failed. Return error 0x%x", res);
525 
526 	return res;
527 }
528 
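/*
 * tee_ta_invoke_command - Invoke command cmd in an open session
 * The client identity is checked against the session owner and the
 * context is kept busy for the duration of the call. A panicking TA
 * turns the result into TEE_ERROR_TARGET_DEAD with origin
 * TEE_ORIGIN_TEE.
 */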
529 TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
530 				 struct tee_ta_session *sess,
531 				 const TEE_Identity *clnt_id,
532 				 uint32_t cancel_req_to, uint32_t cmd,
533 				 struct tee_ta_param *param)
534 {
535 	TEE_Result res;
536 
537 	if (check_client(sess, clnt_id) != TEE_SUCCESS)
538 		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
539 
540 	if (sess->ctx->panicked) {
541 		DMSG("   Panicked !");
542 		*err = TEE_ORIGIN_TEE;
543 		return TEE_ERROR_TARGET_DEAD;
544 	}
545 
546 	tee_ta_set_busy(sess->ctx);
547 
548 	res = tee_ta_verify_param(sess, param);
549 	if (res != TEE_SUCCESS) {
550 		*err = TEE_ORIGIN_TEE;
551 		goto function_exit;
552 	}
553 
554 	set_invoke_timeout(sess, cancel_req_to);
555 	res = sess->ctx->ops->enter_invoke_cmd(sess, cmd, param, err);
556 
557 	if (sess->ctx->panicked) {
558 		*err = TEE_ORIGIN_TEE;
559 		res = TEE_ERROR_TARGET_DEAD;
560 	}
561 
562 function_exit:
563 	tee_ta_clear_busy(sess->ctx);
564 	if (res != TEE_SUCCESS)
565 		DMSG("  => Error: 0x%x (origin %d)\n", res, *err);
566 	return res;
567 }
568 
569 TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
570 				 struct tee_ta_session *sess,
571 				 const TEE_Identity *clnt_id)
572 {
573 	*err = TEE_ORIGIN_TEE;
574 
575 	if (check_client(sess, clnt_id) != TEE_SUCCESS)
576 		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
577 
578 	sess->cancel = true;
579 	return TEE_SUCCESS;
580 }
581 
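/*
 * tee_ta_session_is_cancelled - Check whether a request is cancelled
 * Returns true if the session has a pending, unmasked cancellation
 * request or if its cancellation deadline has passed. curr_time may be
 * NULL, in which case the system time is read here.
 */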
582 bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
583 {
584 	TEE_Time current_time;
585 
586 	if (s->cancel_mask)
587 		return false;
588 
589 	if (s->cancel)
590 		return true;
591 
592 	if (s->cancel_time.seconds == UINT32_MAX)
593 		return false;
594 
595 	if (curr_time != NULL)
596 		current_time = *curr_time;
597 	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
598 		return false;
599 
600 	if (current_time.seconds > s->cancel_time.seconds ||
601 	    (current_time.seconds == s->cancel_time.seconds &&
602 	     current_time.millis >= s->cancel_time.millis)) {
603 		return true;
604 	}
605 
606 	return false;
607 }
608 
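/*
 * update_current_ctx - Make the MMU context follow the session stack
 * The context of the topmost session on this thread's stack is used,
 * except that a static TA session is skipped (static TAs run with the
 * kernel mapping) in favour of the session below it. Finally check
 * that a user mapping is active exactly when the selected context has
 * one.
 */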
609 static void update_current_ctx(struct thread_specific_data *tsd)
610 {
611 	struct tee_ta_ctx *ctx = NULL;
612 	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);
613 
614 	if (s) {
615 		if (is_static_ta_ctx(s->ctx))
616 			s = TAILQ_NEXT(s, link_tsd);
617 
618 		if (s)
619 			ctx = s->ctx;
620 	}
621 
622 	if (tsd->ctx != ctx)
623 		tee_mmu_set_ctx(ctx);
624 	/*
625 	 * If ctx->mmu == NULL we must not have a user mapping active;
626 	 * if ctx->mmu != NULL we must have a user mapping active.
627 	 */
628 	if (((ctx && is_user_ta_ctx(ctx) ?
629 			to_user_ta_ctx(ctx)->mmu : NULL) == NULL) ==
630 					core_mmu_user_mapping_is_active())
631 		panic("unexpected active mapping");
632 }
633 
634 void tee_ta_push_current_session(struct tee_ta_session *sess)
635 {
636 	struct thread_specific_data *tsd = thread_get_tsd();
637 
638 	TAILQ_INSERT_HEAD(&tsd->sess_stack, sess, link_tsd);
639 	update_current_ctx(tsd);
640 }
641 
642 struct tee_ta_session *tee_ta_pop_current_session(void)
643 {
644 	struct thread_specific_data *tsd = thread_get_tsd();
645 	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);
646 
647 	if (s) {
648 		TAILQ_REMOVE(&tsd->sess_stack, s, link_tsd);
649 		update_current_ctx(tsd);
650 	}
651 	return s;
652 }
653 
654 TEE_Result tee_ta_get_current_session(struct tee_ta_session **sess)
655 {
656 	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);
657 
658 	if (!s)
659 		return TEE_ERROR_BAD_STATE;
660 	*sess = s;
661 	return TEE_SUCCESS;
662 }
663 
664 struct tee_ta_session *tee_ta_get_calling_session(void)
665 {
666 	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);
667 
668 	if (s)
669 		s = TAILQ_NEXT(s, link_tsd);
670 	return s;
671 }
672 
673 TEE_Result tee_ta_get_client_id(TEE_Identity *id)
674 {
675 	TEE_Result res;
676 	struct tee_ta_session *sess;
677 
678 	res = tee_ta_get_current_session(&sess);
679 	if (res != TEE_SUCCESS)
680 		return res;
681 
682 	if (id == NULL)
683 		return TEE_ERROR_BAD_PARAMETERS;
684 
685 	*id = sess->clnt_id;
686 	return TEE_SUCCESS;
687 }
688 
689 /*
690  * dump_state - Display TA state as an error log.
691  */
692 static void dump_state(struct tee_ta_ctx *ctx)
693 {
694 	struct tee_ta_session *s = NULL;
695 	bool active __maybe_unused;
696 
697 	active = ((tee_ta_get_current_session(&s) == TEE_SUCCESS) &&
698 		  s && s->ctx == ctx);
699 
700 	EMSG_RAW("Status of TA %pUl (%p) %s", (void *)&ctx->uuid, (void *)ctx,
701 		active ? "(active)" : "");
702 	ctx->ops->dump_state(ctx);
703 }
704 
705 void tee_ta_dump_current(void)
706 {
707 	struct tee_ta_session *s = NULL;
708 
709 	if (tee_ta_get_current_session(&s) != TEE_SUCCESS) {
710 		EMSG("no valid session found, cannot log TA status");
711 		return;
712 	}
713 
714 	dump_state(s->ctx);
715 }
716