xref: /optee_os/core/kernel/tee_ta_manager.c (revision 8ddf5a4e3ce277adee040d90758ec08b429e9e4f)
1 /*
2  * Copyright (c) 2014, STMicroelectronics International N.V.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <types_ext.h>
29 #include <stdbool.h>
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <string.h>
33 #include <arm.h>
34 #include <assert.h>
35 #include <kernel/mutex.h>
36 #include <kernel/panic.h>
37 #include <kernel/static_ta.h>
38 #include <kernel/tee_common.h>
39 #include <kernel/tee_misc.h>
40 #include <kernel/tee_ta_manager.h>
41 #include <kernel/tee_time.h>
42 #include <kernel/thread.h>
43 #include <kernel/user_ta.h>
44 #include <mm/core_mmu.h>
45 #include <mm/core_memprot.h>
46 #include <mm/tee_mmu.h>
47 #include <tee/tee_svc_cryp.h>
48 #include <tee/tee_obj.h>
49 #include <tee/tee_svc_storage.h>
50 #include <tee_api_types.h>
51 #include <trace.h>
52 #include <utee_types.h>
53 #include <util.h>
54 
55 /* This mutex protects the TA context list, the open-session lists and related state */
56 struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
57 static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
58 static int tee_ta_single_instance_thread = THREAD_ID_INVALID;
59 static size_t tee_ta_single_instance_count;
60 struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);
61 
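/*
 * Take the single-instance lock on behalf of the calling thread. The lock
 * is recursive per thread: tee_ta_single_instance_count records how many
 * times the owning thread has taken it. Requires tee_ta_mutex to be held.
 */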
62 static void lock_single_instance(void)
63 {
64 	/* Requires tee_ta_mutex to be held */
65 	if (tee_ta_single_instance_thread != thread_get_id()) {
66 		/* Wait until the single-instance lock is available. */
67 		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
68 			condvar_wait(&tee_ta_cv, &tee_ta_mutex);
69 
70 		tee_ta_single_instance_thread = thread_get_id();
71 		assert(tee_ta_single_instance_count == 0);
72 	}
73 
74 	tee_ta_single_instance_count++;
75 }
76 
77 static void unlock_single_instance(void)
78 {
79 	/* Requires tee_ta_mutex to be held */
80 	assert(tee_ta_single_instance_thread == thread_get_id());
81 	assert(tee_ta_single_instance_count > 0);
82 
83 	tee_ta_single_instance_count--;
84 	if (tee_ta_single_instance_count == 0) {
85 		tee_ta_single_instance_thread = THREAD_ID_INVALID;
86 		condvar_signal(&tee_ta_cv);
87 	}
88 }
89 
90 static bool has_single_instance_lock(void)
91 {
92 	/* Requires tee_ta_mutex to be held */
93 	return tee_ta_single_instance_thread == thread_get_id();
94 }
95 
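/*
 * Mark the TA context as busy to serialize entry into the TA. Returns
 * false instead of blocking when the calling thread already holds the
 * single-instance lock and the context is busy, since waiting in that
 * case would deadlock.
 */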
96 static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
97 {
98 	bool rc = true;
99 
100 	mutex_lock(&tee_ta_mutex);
101 
102 	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
103 		lock_single_instance();
104 
105 	if (has_single_instance_lock()) {
106 		if (ctx->busy) {
107 			/*
108 			 * We're holding the single-instance lock and the
109 			 * TA is busy. Waiting now would only cause a
110 			 * deadlock, so we release the lock and return false.
111 			 */
112 			rc = false;
113 			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
114 				unlock_single_instance();
115 		}
116 	} else {
117 		/*
118 		 * We're not holding the single-instance lock, we're free to
119 		 * wait for the TA to become available.
120 		 */
121 		while (ctx->busy)
122 			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
123 	}
124 
125 	/* Either it's already true or we should set it to true */
126 	ctx->busy = true;
127 
128 	mutex_unlock(&tee_ta_mutex);
129 	return rc;
130 }
131 
132 static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
133 {
134 	if (!tee_ta_try_set_busy(ctx))
135 		panic();
136 }
137 
138 static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
139 {
140 	mutex_lock(&tee_ta_mutex);
141 
142 	assert(ctx->busy);
143 	ctx->busy = false;
144 	condvar_signal(&ctx->busy_cv);
145 
146 	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
147 		unlock_single_instance();
148 
149 	mutex_unlock(&tee_ta_mutex);
150 }
151 
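/*
 * Drop one reference to the session and wake a thread waiting in
 * tee_ta_unlink_session() once only that thread's reference remains.
 * Requires tee_ta_mutex to be held.
 */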
152 static void dec_session_ref_count(struct tee_ta_session *s)
153 {
154 	assert(s->ref_count > 0);
155 	s->ref_count--;
156 	if (s->ref_count == 1)
157 		condvar_signal(&s->refc_cv);
158 }
159 
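/*
 * Release a session reference taken with tee_ta_get_session(). If the
 * calling thread holds the exclusive session lock it is released as well.
 */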
160 void tee_ta_put_session(struct tee_ta_session *s)
161 {
162 	mutex_lock(&tee_ta_mutex);
163 
164 	if (s->lock_thread == thread_get_id()) {
165 		s->lock_thread = THREAD_ID_INVALID;
166 		condvar_signal(&s->lock_cv);
167 	}
168 	dec_session_ref_count(s);
169 
170 	mutex_unlock(&tee_ta_mutex);
171 }
172 
173 static struct tee_ta_session *find_session(uint32_t id,
174 			struct tee_ta_session_head *open_sessions)
175 {
176 	struct tee_ta_session *s;
177 
178 	TAILQ_FOREACH(s, open_sessions, link) {
179 		if ((vaddr_t)s == id)
180 			return s;
181 	}
182 	return NULL;
183 }
184 
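/*
 * Look up a session by identifier in open_sessions and take a reference
 * to it. When exclusive is true the per-session lock is also taken,
 * waiting until no other thread holds it. Returns NULL if the session
 * isn't found or is being unlinked.
 */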
185 struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
186 			struct tee_ta_session_head *open_sessions)
187 {
188 	struct tee_ta_session *s;
189 
190 	mutex_lock(&tee_ta_mutex);
191 
192 	while (true) {
193 		s = find_session(id, open_sessions);
194 		if (!s)
195 			break;
196 		if (s->unlink) {
197 			s = NULL;
198 			break;
199 		}
200 		s->ref_count++;
201 		if (!exclusive)
202 			break;
203 
204 		assert(s->lock_thread != thread_get_id());
205 
206 		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
207 			condvar_wait(&s->lock_cv, &tee_ta_mutex);
208 
209 		if (s->unlink) {
210 			dec_session_ref_count(s);
211 			s = NULL;
212 			break;
213 		}
214 
215 		s->lock_thread = thread_get_id();
216 		break;
217 	}
218 
219 	mutex_unlock(&tee_ta_mutex);
220 	return s;
221 }
222 
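/*
 * Mark the session for removal, wait until all other references have
 * been dropped and then remove the session from open_sessions. The
 * caller must hold the exclusive session lock and one reference.
 */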
223 static void tee_ta_unlink_session(struct tee_ta_session *s,
224 			struct tee_ta_session_head *open_sessions)
225 {
226 	mutex_lock(&tee_ta_mutex);
227 
228 	assert(s->ref_count >= 1);
229 	assert(s->lock_thread == thread_get_id());
230 	assert(!s->unlink);
231 
232 	s->unlink = true;
233 	condvar_broadcast(&s->lock_cv);
234 
235 	while (s->ref_count != 1)
236 		condvar_wait(&s->refc_cv, &tee_ta_mutex);
237 
238 	TAILQ_REMOVE(open_sessions, s, link);
239 
240 	mutex_unlock(&tee_ta_mutex);
241 }
242 
243 /*
244  * tee_ta_context_find - Find a loaded TA context based on a UUID (input)
245  * Returns a pointer to the TA context, or NULL if it isn't found
246  */
247 static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
248 {
249 	struct tee_ta_ctx *ctx;
250 
251 	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
252 		if (memcmp(&ctx->uuid, uuid, sizeof(TEE_UUID)) == 0)
253 			return ctx;
254 	}
255 
256 	return NULL;
257 }
258 
259 /* Check if the requester (client ID) matches the session's initial client */
260 static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
261 {
262 	if (id == KERN_IDENTITY)
263 		return TEE_SUCCESS;
264 
265 	if (id == NSAPP_IDENTITY) {
266 		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
267 			DMSG("nsec tries to hijack TA session");
268 			return TEE_ERROR_ACCESS_DENIED;
269 		}
270 		return TEE_SUCCESS;
271 	}
272 
273 	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
274 		DMSG("client id mismatch");
275 		return TEE_ERROR_ACCESS_DENIED;
276 	}
277 	return TEE_SUCCESS;
278 }
279 
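/*
 * Convert a relative cancellation timeout in milliseconds into an
 * absolute deadline stored in the session. TEE_TIMEOUT_INFINITE, a
 * failure to read the system time or a wrapping sum all result in
 * { UINT32_MAX, UINT32_MAX }, which tee_ta_session_is_cancelled()
 * treats as "never".
 */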
280 static void set_invoke_timeout(struct tee_ta_session *sess,
281 				      uint32_t cancel_req_to)
282 {
283 	TEE_Time current_time;
284 	TEE_Time cancel_time = { UINT32_MAX, UINT32_MAX };
285 
286 	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
287 		goto out;
288 
289 	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
290 		goto out;
291 
292 	/* Check that it doesn't wrap */
293 	if (current_time.seconds + (cancel_req_to / 1000) >=
294 	    current_time.seconds) {
295 		cancel_time.seconds =
296 		    current_time.seconds + cancel_req_to / 1000;
297 		cancel_time.millis = current_time.millis + cancel_req_to % 1000;
298 		if (cancel_time.millis >= 1000) {
299 			cancel_time.seconds++;
300 			cancel_time.millis -= 1000;
301 		}
302 	}
303 
304 out:
305 	sess->cancel_time = cancel_time;
306 }
307 
308 /*-----------------------------------------------------------------------------
309  * Close a Trusted Application session and free associated resources
310  *---------------------------------------------------------------------------*/
311 TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
312 				struct tee_ta_session_head *open_sessions,
313 				const TEE_Identity *clnt_id)
314 {
315 	struct tee_ta_session *sess;
316 	struct tee_ta_ctx *ctx;
317 
318 	DMSG("tee_ta_close_session(0x%" PRIxVA ")",  (vaddr_t)csess);
319 
320 	if (!csess)
321 		return TEE_ERROR_ITEM_NOT_FOUND;
322 
323 	sess = tee_ta_get_session((vaddr_t)csess, true, open_sessions);
324 
325 	if (!sess) {
326 		EMSG("session 0x%" PRIxVA " to be removed is not found",
327 		     (vaddr_t)csess);
328 		return TEE_ERROR_ITEM_NOT_FOUND;
329 	}
330 
331 	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
332 		tee_ta_put_session(sess);
333 		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
334 	}
335 
336 	ctx = sess->ctx;
337 	DMSG("   ... Destroy session");
338 
339 	tee_ta_set_busy(ctx);
340 
341 	if (!ctx->panicked) {
342 		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
343 		ctx->ops->enter_close_session(sess);
344 	}
345 
346 	tee_ta_unlink_session(sess, open_sessions);
347 	free(sess);
348 
349 	tee_ta_clear_busy(ctx);
350 
351 	mutex_lock(&tee_ta_mutex);
352 
353 	TEE_ASSERT(ctx->ref_count > 0);
354 	ctx->ref_count--;
355 	if (!ctx->ref_count && !(ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE)) {
356 		DMSG("   ... Destroy TA ctx");
357 
358 		TAILQ_REMOVE(&tee_ctxes, ctx, link);
359 		mutex_unlock(&tee_ta_mutex);
360 
361 		condvar_destroy(&ctx->busy_cv);
362 
363 		ctx->ops->destroy(ctx);
364 	} else
365 		mutex_unlock(&tee_ta_mutex);
366 
367 	return TEE_SUCCESS;
368 }
369 
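/*
 * Try to bind the new session to an already loaded TA context. This is
 * only allowed for single-instance TAs that are either multi-session or
 * a keep-alive instance without any open sessions.
 */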
370 static TEE_Result tee_ta_init_session_with_context(struct tee_ta_ctx *ctx,
371 			struct tee_ta_session *s)
372 {
373 	/*
374 	 * If the TA isn't single-instance it should be loaded as a new
375 	 * instance instead of doing anything with this instance.
376 	 * So tell the caller that we didn't find the TA, and the
377 	 * caller will load a new instance.
378 	 */
379 	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
380 		return TEE_ERROR_ITEM_NOT_FOUND;
381 
382 	/*
383 	 * The TA is single-instance; if it isn't multi-session we
384 	 * can't create another session unless it's the first
385 	 * new session towards a keep-alive TA.
386 	 */
387 
388 	if (((ctx->flags & TA_FLAG_MULTI_SESSION) == 0) &&
389 	    !(((ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) != 0) &&
390 	      (ctx->ref_count == 0)))
391 		return TEE_ERROR_BUSY;
392 
393 	DMSG("   ... Re-open TA %pUl", (void *)&ctx->uuid);
394 
395 	ctx->ref_count++;
396 	s->ctx = ctx;
397 	return TEE_SUCCESS;
398 }
401 
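/*
 * Allocate a new session, add it to open_sessions and bind it to a TA
 * context: reuse an already loaded instance when possible, otherwise
 * look for a static TA and finally try to load a user TA.
 */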
402 static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
403 				struct tee_ta_session_head *open_sessions,
404 				const TEE_UUID *uuid,
405 				struct tee_ta_session **sess)
406 {
407 	TEE_Result res;
408 	struct tee_ta_ctx *ctx;
409 	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));
410 
411 	*err = TEE_ORIGIN_TEE;
412 	if (!s)
413 		return TEE_ERROR_OUT_OF_MEMORY;
414 
415 	s->cancel_mask = true;
416 	condvar_init(&s->refc_cv);
417 	condvar_init(&s->lock_cv);
418 	s->lock_thread = THREAD_ID_INVALID;
419 	s->ref_count = 1;
420 
421 
422 	/*
423 	 * We take the global TA mutex here and hold it while doing
424 	 * RPC to load the TA. This big critical section should be broken
425 	 * down into smaller pieces.
426 	 */
427 
428 
429 	mutex_lock(&tee_ta_mutex);
430 	TAILQ_INSERT_TAIL(open_sessions, s, link);
431 
432 	/* Look for already loaded TA */
433 	ctx = tee_ta_context_find(uuid);
434 	if (ctx) {
435 		res = tee_ta_init_session_with_context(ctx, s);
436 		if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
437 			goto out;
438 	}
439 
440 	/* Look for static TA */
441 	res = tee_ta_init_static_ta_session(uuid, s);
442 	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
443 		goto out;
444 
445 	/* Look for user TA */
446 	res = tee_ta_init_user_ta_session(uuid, s);
447 
448 out:
449 	if (res == TEE_SUCCESS) {
450 		*sess = s;
451 	} else {
452 		TAILQ_REMOVE(open_sessions, s, link);
453 		free(s);
454 	}
455 	mutex_unlock(&tee_ta_mutex);
456 	return res;
457 }
458 
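/*
 * Open a session towards the TA identified by uuid on behalf of clnt_id
 * and run the TA's open-session entry point. On failure the session is
 * closed again and *err tells where the error originated.
 */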
459 TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
460 			       struct tee_ta_session **sess,
461 			       struct tee_ta_session_head *open_sessions,
462 			       const TEE_UUID *uuid,
463 			       const TEE_Identity *clnt_id,
464 			       uint32_t cancel_req_to,
465 			       struct tee_ta_param *param)
466 {
467 	TEE_Result res;
468 	struct tee_ta_session *s = NULL;
469 	struct tee_ta_ctx *ctx;
470 	bool panicked;
471 	bool was_busy = false;
472 
473 	res = tee_ta_init_session(err, open_sessions, uuid, &s);
474 	if (res != TEE_SUCCESS) {
475 		DMSG("init session failed 0x%x", res);
476 		return res;
477 	}
478 
479 	ctx = s->ctx;
480 
481 	if (ctx->panicked) {
482 		DMSG("panicked, call tee_ta_close_session()");
483 		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
484 		*err = TEE_ORIGIN_TEE;
485 		return TEE_ERROR_TARGET_DEAD;
486 	}
487 
488 	*sess = s;
489 	/* Save identity of the owner of the session */
490 	s->clnt_id = *clnt_id;
491 
492 	res = tee_ta_verify_param(s, param);
493 	if (res == TEE_SUCCESS) {
494 		if (tee_ta_try_set_busy(ctx)) {
495 			set_invoke_timeout(s, cancel_req_to);
496 			res = ctx->ops->enter_open_session(s, param, err);
497 			tee_ta_clear_busy(ctx);
498 		} else {
499 			/* Deadlock avoided */
500 			res = TEE_ERROR_BUSY;
501 			was_busy = true;
502 		}
503 	}
504 
505 	panicked = ctx->panicked;
506 
507 	tee_ta_put_session(s);
508 	if (panicked || (res != TEE_SUCCESS))
509 		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
510 
511 	/*
512 	 * The error origin is TEE_ORIGIN_TRUSTED_APP for a "regular" error,
513 	 * except when the TA panicked or was busy.
514 	 */
515 	if (panicked || was_busy)
516 		*err = TEE_ORIGIN_TEE;
517 	else
518 		*err = TEE_ORIGIN_TRUSTED_APP;
519 
520 	if (res != TEE_SUCCESS)
521 		EMSG("Failed. Return error 0x%x", res);
522 
523 	return res;
524 }
525 
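/*
 * Invoke command cmd in an already open session after checking that the
 * caller matches the client that opened the session. The TA context is
 * kept busy for the duration of the call.
 */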
526 TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
527 				 struct tee_ta_session *sess,
528 				 const TEE_Identity *clnt_id,
529 				 uint32_t cancel_req_to, uint32_t cmd,
530 				 struct tee_ta_param *param)
531 {
532 	TEE_Result res;
533 
534 	if (check_client(sess, clnt_id) != TEE_SUCCESS)
535 		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
536 
537 	if (sess->ctx->panicked) {
538 		DMSG("   Panicked !");
539 		*err = TEE_ORIGIN_TEE;
540 		return TEE_ERROR_TARGET_DEAD;
541 	}
542 
543 	tee_ta_set_busy(sess->ctx);
544 
545 	res = tee_ta_verify_param(sess, param);
546 	if (res != TEE_SUCCESS) {
547 		*err = TEE_ORIGIN_TEE;
548 		goto function_exit;
549 	}
550 
551 	set_invoke_timeout(sess, cancel_req_to);
552 	res = sess->ctx->ops->enter_invoke_cmd(sess, cmd, param, err);
553 
554 	if (sess->ctx->panicked) {
555 		*err = TEE_ORIGIN_TEE;
556 		res = TEE_ERROR_TARGET_DEAD;
557 	}
558 
559 function_exit:
560 	tee_ta_clear_busy(sess->ctx);
561 	if (res != TEE_SUCCESS)
562 		DMSG("  => Error: %x of %d\n", res, *err);
563 	return res;
564 }
565 
566 TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
567 				 struct tee_ta_session *sess,
568 				 const TEE_Identity *clnt_id)
569 {
570 	*err = TEE_ORIGIN_TEE;
571 
572 	if (check_client(sess, clnt_id) != TEE_SUCCESS)
573 		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
574 
575 	sess->cancel = true;
576 	return TEE_SUCCESS;
577 }
578 
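/*
 * Return true if the pending request in the session should be cancelled,
 * either because cancellation was requested explicitly or because the
 * cancellation deadline has passed. Always returns false while
 * cancellation is masked.
 */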
579 bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
580 {
581 	TEE_Time current_time;
582 
583 	if (s->cancel_mask)
584 		return false;
585 
586 	if (s->cancel)
587 		return true;
588 
589 	if (s->cancel_time.seconds == UINT32_MAX)
590 		return false;
591 
592 	if (curr_time != NULL)
593 		current_time = *curr_time;
594 	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
595 		return false;
596 
597 	if (current_time.seconds > s->cancel_time.seconds ||
598 	    (current_time.seconds == s->cancel_time.seconds &&
599 	     current_time.millis >= s->cancel_time.millis)) {
600 		return true;
601 	}
602 
603 	return false;
604 }
605 
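/*
 * Make the memory context of the topmost non-static TA on this thread's
 * session stack the active one, or deactivate the user mapping when
 * there is no such TA.
 */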
606 static void update_current_ctx(struct thread_specific_data *tsd)
607 {
608 	struct tee_ta_ctx *ctx = NULL;
609 	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);
610 
611 	if (s) {
612 		if (is_static_ta_ctx(s->ctx))
613 			s = TAILQ_NEXT(s, link_tsd);
614 
615 		if (s)
616 			ctx = s->ctx;
617 	}
618 
619 	if (tsd->ctx != ctx)
620 		tee_mmu_set_ctx(ctx);
621 	/*
622 	 * If ctx->mmu == NULL the user mapping must not be active;
623 	 * if ctx->mmu != NULL the user mapping must be active.
624 	 */
625 	TEE_ASSERT(((ctx && is_user_ta_ctx(ctx) ?
626 			to_user_ta_ctx(ctx)->mmu : NULL) == NULL) ==
627 		!core_mmu_user_mapping_is_active());
628 }
629 
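/*
 * Push the session onto this thread's session stack and switch to the
 * corresponding memory context.
 */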
630 void tee_ta_push_current_session(struct tee_ta_session *sess)
631 {
632 	struct thread_specific_data *tsd = thread_get_tsd();
633 
634 	TAILQ_INSERT_HEAD(&tsd->sess_stack, sess, link_tsd);
635 	update_current_ctx(tsd);
636 }
637 
638 struct tee_ta_session *tee_ta_pop_current_session(void)
639 {
640 	struct thread_specific_data *tsd = thread_get_tsd();
641 	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);
642 
643 	if (s) {
644 		TAILQ_REMOVE(&tsd->sess_stack, s, link_tsd);
645 		update_current_ctx(tsd);
646 	}
647 	return s;
648 }
649 
650 TEE_Result tee_ta_get_current_session(struct tee_ta_session **sess)
651 {
652 	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);
653 
654 	if (!s)
655 		return TEE_ERROR_BAD_STATE;
656 	*sess = s;
657 	return TEE_SUCCESS;
658 }
659 
660 struct tee_ta_session *tee_ta_get_calling_session(void)
661 {
662 	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);
663 
664 	if (s)
665 		s = TAILQ_NEXT(s, link_tsd);
666 	return s;
667 }
668 
669 TEE_Result tee_ta_get_client_id(TEE_Identity *id)
670 {
671 	TEE_Result res;
672 	struct tee_ta_session *sess;
673 
674 	res = tee_ta_get_current_session(&sess);
675 	if (res != TEE_SUCCESS)
676 		return res;
677 
678 	if (id == NULL)
679 		return TEE_ERROR_BAD_PARAMETERS;
680 
681 	*id = sess->clnt_id;
682 	return TEE_SUCCESS;
683 }
684 
685 /*
686  * dump_state - Display TA state as an error log.
687  */
688 static void dump_state(struct tee_ta_ctx *ctx)
689 {
690 	struct tee_ta_session *s = NULL;
691 	bool active __maybe_unused;
692 
693 	active = ((tee_ta_get_current_session(&s) == TEE_SUCCESS) &&
694 		  s && s->ctx == ctx);
695 
696 	EMSG_RAW("Status of TA %pUl (%p) %s", (void *)&ctx->uuid, (void *)ctx,
697 		active ? "(active)" : "");
698 	ctx->ops->dump_state(ctx);
699 }
700 
701 void tee_ta_dump_current(void)
702 {
703 	struct tee_ta_session *s = NULL;
704 
705 	if (tee_ta_get_current_session(&s) != TEE_SUCCESS) {
706 		EMSG("no valid session found, cannot log TA status");
707 		return;
708 	}
709 
710 	dump_state(s->ctx);
711 }
712