xref: /optee_os/core/kernel/tee_ta_manager.c (revision bceeadce69199418868c558f3131a5a51b927927)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <types_ext.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <arm.h>
#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_ta.h>
#include <mm/core_mmu.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mmu.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_storage.h>
#include <tee_api_types.h>
#include <trace.h>
#include <utee_types.h>
#include <util.h>

/* This mutex protects the critical section in tee_ta_init_session */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available. */
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}

static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}
#endif

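/*
 * Claim exclusive entry into a TA context for TAs without
 * TA_FLAG_CONCURRENT. Sketch of the protocol implemented below: take
 * the single-instance lock if the TA is single-instance, then wait for
 * ctx->busy to clear. If this thread already holds the single-instance
 * lock while the TA is busy, waiting would deadlock, so the attempt is
 * abandoned and false is returned instead.
 */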
static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	if (ctx->flags & TA_FLAG_CONCURRENT)
		return true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We're holding the single-instance lock and the
			 * TA is busy. Waiting now would only cause a
			 * deadlock, so we release the lock and return
			 * false.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, so we're
		 * free to wait for the TA to become available.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either it's already true or we should set it to true */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	if (ctx->flags & TA_FLAG_CONCURRENT)
		return;

	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		unlock_single_instance();

	mutex_unlock(&tee_ta_mutex);
}

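/*
 * Drop one reference to a session. tee_ta_unlink_session() sleeps until
 * only the unlinking thread's own reference is left, which is why
 * refc_cv is signaled when the count reaches 1 rather than 0.
 */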
static void dec_session_ref_count(struct tee_ta_session *s)
{
	assert(s->ref_count > 0);
	s->ref_count--;
	if (s->ref_count == 1)
		condvar_signal(&s->refc_cv);
}

void tee_ta_put_session(struct tee_ta_session *s)
{
	mutex_lock(&tee_ta_mutex);

	if (s->lock_thread == thread_get_id()) {
		s->lock_thread = THREAD_ID_INVALID;
		condvar_signal(&s->lock_cv);
	}
	dec_session_ref_count(s);

	mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *find_session(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	TAILQ_FOREACH(s, open_sessions, link) {
		if ((vaddr_t)s == id)
			return s;
	}
	return NULL;
}

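/*
 * Look up a session by identifier and take a reference to it. The
 * identifier is the address of the session itself, as matched by
 * find_session() above, e.g. as used by tee_ta_close_session():
 *
 *	sess = tee_ta_get_session((vaddr_t)csess, true, open_sessions);
 *
 * With 'exclusive' set, the calling thread also becomes the sole lock
 * holder of the session. NULL is returned if the session is unknown or
 * is being unlinked.
 */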
struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	mutex_lock(&tee_ta_mutex);

	while (true) {
		s = find_session(id, open_sessions);
		if (!s)
			break;
		if (s->unlink) {
			s = NULL;
			break;
		}
		s->ref_count++;
		if (!exclusive)
			break;

		assert(s->lock_thread != thread_get_id());

		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
			condvar_wait(&s->lock_cv, &tee_ta_mutex);

		if (s->unlink) {
			dec_session_ref_count(s);
			s = NULL;
			break;
		}

		s->lock_thread = thread_get_id();
		break;
	}

	mutex_unlock(&tee_ta_mutex);
	return s;
}

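/*
 * Detach a session from the list of open sessions. The caller must hold
 * the session lock. Other threads parked on lock_cv are woken so they
 * can observe 'unlink' and back off, and the removal waits until only
 * the caller's own reference remains.
 */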
static void tee_ta_unlink_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	mutex_lock(&tee_ta_mutex);

	assert(s->ref_count >= 1);
	assert(s->lock_thread == thread_get_id());
	assert(!s->unlink);

	s->unlink = true;
	condvar_broadcast(&s->lock_cv);

	while (s->ref_count != 1)
		condvar_wait(&s->refc_cv, &tee_ta_mutex);

	TAILQ_REMOVE(open_sessions, s, link);

	mutex_unlock(&tee_ta_mutex);
}

/*
 * tee_ta_context_find - Find a loaded TA context by UUID
 * Returns a pointer to the TA context, or NULL if it isn't loaded.
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx;

	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (memcmp(&ctx->uuid, uuid, sizeof(TEE_UUID)) == 0)
			return ctx;
	}

	return NULL;
}

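/*
 * KERN_IDENTITY and NSAPP_IDENTITY (kernel/tee_ta_manager.h) are
 * sentinel pointer values denoting a kernel-initiated request and a
 * non-secure client; check_client() compares them by address and only
 * dereferences 'id' when it is a real identity.
 */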
/* Check if the requester (client ID) matches the session's initial client */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
	if (id == KERN_IDENTITY)
		return TEE_SUCCESS;

	if (id == NSAPP_IDENTITY) {
		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
			DMSG("nsec tries to hijack TA session");
			return TEE_ERROR_ACCESS_DENIED;
		}
		return TEE_SUCCESS;
	}

	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
		DMSG("client id mismatch");
		return TEE_ERROR_ACCESS_DENIED;
	}
	return TEE_SUCCESS;
}

/*
 * Check if invocation parameters match the TA properties
 *
 * @sess - current session handle
 * @param - already identified memory references hold a valid 'mobj'.
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy).
 * - Only SDP-flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
			 struct tee_ta_param *param __unused)
{
	/*
	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
	 * are rejected at OP-TEE core entry. Hence all TAs have the same
	 * permissions regarding memory reference parameters.
	 */
	return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
			 struct tee_ta_param *param)
{
	int n;

	/*
	 * When CFG_SECURE_DATA_PATH is enabled, OP-TEE entry allows SHM and
	 * SDP memory references. Only TAs flagged SDP can access SDP memory.
	 */
	if (sess->ctx->flags & TA_FLAG_SECURE_DATA_PATH)
		return true;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		struct param_mem *mem = &param->u[n].mem;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!mem->size)
			continue;
		if (mobj_is_sdp_mem(mem->mobj))
			return false;
	}
	return true;
}
#endif

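/*
 * Worked example (hypothetical values): with a current time of
 * 100 s / 600 ms and cancel_req_to == 2500 ms, the code below first
 * yields 102 s / 1100 ms, then carries the excess millisecond part into
 * the seconds field for a final cancel_time of 103 s / 100 ms. Any
 * overflow falls back to an infinite timeout.
 */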
static void set_invoke_timeout(struct tee_ta_session *sess,
				      uint32_t cancel_req_to)
{
	TEE_Time current_time;
	TEE_Time cancel_time;

	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
		goto infinite;

	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		goto infinite;

	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
			 &cancel_time.seconds))
		goto infinite;

	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
	if (cancel_time.millis > 1000) {
		/* Carry the excess milliseconds into the seconds field */
		if (ADD_OVERFLOW(cancel_time.seconds, 1,
				 &cancel_time.seconds))
			goto infinite;

		cancel_time.millis -= 1000;
	}

	sess->cancel_time = cancel_time;
	return;

infinite:
	sess->cancel_time.seconds = UINT32_MAX;
	sess->cancel_time.millis = UINT32_MAX;
}

/*-----------------------------------------------------------------------------
 * Close a Trusted Application and free associated resources
 *---------------------------------------------------------------------------*/
TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
				struct tee_ta_session_head *open_sessions,
				const TEE_Identity *clnt_id)
{
	struct tee_ta_session *sess;
	struct tee_ta_ctx *ctx;
	bool keep_alive;

	DMSG("tee_ta_close_session(0x%" PRIxVA ")", (vaddr_t)csess);

	if (!csess)
		return TEE_ERROR_ITEM_NOT_FOUND;

	sess = tee_ta_get_session((vaddr_t)csess, true, open_sessions);

	if (!sess) {
		EMSG("session 0x%" PRIxVA " to be removed is not found",
		     (vaddr_t)csess);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
		tee_ta_put_session(sess);
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
	}

	ctx = sess->ctx;
	DMSG("Destroy session");

	tee_ta_set_busy(ctx);

	if (!ctx->panicked) {
		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
		ctx->ops->enter_close_session(sess);
	}

	tee_ta_unlink_session(sess, open_sessions);
#if defined(CFG_TA_GPROF_SUPPORT)
	free(sess->sbuf);
#endif
	free(sess);

	tee_ta_clear_busy(ctx);

	mutex_lock(&tee_ta_mutex);

	if (ctx->ref_count <= 0)
		panic();

	ctx->ref_count--;
	keep_alive = (ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) &&
			(ctx->flags & TA_FLAG_SINGLE_INSTANCE);
	if (!ctx->ref_count && !keep_alive) {
		DMSG("Destroy TA ctx");

		TAILQ_REMOVE(&tee_ctxes, ctx, link);
		mutex_unlock(&tee_ta_mutex);

		condvar_destroy(&ctx->busy_cv);

		pgt_flush_ctx(ctx);
		ctx->ops->destroy(ctx);
	} else
		mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}

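/*
 * Try to attach a new session to an already loaded TA context. Only
 * single-instance TAs share a context; for other TAs the caller is told
 * the context wasn't found so that a fresh instance gets loaded.
 */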
static TEE_Result tee_ta_init_session_with_context(struct tee_ta_ctx *ctx,
			struct tee_ta_session *s)
{
	/*
	 * If the TA isn't single-instance it should be loaded as a new
	 * instance instead of doing anything with this instance.
	 * So tell the caller that we didn't find the TA; the caller will
	 * then load a new instance.
	 */
	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * The TA is single-instance. If it isn't multi-session we can't
	 * create another session unless its reference count is zero.
	 */
	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
		return TEE_ERROR_BUSY;

	DMSG("Re-open TA %pUl", (void *)&ctx->uuid);

	ctx->ref_count++;
	s->ctx = ctx;
	return TEE_SUCCESS;
}

static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
				struct tee_ta_session_head *open_sessions,
				const TEE_UUID *uuid,
				struct tee_ta_session **sess)
{
	TEE_Result res;
	struct tee_ta_ctx *ctx;
	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

	*err = TEE_ORIGIN_TEE;
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->cancel_mask = true;
	condvar_init(&s->refc_cv);
	condvar_init(&s->lock_cv);
	s->lock_thread = THREAD_ID_INVALID;
	s->ref_count = 1;

	/*
	 * We take the global TA mutex here and hold it while doing
	 * RPC to load the TA. This big critical section should be broken
	 * down into smaller pieces.
	 */
	mutex_lock(&tee_ta_mutex);
	TAILQ_INSERT_TAIL(open_sessions, s, link);

	/* Look for an already loaded TA */
	ctx = tee_ta_context_find(uuid);
	if (ctx) {
		res = tee_ta_init_session_with_context(ctx, s);
		if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
			goto out;
	}

	/* Look for a pseudo TA */
	res = tee_ta_init_pseudo_ta_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for a user TA */
	res = tee_ta_init_user_ta_session(uuid, s);

out:
	if (res == TEE_SUCCESS) {
		*sess = s;
	} else {
		TAILQ_REMOVE(open_sessions, s, link);
		free(s);
	}
	mutex_unlock(&tee_ta_mutex);
	return res;
}

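/*
 * Illustrative call sequence (variable names here are hypothetical;
 * the caller owns the actual open_sessions list):
 *
 *	struct tee_ta_session *s = NULL;
 *	TEE_ErrorOrigin err = TEE_ORIGIN_TEE;
 *	TEE_Result res;
 *
 *	res = tee_ta_open_session(&err, &s, &open_sessions, &uuid,
 *				  &clnt_id, TEE_TIMEOUT_INFINITE, &param);
 *
 * On TEE_SUCCESS the new session is returned through *sess and is later
 * identified by its address in invoke/close calls.
 */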
TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
			       struct tee_ta_session **sess,
			       struct tee_ta_session_head *open_sessions,
			       const TEE_UUID *uuid,
			       const TEE_Identity *clnt_id,
			       uint32_t cancel_req_to,
			       struct tee_ta_param *param)
{
	TEE_Result res;
	struct tee_ta_session *s = NULL;
	struct tee_ta_ctx *ctx;
	bool panicked;
	bool was_busy = false;

	res = tee_ta_init_session(err, open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("init session failed 0x%x", res);
		return res;
	}

	if (!check_params(s, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ctx = s->ctx;

	if (ctx->panicked) {
		DMSG("panicked, call tee_ta_close_session()");
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	*sess = s;
	/* Save the identity of the owner of the session */
	s->clnt_id = *clnt_id;

	if (tee_ta_try_set_busy(ctx)) {
		set_invoke_timeout(s, cancel_req_to);
		res = ctx->ops->enter_open_session(s, param, err);
		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
		was_busy = true;
	}

	panicked = ctx->panicked;

	tee_ta_put_session(s);
	if (panicked || (res != TEE_SUCCESS))
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors;
	 * panics and busy TAs are reported with TEE_ORIGIN_TEE.
	 */
	if (panicked || was_busy)
		*err = TEE_ORIGIN_TEE;
	else
		*err = TEE_ORIGIN_TRUSTED_APP;

	if (res != TEE_SUCCESS)
		EMSG("Failed. Return error 0x%x", res);

	return res;
}

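/*
 * Invoke a command in an open session on behalf of clnt_id. Entry into
 * non-concurrent TAs is serialized by the set_busy()/clear_busy() pair;
 * a panic inside the TA is converted to TEE_ERROR_TARGET_DEAD with the
 * error origin set to the TEE.
 */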
TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id,
				 uint32_t cancel_req_to, uint32_t cmd,
				 struct tee_ta_param *param)
{
	TEE_Result res;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	if (!check_params(sess, param))
		return TEE_ERROR_BAD_PARAMETERS;

	if (sess->ctx->panicked) {
		DMSG("Panicked !");
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	tee_ta_set_busy(sess->ctx);

	set_invoke_timeout(sess, cancel_req_to);
	res = sess->ctx->ops->enter_invoke_cmd(sess, cmd, param, err);

	if (sess->ctx->panicked) {
		*err = TEE_ORIGIN_TEE;
		res = TEE_ERROR_TARGET_DEAD;
	}

	tee_ta_clear_busy(sess->ctx);

	/* A short buffer is not a genuine error case */
	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
		DMSG("Error: %x of %d\n", res, *err);

	return res;
}

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id)
{
	*err = TEE_ORIGIN_TEE;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	sess->cancel = true;
	return TEE_SUCCESS;
}

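/*
 * Cancellation checks below are ordered: the TA-controlled cancel mask
 * wins, then an explicit cancel request, and last the timeout armed by
 * set_invoke_timeout(), where a seconds value of UINT32_MAX means the
 * timeout is infinite.
 */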
bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}

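/*
 * Recompute which TA context the current thread should have mapped from
 * the top of the per-thread session stack. Pseudo TAs execute in kernel
 * mode, so a topmost pseudo TA session is skipped in favor of the
 * session beneath it, if any.
 */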
static void update_current_ctx(struct thread_specific_data *tsd)
{
	struct tee_ta_ctx *ctx = NULL;
	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);

	if (s) {
		if (is_pseudo_ta_ctx(s->ctx))
			s = TAILQ_NEXT(s, link_tsd);

		if (s)
			ctx = s->ctx;
	}

	if (tsd->ctx != ctx)
		tee_mmu_set_ctx(ctx);
	/*
	 * If the context has no user mapping (vm_info == NULL) the user
	 * mapping must not be active, and if it has one the mapping must
	 * be active.
	 */
	if (((ctx && is_user_ta_ctx(ctx) ?
			to_user_ta_ctx(ctx)->vm_info : NULL) == NULL) ==
					core_mmu_user_mapping_is_active())
		panic("unexpected active mapping");
}

void tee_ta_push_current_session(struct tee_ta_session *sess)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	TAILQ_INSERT_HEAD(&tsd->sess_stack, sess, link_tsd);
	update_current_ctx(tsd);
}

struct tee_ta_session *tee_ta_pop_current_session(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);

	if (s) {
		TAILQ_REMOVE(&tsd->sess_stack, s, link_tsd);
		update_current_ctx(tsd);
	}
	return s;
}

TEE_Result tee_ta_get_current_session(struct tee_ta_session **sess)
{
	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (!s)
		return TEE_ERROR_BAD_STATE;
	*sess = s;
	return TEE_SUCCESS;
}

struct tee_ta_session *tee_ta_get_calling_session(void)
{
	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s)
		s = TAILQ_NEXT(s, link_tsd);
	return s;
}

/*
 * dump_state - Display TA state as an error log.
 */
static void dump_state(struct tee_ta_ctx *ctx)
{
	struct tee_ta_session *s = NULL;
	bool active __maybe_unused;

	active = ((tee_ta_get_current_session(&s) == TEE_SUCCESS) &&
		  s && s->ctx == ctx);

	EMSG_RAW("Status of TA %pUl (%p) %s", (void *)&ctx->uuid, (void *)ctx,
		active ? "(active)" : "");
	ctx->ops->dump_state(ctx);
}

void tee_ta_dump_current(void)
{
	struct tee_ta_session *s = NULL;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS) {
		EMSG("no valid session found, cannot log TA status");
		return;
	}

	dump_state(s->ctx);
}

#if defined(CFG_TA_GPROF_SUPPORT)
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
	struct tee_ta_session *s;
	struct sample_buf *sbuf;
	size_t idx;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		return;
	sbuf = s->sbuf;
	if (!sbuf || !sbuf->enabled)
		return; /* PC sampling is not enabled */

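	/*
	 * Histogram bucketing in the style of gprof: the PC offset is
	 * counted in two-byte units and scaled by sbuf->scale, which is
	 * treated as a 16.16 fixed-point ratio (hence the division by
	 * 65536).
	 */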
	idx = (((uint64_t)pc - sbuf->offset)/2 * sbuf->scale)/65536;
	if (idx < sbuf->nsamples)
		sbuf->samples[idx]++;
	sbuf->count++;
}

/*
 * Update user-mode CPU time for the current session
 * @suspend: true if session is being suspended (leaving user mode), false if
 * it is resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
	struct tee_ta_session *s;
	struct sample_buf *sbuf;
	uint64_t now;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		return;
	sbuf = s->sbuf;
	if (!sbuf)
		return;
	now = read_cntpct();
	if (suspend) {
		assert(sbuf->usr_entered);
		sbuf->usr += now - sbuf->usr_entered;
		sbuf->usr_entered = 0;
	} else {
		assert(!sbuf->usr_entered);
		if (!now)
			now++; /* 0 is reserved */
		sbuf->usr_entered = now;
	}
}

void tee_ta_update_session_utime_suspend(void)
{
	tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
	tee_ta_update_session_utime(false);
}
#endif