xref: /optee_os/core/kernel/tee_ta_manager.c (revision a1cbb728630308fcf902a8953a32cc972d14757e)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  */
5 
6 #include <types_ext.h>
7 #include <stdbool.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <arm.h>
12 #include <assert.h>
13 #include <kernel/mutex.h>
14 #include <kernel/panic.h>
15 #include <kernel/pseudo_ta.h>
16 #include <kernel/tee_common.h>
17 #include <kernel/tee_misc.h>
18 #include <kernel/tee_ta_manager.h>
19 #include <kernel/tee_time.h>
20 #include <kernel/thread.h>
21 #include <kernel/user_ta.h>
22 #include <mm/core_mmu.h>
23 #include <mm/core_memprot.h>
24 #include <mm/mobj.h>
25 #include <mm/tee_mmu.h>
26 #include <tee/tee_svc_cryp.h>
27 #include <tee/tee_obj.h>
28 #include <tee/tee_svc_storage.h>
29 #include <tee_api_types.h>
30 #include <trace.h>
31 #include <utee_types.h>
32 #include <util.h>
33 
34 /* Protects tee_ctxes, the open-session lists and the critical section in tee_ta_init_session() */
35 struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
36 struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);
37 
38 #ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
39 static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
40 static int tee_ta_single_instance_thread = THREAD_ID_INVALID;
41 static size_t tee_ta_single_instance_count;
42 #endif
43 
44 #ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
45 static void lock_single_instance(void)
46 {
47 }
48 
49 static void unlock_single_instance(void)
50 {
51 }
52 
53 static bool has_single_instance_lock(void)
54 {
55 	return false;
56 }
57 #else
58 static void lock_single_instance(void)
59 {
60 	/* Requires tee_ta_mutex to be held */
61 	if (tee_ta_single_instance_thread != thread_get_id()) {
62 		/* Wait until the single-instance lock is available. */
63 		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
64 			condvar_wait(&tee_ta_cv, &tee_ta_mutex);
65 
66 		tee_ta_single_instance_thread = thread_get_id();
67 		assert(tee_ta_single_instance_count == 0);
68 	}
69 
70 	tee_ta_single_instance_count++;
71 }
72 
73 static void unlock_single_instance(void)
74 {
75 	/* Requires tee_ta_mutex to be held */
76 	assert(tee_ta_single_instance_thread == thread_get_id());
77 	assert(tee_ta_single_instance_count > 0);
78 
79 	tee_ta_single_instance_count--;
80 	if (tee_ta_single_instance_count == 0) {
81 		tee_ta_single_instance_thread = THREAD_ID_INVALID;
82 		condvar_signal(&tee_ta_cv);
83 	}
84 }
85 
86 static bool has_single_instance_lock(void)
87 {
88 	/* Requires tee_ta_mutex to be held */
89 	return tee_ta_single_instance_thread == thread_get_id();
90 }
91 #endif
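
/*
 * Summary of the single-instance lock protocol above: the lock is a
 * (thread, count) pair protected by tee_ta_mutex. A thread that doesn't
 * hold the lock sleeps on tee_ta_cv until the lock is free, then records
 * its thread id; re-entry by the owner just bumps the count, so the lock
 * is recursive within one thread. The last unlock resets the owner to
 * THREAD_ID_INVALID and wakes one waiter. With
 * CFG_CONCURRENT_SINGLE_INSTANCE_TA the whole mechanism is compiled into
 * no-ops and single-instance TAs may run concurrently.
 */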
92 
93 static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
94 {
95 	bool rc = true;
96 
97 	if (ctx->flags & TA_FLAG_CONCURRENT)
98 		return true;
99 
100 	mutex_lock(&tee_ta_mutex);
101 
102 	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
103 		lock_single_instance();
104 
105 	if (has_single_instance_lock()) {
106 		if (ctx->busy) {
107 			/*
108 			 * We're holding the single-instance lock and the
109 			 * TA is busy. Waiting here could only deadlock,
110 			 * so release the lock and return false instead.
111 			 */
112 			rc = false;
113 			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
114 				unlock_single_instance();
115 		}
116 	} else {
117 		/*
118 		 * We're not holding the single-instance lock, we're free to
119 		 * wait for the TA to become available.
120 		 */
121 		while (ctx->busy)
122 			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
123 	}
124 
125 	/* Either it's already true (rc == false) or we claim the TA here */
126 	ctx->busy = true;
127 
128 	mutex_unlock(&tee_ta_mutex);
129 	return rc;
130 }
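
/*
 * Why tee_ta_try_set_busy() may fail: if this thread already owns the
 * single-instance lock and the target context is busy, the thread that
 * made it busy may itself be blocked in lock_single_instance() waiting
 * for the lock we hold. Sleeping on busy_cv here could therefore
 * deadlock, so the caller gets false and must give up, typically
 * surfacing TEE_ERROR_BUSY to the client.
 */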
131 
132 static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
133 {
134 	if (!tee_ta_try_set_busy(ctx))
135 		panic();
136 }
137 
138 static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
139 {
140 	if (ctx->flags & TA_FLAG_CONCURRENT)
141 		return;
142 
143 	mutex_lock(&tee_ta_mutex);
144 
145 	assert(ctx->busy);
146 	ctx->busy = false;
147 	condvar_signal(&ctx->busy_cv);
148 
149 	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
150 		unlock_single_instance();
151 
152 	mutex_unlock(&tee_ta_mutex);
153 }
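
/*
 * The busy helpers bracket every entry into a TA instance; the pattern,
 * as used by tee_ta_invoke_command() below, is:
 *
 *	tee_ta_set_busy(sess->ctx);
 *	set_invoke_timeout(sess, cancel_req_to);
 *	res = sess->ctx->ops->enter_invoke_cmd(sess, cmd, param, err);
 *	tee_ta_clear_busy(sess->ctx);
 */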
154 
155 static void dec_session_ref_count(struct tee_ta_session *s)
156 {
157 	assert(s->ref_count > 0);
158 	s->ref_count--;
159 	if (s->ref_count == 1)
160 		condvar_signal(&s->refc_cv);
161 }
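
/*
 * Note that dec_session_ref_count() signals refc_cv when the count drops
 * to 1 rather than 0: the only waiter is tee_ta_unlink_session(), which
 * still holds its own reference and sleeps until it is the sole
 * remaining one.
 */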
162 
163 void tee_ta_put_session(struct tee_ta_session *s)
164 {
165 	mutex_lock(&tee_ta_mutex);
166 
167 	if (s->lock_thread == thread_get_id()) {
168 		s->lock_thread = THREAD_ID_INVALID;
169 		condvar_signal(&s->lock_cv);
170 	}
171 	dec_session_ref_count(s);
172 
173 	mutex_unlock(&tee_ta_mutex);
174 }
175 
176 static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
177 			struct tee_ta_session_head *open_sessions)
178 {
179 	struct tee_ta_session *s = NULL;
180 	struct tee_ta_session *found = NULL;
181 
182 	TAILQ_FOREACH(s, open_sessions, link) {
183 		if (s->id == id) {
184 			found = s;
185 			break;
186 		}
187 	}
188 
189 	return found;
190 }
191 
192 struct tee_ta_session *tee_ta_find_session(uint32_t id,
193 			struct tee_ta_session_head *open_sessions)
194 {
195 	struct tee_ta_session *s = NULL;
196 
197 	mutex_lock(&tee_ta_mutex);
198 
199 	s = tee_ta_find_session_nolock(id, open_sessions);
200 
201 	mutex_unlock(&tee_ta_mutex);
202 
203 	return s;
204 }
205 
206 struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
207 			struct tee_ta_session_head *open_sessions)
208 {
209 	struct tee_ta_session *s;
210 
211 	mutex_lock(&tee_ta_mutex);
212 
213 	while (true) {
214 		s = tee_ta_find_session_nolock(id, open_sessions);
215 		if (!s)
216 			break;
217 		if (s->unlink) {
218 			s = NULL;
219 			break;
220 		}
221 		s->ref_count++;
222 		if (!exclusive)
223 			break;
224 
225 		assert(s->lock_thread != thread_get_id());
226 
227 		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
228 			condvar_wait(&s->lock_cv, &tee_ta_mutex);
229 
230 		if (s->unlink) {
231 			dec_session_ref_count(s);
232 			s = NULL;
233 			break;
234 		}
235 
236 		s->lock_thread = thread_get_id();
237 		break;
238 	}
239 
240 	mutex_unlock(&tee_ta_mutex);
241 	return s;
242 }
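
/*
 * With exclusive set, tee_ta_get_session() also acquires the per-session
 * lock (lock_thread), so at most one thread at a time can operate on the
 * session; both the reference and the lock are released by
 * tee_ta_put_session(). A session flagged for unlinking is treated as
 * already gone and NULL is returned.
 */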
243 
244 static void tee_ta_unlink_session(struct tee_ta_session *s,
245 			struct tee_ta_session_head *open_sessions)
246 {
247 	mutex_lock(&tee_ta_mutex);
248 
249 	assert(s->ref_count >= 1);
250 	assert(s->lock_thread == thread_get_id());
251 	assert(!s->unlink);
252 
253 	s->unlink = true;
254 	condvar_broadcast(&s->lock_cv);
255 
256 	while (s->ref_count != 1)
257 		condvar_wait(&s->refc_cv, &tee_ta_mutex);
258 
259 	TAILQ_REMOVE(open_sessions, s, link);
260 
261 	mutex_unlock(&tee_ta_mutex);
262 }
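
/*
 * Teardown is two-phase: tee_ta_unlink_session() first marks the session
 * with 'unlink' so that no new references can be taken, wakes any thread
 * blocked on lock_cv, then sleeps until its own reference is the last
 * one before removing the session from the list. Only after that may the
 * caller (tee_ta_close_session()) free the memory.
 */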
263 
264 /*
265  * tee_ta_context_find - Find a TA context in tee_ctxes based on a UUID
266  * Returns a pointer to the context, or NULL if not found
267  */
268 static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
269 {
270 	struct tee_ta_ctx *ctx;
271 
272 	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
273 		if (memcmp(&ctx->uuid, uuid, sizeof(TEE_UUID)) == 0)
274 			return ctx;
275 	}
276 
277 	return NULL;
278 }
279 
280 /* Check if the requester (client ID) matches the session's initial client */
281 static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
282 {
283 	if (id == KERN_IDENTITY)
284 		return TEE_SUCCESS;
285 
286 	if (id == NSAPP_IDENTITY) {
287 		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
288 			DMSG("nsec tries to hijack TA session");
289 			return TEE_ERROR_ACCESS_DENIED;
290 		}
291 		return TEE_SUCCESS;
292 	}
293 
294 	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
295 		DMSG("client id mismatch");
296 		return TEE_ERROR_ACCESS_DENIED;
297 	}
298 	return TEE_SUCCESS;
299 }
300 
301 /*
302  * Check if invocation parameters match the TA properties
303  *
304  * @sess - current session handle
305  * @param - already identified memory references hold a valid 'mobj'
306  *
307  * Policy:
308  * - All TAs can access 'non-secure' shared memory.
309  * - All TAs can access TEE private memory (seccpy).
310  * - Only SDP flagged TAs can accept SDP memory references.
311  */
312 #ifndef CFG_SECURE_DATA_PATH
313 static bool check_params(struct tee_ta_session *sess __unused,
314 			 struct tee_ta_param *param __unused)
315 {
316 	/*
317 	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
318 	 * are rejected at OP-TEE core entry. Hence here all TAs have same
319 	 * permissions regarding memory reference parameters.
320 	 */
321 	return true;
322 }
323 #else
324 static bool check_params(struct tee_ta_session *sess,
325 			 struct tee_ta_param *param)
326 {
327 	int n;
328 
329 	/*
330 	 * When CFG_SECURE_DATA_PATH is enabled, OP-TEE entry allows SHM and
331 	 * SDP memory references. Only TAs flagged SDP can access SDP memory.
332 	 */
333 	if (sess->ctx->flags & TA_FLAG_SECURE_DATA_PATH)
334 		return true;
335 
336 	for (n = 0; n < TEE_NUM_PARAMS; n++) {
337 		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
338 		struct param_mem *mem = &param->u[n].mem;
339 
340 		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
341 		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
342 		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
343 			continue;
344 		if (!mem->size)
345 			continue;
346 		if (mobj_is_sdp_mem(mem->mobj))
347 			return false;
348 	}
349 	return true;
350 }
351 #endif
352 
353 static void set_invoke_timeout(struct tee_ta_session *sess,
354 				      uint32_t cancel_req_to)
355 {
356 	TEE_Time current_time;
357 	TEE_Time cancel_time;
358 
359 	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
360 		goto infinite;
361 
362 	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
363 		goto infinite;
364 
365 	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
366 			 &cancel_time.seconds))
367 		goto infinite;
368 
369 	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
370 	if (cancel_time.millis >= 1000) {
371 		/* Carry into seconds without clobbering the sum above */
372 		if (ADD_OVERFLOW(cancel_time.seconds, 1,
373 				 &cancel_time.seconds))
374 			goto infinite;
375 
376 		cancel_time.millis -= 1000;
377 	}
378 
379 	sess->cancel_time = cancel_time;
380 	return;
381 
382 infinite:
383 	sess->cancel_time.seconds = UINT32_MAX;
384 	sess->cancel_time.millis = UINT32_MAX;
385 }
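
/*
 * Worked example for set_invoke_timeout(): with a current time of
 * { seconds = 100, millis = 900 } and cancel_req_to = 1500 ms, the sums
 * are seconds = 100 + 1 = 101 and millis = 900 + 500 = 1400; the carry
 * then yields cancel_time = { seconds = 102, millis = 400 }. Any
 * overflow, or TEE_TIMEOUT_INFINITE, parks the deadline at
 * { UINT32_MAX, UINT32_MAX }, which tee_ta_session_is_cancelled()
 * treats as "never".
 */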
386 
387 /*-----------------------------------------------------------------------------
388  * Close a Trusted Application and free available resources
389  *---------------------------------------------------------------------------*/
390 TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
391 				struct tee_ta_session_head *open_sessions,
392 				const TEE_Identity *clnt_id)
393 {
394 	struct tee_ta_session *sess;
395 	struct tee_ta_ctx *ctx;
396 	bool keep_alive;
397 
398 	if (!csess)
399 		return TEE_ERROR_ITEM_NOT_FOUND;
400 
401 	DMSG("csess 0x%" PRIxVA " id %u", (vaddr_t)csess, csess->id);
402 
403 	sess = tee_ta_get_session(csess->id, true, open_sessions);
404 
405 	if (!sess) {
406 		EMSG("session 0x%" PRIxVA " to be removed is not found",
407 		     (vaddr_t)csess);
408 		return TEE_ERROR_ITEM_NOT_FOUND;
409 	}
410 
411 	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
412 		tee_ta_put_session(sess);
413 		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
414 	}
415 
416 	ctx = sess->ctx;
417 	DMSG("Destroy session");
418 
419 	tee_ta_set_busy(ctx);
420 
421 	if (!ctx->panicked) {
422 		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
423 		ctx->ops->enter_close_session(sess);
424 	}
425 
426 	tee_ta_unlink_session(sess, open_sessions);
427 #if defined(CFG_TA_GPROF_SUPPORT)
428 	free(sess->sbuf);
429 #endif
430 	free(sess);
431 
432 	tee_ta_clear_busy(ctx);
433 
434 	mutex_lock(&tee_ta_mutex);
435 
436 	if (ctx->ref_count <= 0)
437 		panic();
438 
439 	ctx->ref_count--;
440 	keep_alive = (ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) &&
441 			(ctx->flags & TA_FLAG_SINGLE_INSTANCE);
442 	if (!ctx->ref_count && !keep_alive) {
443 		DMSG("Destroy TA ctx");
444 
445 		TAILQ_REMOVE(&tee_ctxes, ctx, link);
446 		mutex_unlock(&tee_ta_mutex);
447 
448 		condvar_destroy(&ctx->busy_cv);
449 
450 		pgt_flush_ctx(ctx);
451 		ctx->ops->destroy(ctx);
452 	} else
453 		mutex_unlock(&tee_ta_mutex);
454 
455 	return TEE_SUCCESS;
456 }
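
/*
 * A context flagged both TA_FLAG_INSTANCE_KEEP_ALIVE and
 * TA_FLAG_SINGLE_INSTANCE survives its last session: ref_count drops to
 * zero but the context stays in tee_ctxes so a later open reuses it
 * (see tee_ta_init_session_with_context() below), preserving the
 * instance's global state.
 */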
457 
458 static TEE_Result tee_ta_init_session_with_context(struct tee_ta_ctx *ctx,
459 			struct tee_ta_session *s)
460 {
461 	/*
462 	 * If the TA isn't single-instance it should be loaded as a new
463 	 * instance instead of doing anything with this instance.
464 	 * So tell the caller that we didn't find the TA, and then the
465 	 * caller will load a new instance.
466 	 */
467 	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
468 		return TEE_ERROR_ITEM_NOT_FOUND;
469 
470 	/*
471 	 * The TA is single-instance; if it isn't multi-session we can't
472 	 * create another session unless its reference count is zero.
473 	 */
474 	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
475 		return TEE_ERROR_BUSY;
476 
477 	DMSG("Re-open TA %pUl", (void *)&ctx->uuid);
478 
479 	ctx->ref_count++;
480 	s->ctx = ctx;
481 	return TEE_SUCCESS;
482 }
483 
484 static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
485 {
486 	struct tee_ta_session *last = NULL;
487 	uint32_t saved = 0;
488 	uint32_t id = 1;
489 
490 	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
491 	if (last) {
492 		/* This value is less likely to be already used */
493 		id = last->id + 1;
494 		if (!id)
495 			id++; /* 0 is not valid */
496 	}
497 
498 	saved = id;
499 	do {
500 		if (!tee_ta_find_session_nolock(id, open_sessions))
501 			return id;
502 		id++;
503 		if (!id)
504 			id++;
505 	} while (id != saved);
506 
507 	return 0;
508 }
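
/*
 * Example: if the last session in the list has id 7, the first candidate
 * is 8; if that is taken the search walks 9, 10, ... wrapping past
 * UINT32_MAX and skipping 0, until a free id is found or the entire
 * 32-bit space has been tried, in which case 0 is returned and
 * tee_ta_init_session() fails with TEE_ERROR_OVERFLOW.
 */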
509 
510 static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
511 				struct tee_ta_session_head *open_sessions,
512 				const TEE_UUID *uuid,
513 				struct tee_ta_session **sess)
514 {
515 	TEE_Result res;
516 	struct tee_ta_ctx *ctx;
517 	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));
518 
519 	*err = TEE_ORIGIN_TEE;
520 	if (!s)
521 		return TEE_ERROR_OUT_OF_MEMORY;
522 
523 	s->cancel_mask = true;
524 	condvar_init(&s->refc_cv);
525 	condvar_init(&s->lock_cv);
526 	s->lock_thread = THREAD_ID_INVALID;
527 	s->ref_count = 1;
528 
529 	/*
530 	 * We take the global TA mutex here and hold it while doing
531 	 * RPC to load the TA. This big critical section should be broken
532 	 * down into smaller pieces.
533 	 */
534 
537 	mutex_lock(&tee_ta_mutex);
538 	s->id = new_session_id(open_sessions);
539 	if (!s->id) {
540 		/* Not inserted in open_sessions yet, so don't goto out */
541 		mutex_unlock(&tee_ta_mutex);
542 		free(s);
543 		return TEE_ERROR_OVERFLOW;
544 	}
543 	TAILQ_INSERT_TAIL(open_sessions, s, link);
544 
545 	/* Look for already loaded TA */
546 	ctx = tee_ta_context_find(uuid);
547 	if (ctx) {
548 		res = tee_ta_init_session_with_context(ctx, s);
549 		if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
550 			goto out;
551 	}
552 
553 	/* Look for pseudo TA */
554 	res = tee_ta_init_pseudo_ta_session(uuid, s);
555 	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
556 		goto out;
557 
558 	/* Look for user TA */
559 	res = tee_ta_init_user_ta_session(uuid, s);
560 
561 out:
562 	if (res == TEE_SUCCESS) {
563 		*sess = s;
564 	} else {
565 		TAILQ_REMOVE(open_sessions, s, link);
566 		free(s);
567 	}
568 	mutex_unlock(&tee_ta_mutex);
569 	return res;
570 }
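
/*
 * Session resolution order in tee_ta_init_session(): an already loaded
 * (single-instance) context, then a pseudo TA, then a user TA loaded
 * via RPC. TEE_ERROR_ITEM_NOT_FOUND acts as the "try the next provider"
 * sentinel; any other result, success or failure, ends the lookup.
 */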
571 
572 TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
573 			       struct tee_ta_session **sess,
574 			       struct tee_ta_session_head *open_sessions,
575 			       const TEE_UUID *uuid,
576 			       const TEE_Identity *clnt_id,
577 			       uint32_t cancel_req_to,
578 			       struct tee_ta_param *param)
579 {
580 	TEE_Result res;
581 	struct tee_ta_session *s = NULL;
582 	struct tee_ta_ctx *ctx;
583 	bool panicked;
584 	bool was_busy = false;
585 
586 	res = tee_ta_init_session(err, open_sessions, uuid, &s);
587 	if (res != TEE_SUCCESS) {
588 		DMSG("init session failed 0x%x", res);
589 		return res;
590 	}
591 
592 	if (!check_params(s, param)) {
593 		/* Release the just-created session rather than leaking it */
594 		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
595 		return TEE_ERROR_BAD_PARAMETERS;
596 	}
594 
595 	ctx = s->ctx;
596 
597 	if (ctx->panicked) {
598 		DMSG("panicked, call tee_ta_close_session()");
599 		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
600 		*err = TEE_ORIGIN_TEE;
601 		return TEE_ERROR_TARGET_DEAD;
602 	}
603 
604 	*sess = s;
605 	/* Save identity of the owner of the session */
606 	s->clnt_id = *clnt_id;
607 
608 	if (tee_ta_try_set_busy(ctx)) {
609 		set_invoke_timeout(s, cancel_req_to);
610 		res = ctx->ops->enter_open_session(s, param, err);
611 		tee_ta_clear_busy(ctx);
612 	} else {
613 		/* Deadlock avoided */
614 		res = TEE_ERROR_BUSY;
615 		was_busy = true;
616 	}
617 
618 	panicked = ctx->panicked;
619 
620 	tee_ta_put_session(s);
621 	if (panicked || (res != TEE_SUCCESS))
622 		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
623 
624 	/*
625 	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors;
626 	 * panics and the busy (deadlock avoidance) case are attributed to
627 	 * the TEE itself.
628 	 */
628 	if (panicked || was_busy)
629 		*err = TEE_ORIGIN_TEE;
630 	else
631 		*err = TEE_ORIGIN_TRUSTED_APP;
632 
633 	if (res != TEE_SUCCESS)
634 		EMSG("Failed. Return error 0x%x", res);
635 
636 	return res;
637 }
638 
639 TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
640 				 struct tee_ta_session *sess,
641 				 const TEE_Identity *clnt_id,
642 				 uint32_t cancel_req_to, uint32_t cmd,
643 				 struct tee_ta_param *param)
644 {
645 	TEE_Result res;
646 
647 	if (check_client(sess, clnt_id) != TEE_SUCCESS)
648 		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
649 
650 	if (!check_params(sess, param))
651 		return TEE_ERROR_BAD_PARAMETERS;
652 
653 	if (sess->ctx->panicked) {
654 		DMSG("Panicked!");
655 		*err = TEE_ORIGIN_TEE;
656 		return TEE_ERROR_TARGET_DEAD;
657 	}
658 
659 	tee_ta_set_busy(sess->ctx);
660 
661 	set_invoke_timeout(sess, cancel_req_to);
662 	res = sess->ctx->ops->enter_invoke_cmd(sess, cmd, param, err);
663 
664 	if (sess->ctx->panicked) {
665 		*err = TEE_ORIGIN_TEE;
666 		res = TEE_ERROR_TARGET_DEAD;
667 	}
668 
669 	tee_ta_clear_busy(sess->ctx);
670 
671 	/* TEE_ERROR_SHORT_BUFFER is an expected outcome, not a real error */
672 	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
673 		DMSG("Error: 0x%x from origin %d", res, *err);
674 
675 	return res;
676 }
677 
678 TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
679 				 struct tee_ta_session *sess,
680 				 const TEE_Identity *clnt_id)
681 {
682 	*err = TEE_ORIGIN_TEE;
683 
684 	if (check_client(sess, clnt_id) != TEE_SUCCESS)
685 		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
686 
687 	sess->cancel = true;
688 	return TEE_SUCCESS;
689 }
690 
691 bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
692 {
693 	TEE_Time current_time;
694 
695 	if (s->cancel_mask)
696 		return false;
697 
698 	if (s->cancel)
699 		return true;
700 
701 	if (s->cancel_time.seconds == UINT32_MAX)
702 		return false;
703 
704 	if (curr_time != NULL)
705 		current_time = *curr_time;
706 	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
707 		return false;
708 
709 	if (current_time.seconds > s->cancel_time.seconds ||
710 	    (current_time.seconds == s->cancel_time.seconds &&
711 	     current_time.millis >= s->cancel_time.millis)) {
712 		return true;
713 	}
714 
715 	return false;
716 }
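
/*
 * The cancellation checks above are ordered by precedence: a set
 * cancel_mask hides everything, an explicit cancel request wins next,
 * and only then is the timeout compared against the current time (taken
 * from the caller when provided, to spare a system time read).
 */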
717 
718 static void update_current_ctx(struct thread_specific_data *tsd)
719 {
720 	struct tee_ta_ctx *ctx = NULL;
721 	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);
722 
723 	if (s) {
724 		if (is_pseudo_ta_ctx(s->ctx))
725 			s = TAILQ_NEXT(s, link_tsd);
726 
727 		if (s)
728 			ctx = s->ctx;
729 	}
730 
731 	if (tsd->ctx != ctx)
732 		tee_mmu_set_ctx(ctx);
733 	/*
734 	 * If the user TA context's vm_info is NULL we must not have a user
735 	 * mapping active; if vm_info is non-NULL we must have one.
736 	 */
737 	if (((ctx && is_user_ta_ctx(ctx) ?
738 			to_user_ta_ctx(ctx)->vm_info : NULL) == NULL) ==
739 					core_mmu_user_mapping_is_active())
740 		panic("unexpected active mapping");
741 }
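
/*
 * Pseudo TAs execute in kernel mode with the core mapping, so
 * update_current_ctx() skips a pseudo TA at the top of the session stack
 * and selects the user TA below it (if any) as the context whose user
 * mapping must be active.
 */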
742 
743 void tee_ta_push_current_session(struct tee_ta_session *sess)
744 {
745 	struct thread_specific_data *tsd = thread_get_tsd();
746 
747 	TAILQ_INSERT_HEAD(&tsd->sess_stack, sess, link_tsd);
748 	update_current_ctx(tsd);
749 }
750 
751 struct tee_ta_session *tee_ta_pop_current_session(void)
752 {
753 	struct thread_specific_data *tsd = thread_get_tsd();
754 	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);
755 
756 	if (s) {
757 		TAILQ_REMOVE(&tsd->sess_stack, s, link_tsd);
758 		update_current_ctx(tsd);
759 	}
760 	return s;
761 }
762 
763 TEE_Result tee_ta_get_current_session(struct tee_ta_session **sess)
764 {
765 	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);
766 
767 	if (!s)
768 		return TEE_ERROR_BAD_STATE;
769 	*sess = s;
770 	return TEE_SUCCESS;
771 }
772 
773 struct tee_ta_session *tee_ta_get_calling_session(void)
774 {
775 	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);
776 
777 	if (s)
778 		s = TAILQ_NEXT(s, link_tsd);
779 	return s;
780 }
781 
782 /*
783  * dump_state - Display TA state as an error log.
784  */
785 static void dump_state(struct tee_ta_ctx *ctx)
786 {
787 	struct tee_ta_session *s = NULL;
788 	bool active __maybe_unused;
789 
790 	active = ((tee_ta_get_current_session(&s) == TEE_SUCCESS) &&
791 		  s && s->ctx == ctx);
792 
793 	EMSG_RAW("Status of TA %pUl (%p) %s", (void *)&ctx->uuid, (void *)ctx,
794 		active ? "(active)" : "");
795 	ctx->ops->dump_state(ctx);
796 }
797 
798 void tee_ta_dump_current(void)
799 {
800 	struct tee_ta_session *s = NULL;
801 
802 	if (tee_ta_get_current_session(&s) != TEE_SUCCESS) {
803 		EMSG("no valid session found, cannot log TA status");
804 		return;
805 	}
806 
807 	dump_state(s->ctx);
808 }
809 
810 #if defined(CFG_TA_GPROF_SUPPORT)
811 void tee_ta_gprof_sample_pc(vaddr_t pc)
812 {
813 	struct tee_ta_session *s;
814 	struct sample_buf *sbuf;
815 	size_t idx;
816 
817 	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
818 		return;
819 	sbuf = s->sbuf;
820 	if (!sbuf || !sbuf->enabled)
821 		return; /* PC sampling is not enabled */
822 
823 	idx = (((uint64_t)pc - sbuf->offset)/2 * sbuf->scale)/65536;
824 	if (idx < sbuf->nsamples)
825 		sbuf->samples[idx]++;
826 	sbuf->count++;
827 }
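
/*
 * The sampling arithmetic matches the classic profil()/gprof histogram
 * convention: 'scale' is a 16.16 fixed-point fraction (65536 == 1.0)
 * and each counter nominally covers two bytes of text. For example,
 * with offset = 0x1000, scale = 0x8000 (i.e. 0.5) and pc = 0x1010, the
 * sample lands in bucket ((0x10 / 2) * 0x8000) / 65536 = 4.
 */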
828 
829 /*
830  * Update user-mode CPU time for the current session
831  * @suspend: true if session is being suspended (leaving user mode), false if
832  * it is resumed (entering user mode)
833  */
834 static void tee_ta_update_session_utime(bool suspend)
835 {
836 	struct tee_ta_session *s;
837 	struct sample_buf *sbuf;
838 	uint64_t now;
839 
840 	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
841 		return;
842 	sbuf = s->sbuf;
843 	if (!sbuf)
844 		return;
845 	now = read_cntpct();
846 	if (suspend) {
847 		assert(sbuf->usr_entered);
848 		sbuf->usr += now - sbuf->usr_entered;
849 		sbuf->usr_entered = 0;
850 	} else {
851 		assert(!sbuf->usr_entered);
852 		if (!now)
853 			now++; /* 0 is reserved */
854 		sbuf->usr_entered = now;
855 	}
856 }
857 
858 void tee_ta_update_session_utime_suspend(void)
859 {
860 	tee_ta_update_session_utime(true);
861 }
862 
863 void tee_ta_update_session_utime_resume(void)
864 {
865 	tee_ta_update_session_utime(false);
866 }
867 #endif
868