// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <types_ext.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <arm.h>
#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_ta.h>
#include <mm/core_mmu.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mmu.h>
#include <tee/entry_std.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_storage.h>
#include <tee_api_types.h>
#include <trace.h>
#include <user_ta_header.h>
#include <utee_types.h>
#include <util.h>

/*
 * This mutex protects the TA context list (tee_ctxes), the open-session
 * lists and the critical section in tee_ta_init_session.
 */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

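/*
 * Single-instance TA locking
 *
 * When CFG_CONCURRENT_SINGLE_INSTANCE_TA is enabled, single-instance TAs
 * are allowed to process several sessions concurrently and the functions
 * below degenerate to no-ops. Otherwise they serialize entry into
 * single-instance TAs: tee_ta_single_instance_thread records the owning
 * thread and tee_ta_single_instance_count how many times that thread has
 * taken the lock (it is re-entrant for the owner).
 */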
#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available. */
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}

static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}
#endif

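/*
 * Mark a TA context busy before entering it. Concurrent TAs are never
 * serialized. For the others, a thread that already holds the
 * single-instance lock must not block on a busy context (the owner of the
 * busy context may in turn be waiting for the single-instance lock, which
 * could deadlock); in that case the single-instance lock is dropped and
 * false is returned so the caller can fail with TEE_ERROR_BUSY instead.
 */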
static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	if (ctx->flags & TA_FLAG_CONCURRENT)
		return true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->initializing) {
		/*
		 * The context is still initializing and its flags cannot
		 * be relied on for user TAs. Wait here until it's
		 * initialized.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We're holding the single-instance lock and the
			 * TA is busy. Waiting now would only cause a
			 * deadlock, so we release the lock and return
			 * false.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, so we're
		 * free to wait for the TA to become available.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either it's already true or we should set it to true */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	if (ctx->flags & TA_FLAG_CONCURRENT)
		return;

	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

	if (!ctx->initializing && (ctx->flags & TA_FLAG_SINGLE_INSTANCE))
		unlock_single_instance();

	ctx->initializing = false;

	mutex_unlock(&tee_ta_mutex);
}

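/*
 * Session reference counting: a session is created with ref_count 1, each
 * user takes an extra reference via tee_ta_get_session() and drops it
 * with tee_ta_put_session(). tee_ta_unlink_session() waits until
 * ref_count drops back to 1 (only the unlinking thread left) before
 * removing the session from its list, which is why reaching 1 is what is
 * signalled on refc_cv below.
 */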
static void dec_session_ref_count(struct tee_ta_session *s)
{
	assert(s->ref_count > 0);
	s->ref_count--;
	if (s->ref_count == 1)
		condvar_signal(&s->refc_cv);
}

void tee_ta_put_session(struct tee_ta_session *s)
{
	mutex_lock(&tee_ta_mutex);

	if (s->lock_thread == thread_get_id()) {
		s->lock_thread = THREAD_ID_INVALID;
		condvar_signal(&s->lock_cv);
	}
	dec_session_ref_count(s);

	mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *found = NULL;

	TAILQ_FOREACH(s, open_sessions, link) {
		if (s->id == id) {
			found = s;
			break;
		}
	}

	return found;
}

struct tee_ta_session *tee_ta_find_session(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;

	mutex_lock(&tee_ta_mutex);

	s = tee_ta_find_session_nolock(id, open_sessions);

	mutex_unlock(&tee_ta_mutex);

	return s;
}

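/*
 * Look up a session by ID and take a reference on it. With @exclusive set
 * the session is also locked to the calling thread (lock_thread) so that
 * at most one thread operates on it at a time; the lock is released by
 * tee_ta_put_session(). NULL is returned if the session doesn't exist or
 * is being unlinked.
 */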
struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	mutex_lock(&tee_ta_mutex);

	while (true) {
		s = tee_ta_find_session_nolock(id, open_sessions);
		if (!s)
			break;
		if (s->unlink) {
			s = NULL;
			break;
		}
		s->ref_count++;
		if (!exclusive)
			break;

		assert(s->lock_thread != thread_get_id());

		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
			condvar_wait(&s->lock_cv, &tee_ta_mutex);

		if (s->unlink) {
			dec_session_ref_count(s);
			s = NULL;
			break;
		}

		s->lock_thread = thread_get_id();
		break;
	}

	mutex_unlock(&tee_ta_mutex);
	return s;
}

static void tee_ta_unlink_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	mutex_lock(&tee_ta_mutex);

	assert(s->ref_count >= 1);
	assert(s->lock_thread == thread_get_id());
	assert(!s->unlink);

	s->unlink = true;
	condvar_broadcast(&s->lock_cv);

	while (s->ref_count != 1)
		condvar_wait(&s->refc_cv, &tee_ta_mutex);

	TAILQ_REMOVE(open_sessions, s, link);

	mutex_unlock(&tee_ta_mutex);
}

static void destroy_session(struct tee_ta_session *s,
			    struct tee_ta_session_head *open_sessions)
{
#if defined(CFG_TA_FTRACE_SUPPORT)
	if (s->ctx && s->ctx->ops->dump_ftrace) {
		tee_ta_push_current_session(s);
		s->ctx->ops->dump_ftrace(s->ctx);
		tee_ta_pop_current_session();
	}
#endif

	tee_ta_unlink_session(s, open_sessions);
#if defined(CFG_TA_GPROF_SUPPORT)
	free(s->sbuf);
#endif
	free(s);
}

static void destroy_context(struct tee_ta_ctx *ctx)
{
	DMSG("Destroy TA ctx (0x%" PRIxVA ")", (vaddr_t)ctx);

	condvar_destroy(&ctx->busy_cv);
	pgt_flush_ctx(ctx);
	ctx->ops->destroy(ctx);
}

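/*
 * Destroy the TA context attached to @s, typically because the TA
 * panicked. Every other session pointing at the same context has its ctx
 * pointer cleared (those sessions survive, but any further use of them
 * returns TEE_ERROR_TARGET_DEAD); the count of cleared references plus
 * the current session must match the context's ref_count before the
 * context itself is torn down.
 */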
static void destroy_ta_ctx_from_session(struct tee_ta_session *s)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct user_ta_ctx *utc = NULL;
	size_t count = 1; /* start counting the references to the context */

	DMSG("Remove references to context (0x%" PRIxVA ")", (vaddr_t)s->ctx);

	mutex_lock(&tee_ta_mutex);
	nsec_sessions_list_head(&open_sessions);

	/*
	 * The next two loops remove all references to the context that is
	 * about to be destroyed, except the one held by the current
	 * session. That one is cleared at the end of this function, once
	 * the context has been destroyed.
	 */

	/*
	 * Scan the entire list of sessions opened by clients from the
	 * non-secure world.
	 */
	TAILQ_FOREACH(sess, open_sessions, link) {
		if (sess->ctx == s->ctx && sess != s) {
			sess->ctx = NULL;
			count++;
		}
	}

	/*
	 * Scan all sessions opened from the secure side by searching
	 * through all available TA instances and, for each context,
	 * scanning all opened sessions.
	 */
	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (is_user_ta_ctx(ctx)) {
			utc = to_user_ta_ctx(ctx);

			TAILQ_FOREACH(sess, &utc->open_sessions, link) {
				if (sess->ctx == s->ctx && sess != s) {
					sess->ctx = NULL;
					count++;
				}
			}
		}
	}

	assert(count == s->ctx->ref_count);

	TAILQ_REMOVE(&tee_ctxes, s->ctx, link);
	mutex_unlock(&tee_ta_mutex);

	destroy_context(s->ctx);

	s->ctx = NULL;
}

/*
 * tee_ta_context_find - Find a loaded TA context based on a UUID (input)
 * Returns a pointer to the context, or NULL if it isn't loaded
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx;

	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (memcmp(&ctx->uuid, uuid, sizeof(TEE_UUID)) == 0)
			return ctx;
	}

	return NULL;
}

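/*
 * KERN_IDENTITY and NSAPP_IDENTITY below are sentinel pointer values
 * (see kernel/tee_ta_manager.h), not addresses of real TEE_Identity
 * structures: KERN_IDENTITY marks requests issued by the TEE core itself,
 * NSAPP_IDENTITY requests issued by a non-secure client.
 */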
/* Check if the requester (client ID) matches the session's initial client */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
	if (id == KERN_IDENTITY)
		return TEE_SUCCESS;

	if (id == NSAPP_IDENTITY) {
		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
			DMSG("nsec tries to hijack TA session");
			return TEE_ERROR_ACCESS_DENIED;
		}
		return TEE_SUCCESS;
	}

	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
		DMSG("client id mismatch");
		return TEE_ERROR_ACCESS_DENIED;
	}
	return TEE_SUCCESS;
}

/*
 * Check if the invocation parameters match the TA properties
 *
 * @sess - current session handle
 * @param - already identified memory references hold a valid 'mobj'.
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy)
 * - Only SDP flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
			 struct tee_ta_param *param __unused)
{
	/*
	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
	 * are rejected at the OP-TEE core entry. Hence all TAs have the
	 * same permissions regarding memory reference parameters here.
	 */
	return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
			 struct tee_ta_param *param)
{
	int n;

	/*
	 * When CFG_SECURE_DATA_PATH is enabled, the OP-TEE entry allows
	 * both SHM and SDP memory references. Only TAs flagged SDP may
	 * access SDP memory.
	 */
	if (sess->ctx && sess->ctx->flags & TA_FLAG_SECURE_DATA_PATH)
		return true;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		struct param_mem *mem = &param->u[n].mem;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!mem->size)
			continue;
		if (mobj_is_sdp_mem(mem->mobj))
			return false;
	}
	return true;
}
#endif

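/*
 * Convert a relative cancellation timeout into an absolute system-time
 * deadline stored in the session. For example, with the current time at
 * 100 s / 800 ms and cancel_req_to == 2500 ms, the deadline becomes
 * 100 + 2 = 102 s plus 800 + 500 = 1300 ms, and after carrying the
 * millisecond overflow: 103 s / 300 ms. TEE_TIMEOUT_INFINITE and any
 * arithmetic overflow map to the UINT32_MAX/UINT32_MAX deadline that
 * tee_ta_session_is_cancelled() treats as "never".
 */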
static void set_invoke_timeout(struct tee_ta_session *sess,
			       uint32_t cancel_req_to)
{
	TEE_Time current_time;
	TEE_Time cancel_time;

	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
		goto infinite;

	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		goto infinite;

	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
			 &cancel_time.seconds))
		goto infinite;

	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
	if (cancel_time.millis >= 1000) {
		/* Carry the millisecond overflow into the seconds field */
		if (ADD_OVERFLOW(cancel_time.seconds, 1,
				 &cancel_time.seconds))
			goto infinite;

		cancel_time.millis -= 1000;
	}

	sess->cancel_time = cancel_time;
	return;

infinite:
	sess->cancel_time.seconds = UINT32_MAX;
	sess->cancel_time.millis = UINT32_MAX;
}

/*-----------------------------------------------------------------------------
 * Close a Trusted Application and free the associated resources
 *---------------------------------------------------------------------------*/
TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
				struct tee_ta_session_head *open_sessions,
				const TEE_Identity *clnt_id)
{
	struct tee_ta_session *sess;
	struct tee_ta_ctx *ctx;
	bool keep_alive;

	DMSG("csess 0x%" PRIxVA " id %u",
	     (vaddr_t)csess, csess ? csess->id : UINT_MAX);

	if (!csess)
		return TEE_ERROR_ITEM_NOT_FOUND;

	sess = tee_ta_get_session(csess->id, true, open_sessions);

	if (!sess) {
		EMSG("session 0x%" PRIxVA " to be removed is not found",
		     (vaddr_t)csess);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
		tee_ta_put_session(sess);
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
	}

	ctx = sess->ctx;
	DMSG("Destroy session");

	if (!ctx) {
		destroy_session(sess, open_sessions);
		return TEE_SUCCESS;
	}

	if (ctx->panicked) {
		destroy_session(sess, open_sessions);
	} else {
		tee_ta_set_busy(ctx);
		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
		ctx->ops->enter_close_session(sess);
		destroy_session(sess, open_sessions);
		tee_ta_clear_busy(ctx);
	}

	mutex_lock(&tee_ta_mutex);

	if (ctx->ref_count <= 0)
		panic();

	ctx->ref_count--;
	/*
	 * A single-instance TA with the keep-alive flag retains its
	 * context (and thus its instance state) even when no session
	 * references it any longer.
	 */
	keep_alive = (ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) &&
			(ctx->flags & TA_FLAG_SINGLE_INSTANCE);
	if (!ctx->ref_count && !keep_alive) {
		TAILQ_REMOVE(&tee_ctxes, ctx, link);
		mutex_unlock(&tee_ta_mutex);

		destroy_context(ctx);
	} else
		mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}

static TEE_Result tee_ta_init_session_with_context(struct tee_ta_ctx *ctx,
			struct tee_ta_session *s)
{
	/*
	 * If the TA isn't single-instance it should be loaded as a new
	 * instance instead of doing anything with this instance. So tell
	 * the caller that we didn't find the TA, and the caller will load
	 * a new instance.
	 */
	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * The TA is single-instance. If it isn't multi-session we can't
	 * create another session unless its reference count is zero.
	 */
	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
		return TEE_ERROR_BUSY;

	DMSG("Re-open TA %pUl", (void *)&ctx->uuid);

	ctx->ref_count++;
	s->ctx = ctx;
	return TEE_SUCCESS;
}

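/*
 * Allocate a non-zero session ID that isn't already in use. Starting from
 * the last session's ID + 1 makes a free ID likely on the first try: with
 * sessions 1..5 open, the first candidate is 6. The search wraps past 0
 * (0 is reserved as invalid) and only returns 0 if every other ID is
 * taken.
 */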
static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *last = NULL;
	uint32_t saved = 0;
	uint32_t id = 1;

	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
	if (last) {
		/* This value is less likely to be already used */
		id = last->id + 1;
		if (!id)
			id++; /* 0 is not valid */
	}

	saved = id;
	do {
		if (!tee_ta_find_session_nolock(id, open_sessions))
			return id;
		id++;
		if (!id)
			id++;
	} while (id != saved);

	return 0;
}

static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
				struct tee_ta_session_head *open_sessions,
				const TEE_UUID *uuid,
				struct tee_ta_session **sess)
{
	TEE_Result res;
	struct tee_ta_ctx *ctx;
	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

	*err = TEE_ORIGIN_TEE;
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->cancel_mask = true;
	condvar_init(&s->refc_cv);
	condvar_init(&s->lock_cv);
	s->lock_thread = THREAD_ID_INVALID;
	s->ref_count = 1;

	/*
	 * We take the global TA mutex here and hold it while doing
	 * RPC to load the TA. This big critical section should be broken
	 * down into smaller pieces.
	 */
	mutex_lock(&tee_ta_mutex);
	s->id = new_session_id(open_sessions);
	if (!s->id) {
		/* Not linked yet, don't TAILQ_REMOVE() it in the out path */
		mutex_unlock(&tee_ta_mutex);
		free(s);
		return TEE_ERROR_OVERFLOW;
	}
	TAILQ_INSERT_TAIL(open_sessions, s, link);

	/* Look for an already loaded TA */
	ctx = tee_ta_context_find(uuid);
	if (ctx) {
		res = tee_ta_init_session_with_context(ctx, s);
		if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
			goto out;
	}

	/* Look for a pseudo TA */
	res = tee_ta_init_pseudo_ta_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for a user TA */
	res = tee_ta_init_user_ta_session(uuid, s);

out:
	if (res == TEE_SUCCESS) {
		*sess = s;
	} else {
		TAILQ_REMOVE(open_sessions, s, link);
		free(s);
	}
	mutex_unlock(&tee_ta_mutex);
	return res;
}

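/*
 * Open a session towards the TA identified by @uuid: the session is
 * created (loading the TA if needed), the parameters are vetted against
 * the TA's flags and the TA's open-session entry point is entered. On
 * panic or error the half-opened session is closed again.
 *
 * A rough sketch of how a caller such as the non-secure entry code is
 * expected to drive this API (the surrounding bookkeeping is simplified
 * here; real callers re-look up the session by ID with
 * tee_ta_get_session() before each call):
 *
 *	res = tee_ta_open_session(&err, &s, sessions, uuid, clnt_id,
 *				  TEE_TIMEOUT_INFINITE, param);
 *	if (res == TEE_SUCCESS)
 *		res = tee_ta_invoke_command(&err, s, clnt_id,
 *					    TEE_TIMEOUT_INFINITE, cmd,
 *					    param);
 *	tee_ta_close_session(s, sessions, clnt_id);
 */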
TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
			       struct tee_ta_session **sess,
			       struct tee_ta_session_head *open_sessions,
			       const TEE_UUID *uuid,
			       const TEE_Identity *clnt_id,
			       uint32_t cancel_req_to,
			       struct tee_ta_param *param)
{
	TEE_Result res;
	struct tee_ta_session *s = NULL;
	struct tee_ta_ctx *ctx;
	bool panicked;
	bool was_busy = false;

	res = tee_ta_init_session(err, open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("init session failed 0x%x", res);
		return res;
	}

	if (!check_params(s, param)) {
		/* Don't leak the just-created session */
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	ctx = s->ctx;

	if (!ctx || ctx->panicked) {
		DMSG("panicked, call tee_ta_close_session()");
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	*sess = s;
	/* Save the identity of the owner of the session */
	s->clnt_id = *clnt_id;

	if (tee_ta_try_set_busy(ctx)) {
		set_invoke_timeout(s, cancel_req_to);
		res = ctx->ops->enter_open_session(s, param, err);
		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
		was_busy = true;
	}

	panicked = ctx->panicked;

	tee_ta_put_session(s);
	if (panicked || (res != TEE_SUCCESS))
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for a "regular"
	 * error, but TEE_ORIGIN_TEE when the TA panicked or when entry
	 * was refused to avoid a deadlock.
	 */
	if (panicked || was_busy)
		*err = TEE_ORIGIN_TEE;

	if (res != TEE_SUCCESS)
		EMSG("Failed. Return error 0x%x", res);

	return res;
}

TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id,
				 uint32_t cancel_req_to, uint32_t cmd,
				 struct tee_ta_param *param)
{
	TEE_Result res;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	if (!check_params(sess, param))
		return TEE_ERROR_BAD_PARAMETERS;

	if (!sess->ctx) {
		/* The context has already been destroyed */
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	} else if (sess->ctx->panicked) {
		DMSG("Panicked!");
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	tee_ta_set_busy(sess->ctx);

	set_invoke_timeout(sess, cancel_req_to);
	res = sess->ctx->ops->enter_invoke_cmd(sess, cmd, param, err);

	tee_ta_clear_busy(sess->ctx);

	if (sess->ctx->panicked) {
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	/* A short buffer is not a fatal error, don't log it as one */
	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
		DMSG("Error: %x of %d", res, *err);

	return res;
}

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id)
{
	*err = TEE_ORIGIN_TEE;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	sess->cancel = true;
	return TEE_SUCCESS;
}

bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	/* UINT32_MAX means an infinite timeout, see set_invoke_timeout() */
	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}

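/*
 * Each thread keeps a stack of the sessions it has entered
 * (tsd->sess_stack). The context of the session on top decides which
 * user-mode mapping must be active; pseudo TAs execute in kernel mode
 * with no mapping of their own, so for them the context of the next
 * entry on the stack (the caller) is used instead.
 */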
static void update_current_ctx(struct thread_specific_data *tsd)
{
	struct tee_ta_ctx *ctx = NULL;
	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);

	if (s) {
		if (is_pseudo_ta_ctx(s->ctx))
			s = TAILQ_NEXT(s, link_tsd);

		if (s)
			ctx = s->ctx;
	}

	if (tsd->ctx != ctx)
		tee_mmu_set_ctx(ctx);
	/*
	 * If the context has no vm_info we must not have a user mapping
	 * active, if it has one we must have a user mapping active.
	 */
	if (((is_user_ta_ctx(ctx) ?
			to_user_ta_ctx(ctx)->vm_info : NULL) == NULL) ==
					core_mmu_user_mapping_is_active())
		panic("unexpected active mapping");
}

void tee_ta_push_current_session(struct tee_ta_session *sess)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	TAILQ_INSERT_HEAD(&tsd->sess_stack, sess, link_tsd);
	update_current_ctx(tsd);
}

struct tee_ta_session *tee_ta_pop_current_session(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);

	if (s) {
		TAILQ_REMOVE(&tsd->sess_stack, s, link_tsd);
		update_current_ctx(tsd);
	}
	return s;
}

TEE_Result tee_ta_get_current_session(struct tee_ta_session **sess)
{
	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (!s)
		return TEE_ERROR_BAD_STATE;
	*sess = s;
	return TEE_SUCCESS;
}

struct tee_ta_session *tee_ta_get_calling_session(void)
{
	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s)
		s = TAILQ_NEXT(s, link_tsd);
	return s;
}

#if defined(CFG_TA_GPROF_SUPPORT) || defined(CFG_TA_FTRACE_SUPPORT)

#if defined(CFG_TA_GPROF_SUPPORT)
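/*
 * Record one PC sample in the session's gprof histogram. The bucket index
 * follows the classic profil()-style convention: the PC offset into the
 * sampled region is halved and then scaled by the 16.16 fixed-point
 * factor sbuf->scale, so with scale == 65536 every two bytes of text get
 * their own bucket. Out-of-range PCs still bump the total sample count.
 */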
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
	struct tee_ta_session *s = NULL;
	struct sample_buf *sbuf = NULL;
	size_t idx = 0;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		return;
	sbuf = s->sbuf;
	if (!sbuf || !sbuf->enabled)
		return; /* PC sampling is not enabled */

	idx = (((uint64_t)pc - sbuf->offset)/2 * sbuf->scale)/65536;
	if (idx < sbuf->nsamples)
		sbuf->samples[idx]++;
	sbuf->count++;
}

static void gprof_update_session_utime(bool suspend, struct tee_ta_session *s,
				       uint64_t now)
{
	struct sample_buf *sbuf = NULL;

	sbuf = s->sbuf;
	if (!sbuf)
		return;

	if (suspend) {
		assert(sbuf->usr_entered);
		sbuf->usr += now - sbuf->usr_entered;
		sbuf->usr_entered = 0;
	} else {
		assert(!sbuf->usr_entered);
		if (!now)
			now++; /* 0 is reserved */
		sbuf->usr_entered = now;
	}
}
#endif

#if defined(CFG_TA_FTRACE_SUPPORT)
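/*
 * On suspend, remember when the session left user mode; on resume, shift
 * every timestamp on the ftrace return stack forward by the time spent
 * suspended, so that reported function durations only account for time
 * actually spent executing in user mode.
 */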
static void ftrace_update_session_utime(bool suspend, struct tee_ta_session *s,
					uint64_t now)
{
	struct ftrace_buf *fbuf = NULL;
	uint32_t i = 0;

	fbuf = s->fbuf;
	if (!fbuf)
		return;

	if (suspend) {
		fbuf->suspend_time = now;
	} else {
		for (i = 0; i <= fbuf->ret_idx; i++)
			fbuf->begin_time[i] += now - fbuf->suspend_time;
	}
}
#endif

/*
 * Update user-mode CPU time for the current session
 * @suspend: true if the session is being suspended (leaving user mode),
 * false if it is resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
	struct tee_ta_session *s = NULL;
	uint64_t now = 0;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		return;

	now = read_cntpct();

#if defined(CFG_TA_GPROF_SUPPORT)
	gprof_update_session_utime(suspend, s, now);
#endif
#if defined(CFG_TA_FTRACE_SUPPORT)
	ftrace_update_session_utime(suspend, s, now);
#endif
}

void tee_ta_update_session_utime_suspend(void)
{
	tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
	tee_ta_update_session_utime(false);
}
#endif
977