// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_mmu.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee/entry_std.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc_storage.h>
#include <trace.h>
#include <types_ext.h>
#include <user_ta_header.h>
#include <utee_types.h>
#include <util.h>

/* This mutex protects the critical section in tee_ta_init_session */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available. */
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}

static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}
#endif

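/*
 * Mark the TA context busy on behalf of the calling thread. Returns true
 * when the busy flag was taken, and false when waiting for it would
 * deadlock (i.e. the caller holds the single-instance lock while the TA
 * is busy in another thread).
 */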
static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	if (ctx->flags & TA_FLAG_CONCURRENT)
		return true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->initializing) {
		/*
		 * Context is still initializing and flags cannot be relied
		 * on for user TAs. Wait here until it's initialized.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We're holding the single-instance lock and the
			 * TA is busy. Waiting now would only cause a
			 * deadlock, so release the lock and return false.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, so we're
		 * free to wait for the TA to become available.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either it's already true or we should set it to true */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	if (ctx->flags & TA_FLAG_CONCURRENT)
		return;

	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

	if (!ctx->initializing && (ctx->flags & TA_FLAG_SINGLE_INSTANCE))
		unlock_single_instance();

	ctx->initializing = false;

	mutex_unlock(&tee_ta_mutex);
}
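
/*
 * Illustrative sketch of the busy protocol, as used by
 * tee_ta_open_session() and tee_ta_invoke_command() below:
 *
 *	if (!tee_ta_try_set_busy(ctx))
 *		return TEE_ERROR_BUSY;	// deadlock avoided
 *	...call into the TA...
 *	tee_ta_clear_busy(ctx);
 *
 * tee_ta_set_busy() is the variant for callers that cannot hit the
 * deadlock case; it panics instead of failing.
 */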

static void dec_session_ref_count(struct tee_ta_session *s)
{
	assert(s->ref_count > 0);
	s->ref_count--;
	if (s->ref_count == 1)
		condvar_signal(&s->refc_cv);
}

void tee_ta_put_session(struct tee_ta_session *s)
{
	mutex_lock(&tee_ta_mutex);

	if (s->lock_thread == thread_get_id()) {
		s->lock_thread = THREAD_ID_INVALID;
		condvar_signal(&s->lock_cv);
	}
	dec_session_ref_count(s);

	mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *found = NULL;

	TAILQ_FOREACH(s, open_sessions, link) {
		if (s->id == id) {
			found = s;
			break;
		}
	}

	return found;
}

struct tee_ta_session *tee_ta_find_session(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;

	mutex_lock(&tee_ta_mutex);

	s = tee_ta_find_session_nolock(id, open_sessions);

	mutex_unlock(&tee_ta_mutex);

	return s;
}

struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	mutex_lock(&tee_ta_mutex);

	while (true) {
		s = tee_ta_find_session_nolock(id, open_sessions);
		if (!s)
			break;
		if (s->unlink) {
			s = NULL;
			break;
		}
		s->ref_count++;
		if (!exclusive)
			break;

		assert(s->lock_thread != thread_get_id());

		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
			condvar_wait(&s->lock_cv, &tee_ta_mutex);

		if (s->unlink) {
			dec_session_ref_count(s);
			s = NULL;
			break;
		}

		s->lock_thread = thread_get_id();
		break;
	}

	mutex_unlock(&tee_ta_mutex);
	return s;
}
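
/*
 * Illustrative sketch: a typical caller takes a counted (and possibly
 * exclusive) reference and releases it with tee_ta_put_session(), e.g.:
 *
 *	struct tee_ta_session *s = NULL;
 *
 *	s = tee_ta_get_session(id, true, open_sessions);
 *	if (!s)
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 *	...operate on the exclusively locked session...
 *	tee_ta_put_session(s);
 */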

static void tee_ta_unlink_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	mutex_lock(&tee_ta_mutex);

	assert(s->ref_count >= 1);
	assert(s->lock_thread == thread_get_id());
	assert(!s->unlink);

	s->unlink = true;
	condvar_broadcast(&s->lock_cv);

	while (s->ref_count != 1)
		condvar_wait(&s->refc_cv, &tee_ta_mutex);

	TAILQ_REMOVE(open_sessions, s, link);

	mutex_unlock(&tee_ta_mutex);
}

static void destroy_session(struct tee_ta_session *s,
			    struct tee_ta_session_head *open_sessions)
{
#if defined(CFG_FTRACE_SUPPORT)
	if (s->ctx && s->ctx->ops->dump_ftrace) {
		tee_ta_push_current_session(s);
		s->fbuf = NULL;
		s->ctx->ops->dump_ftrace(s->ctx);
		tee_ta_pop_current_session();
	}
#endif

	tee_ta_unlink_session(s, open_sessions);
#if defined(CFG_TA_GPROF_SUPPORT)
	free(s->sbuf);
#endif
	free(s);
}

static void destroy_context(struct tee_ta_ctx *ctx)
{
	DMSG("Destroy TA ctx (0x%" PRIxVA ")", (vaddr_t)ctx);

	condvar_destroy(&ctx->busy_cv);
	pgt_flush_ctx(ctx);
	ctx->ops->destroy(ctx);
}

static void destroy_ta_ctx_from_session(struct tee_ta_session *s)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct user_ta_ctx *utc = NULL;
	size_t count = 1; /* start counting the references to the context */

	DMSG("Remove references to context (0x%" PRIxVA ")", (vaddr_t)s->ctx);

	mutex_lock(&tee_ta_mutex);
	nsec_sessions_list_head(&open_sessions);

	/*
	 * The next two loops remove all references to the context that is
	 * about to be destroyed, except the one held by the current
	 * session. That reference is cleared at the end of this function,
	 * once the context has been properly destroyed.
	 */

	/*
	 * Scan the entire list of sessions opened by clients from the
	 * non-secure world.
	 */
	TAILQ_FOREACH(sess, open_sessions, link) {
		if (sess->ctx == s->ctx && sess != s) {
			sess->ctx = NULL;
			count++;
		}
	}

	/*
	 * Scan all sessions opened from the secure side by searching
	 * through all available TA instances and, for each context,
	 * scanning all opened sessions.
	 */
	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (is_user_ta_ctx(ctx)) {
			utc = to_user_ta_ctx(ctx);

			TAILQ_FOREACH(sess, &utc->open_sessions, link) {
				if (sess->ctx == s->ctx && sess != s) {
					sess->ctx = NULL;
					count++;
				}
			}
		}
	}

	assert(count == s->ctx->ref_count);

	TAILQ_REMOVE(&tee_ctxes, s->ctx, link);
	mutex_unlock(&tee_ta_mutex);

	destroy_context(s->ctx);

	s->ctx = NULL;
}

/*
 * tee_ta_context_find - Find a loaded TA context based on a UUID (input)
 * Returns a pointer to the context, or NULL if none was found
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx;

	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (memcmp(&ctx->uuid, uuid, sizeof(TEE_UUID)) == 0)
			return ctx;
	}

	return NULL;
}

/* Check if the requester (client ID) matches the session's initial client */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
	if (id == KERN_IDENTITY)
		return TEE_SUCCESS;

	if (id == NSAPP_IDENTITY) {
		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
			DMSG("nsec tries to hijack TA session");
			return TEE_ERROR_ACCESS_DENIED;
		}
		return TEE_SUCCESS;
	}

	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
		DMSG("client id mismatch");
		return TEE_ERROR_ACCESS_DENIED;
	}
	return TEE_SUCCESS;
}

/*
 * Check if invocation parameters match the TA properties
 *
 * @sess - current session handle
 * @param - already identified memory references hold a valid 'mobj'.
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy).
 * - Only SDP-flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
			 struct tee_ta_param *param __unused)
{
	/*
	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
	 * are rejected at OP-TEE core entry. Hence here all TAs have the
	 * same permissions regarding memory reference parameters.
	 */
	return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
			 struct tee_ta_param *param)
{
	int n;

	/*
	 * When CFG_SECURE_DATA_PATH is enabled, the OP-TEE entry allows
	 * SHM and SDP memory references. Only TAs flagged SDP can access
	 * SDP memory.
	 */
	if (sess->ctx && sess->ctx->flags & TA_FLAG_SECURE_DATA_PATH)
		return true;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		struct param_mem *mem = &param->u[n].mem;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!mem->size)
			continue;
		if (mobj_is_sdp_mem(mem->mobj))
			return false;
	}
	return true;
}
#endif
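
/*
 * Illustrative example: with CFG_SECURE_DATA_PATH enabled, a TA without
 * TA_FLAG_SECURE_DATA_PATH that is invoked with a non-empty MEMREF
 * parameter whose mobj satisfies mobj_is_sdp_mem() makes check_params()
 * return false; the callers below then fail the request with
 * TEE_ERROR_BAD_PARAMETERS.
 */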

static void set_invoke_timeout(struct tee_ta_session *sess,
			       uint32_t cancel_req_to)
{
	TEE_Time current_time;
	TEE_Time cancel_time;

	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
		goto infinite;

	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		goto infinite;

	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
			 &cancel_time.seconds))
		goto infinite;

	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
	if (cancel_time.millis >= 1000) {
		/* Carry the millisecond overflow into the seconds field */
		if (ADD_OVERFLOW(cancel_time.seconds, 1,
				 &cancel_time.seconds))
			goto infinite;

		cancel_time.millis -= 1000;
	}

	sess->cancel_time = cancel_time;
	return;

infinite:
	sess->cancel_time.seconds = UINT32_MAX;
	sess->cancel_time.millis = UINT32_MAX;
}
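
/*
 * Worked example: with current_time = {10 s, 800 ms} and a 2500 ms
 * timeout, seconds becomes 10 + 2 = 12 and millis becomes 800 + 500 =
 * 1300; the carry then yields a cancel_time of {13 s, 300 ms}.
 */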

/*-----------------------------------------------------------------------------
 * Close a Trusted Application and free available resources
 *---------------------------------------------------------------------------*/
TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
				struct tee_ta_session_head *open_sessions,
				const TEE_Identity *clnt_id)
{
	struct tee_ta_session *sess;
	struct tee_ta_ctx *ctx;
	bool keep_alive;

	DMSG("csess 0x%" PRIxVA " id %u",
	     (vaddr_t)csess, csess ? csess->id : UINT_MAX);

	if (!csess)
		return TEE_ERROR_ITEM_NOT_FOUND;

	sess = tee_ta_get_session(csess->id, true, open_sessions);

	if (!sess) {
		EMSG("session 0x%" PRIxVA " to be removed is not found",
		     (vaddr_t)csess);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
		tee_ta_put_session(sess);
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
	}

	ctx = sess->ctx;
	DMSG("Destroy session");

	if (!ctx) {
		destroy_session(sess, open_sessions);
		return TEE_SUCCESS;
	}

	if (ctx->panicked) {
		destroy_session(sess, open_sessions);
	} else {
		tee_ta_set_busy(ctx);
		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
		ctx->ops->enter_close_session(sess);
		destroy_session(sess, open_sessions);
		tee_ta_clear_busy(ctx);
	}

	mutex_lock(&tee_ta_mutex);

	if (ctx->ref_count <= 0)
		panic();

	ctx->ref_count--;
	keep_alive = (ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) &&
			(ctx->flags & TA_FLAG_SINGLE_INSTANCE);
	if (!ctx->ref_count && !keep_alive) {
		TAILQ_REMOVE(&tee_ctxes, ctx, link);
		mutex_unlock(&tee_ta_mutex);

		destroy_context(ctx);
	} else
		mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}

static TEE_Result tee_ta_init_session_with_context(struct tee_ta_ctx *ctx,
			struct tee_ta_session *s)
{
	/*
	 * If the TA isn't single-instance it should be loaded as a new
	 * instance instead of doing anything with this instance. So tell
	 * the caller that we didn't find the TA, and the caller will load
	 * a new instance.
	 */
	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * The TA is single-instance. If it isn't multi-session we can't
	 * create another session unless its reference count is zero.
	 */
	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
		return TEE_ERROR_BUSY;

	DMSG("Re-open TA %pUl", (void *)&ctx->uuid);

	ctx->ref_count++;
	s->ctx = ctx;
	return TEE_SUCCESS;
}

static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *last = NULL;
	uint32_t saved = 0;
	uint32_t id = 1;

	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
	if (last) {
		/* This value is less likely to be already used */
		id = last->id + 1;
		if (!id)
			id++; /* 0 is not valid */
	}

	saved = id;
	do {
		if (!tee_ta_find_session_nolock(id, open_sessions))
			return id;
		id++;
		if (!id)
			id++;
	} while (id != saved);

	return 0;
}
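
/*
 * Example: if the last open session has id UINT32_MAX, the candidate id
 * wraps past 0 to 1 and the list is probed until a free id is found.
 * Only when every valid id is in use does the scan return 0, which the
 * caller below turns into TEE_ERROR_OVERFLOW.
 */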

static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
				struct tee_ta_session_head *open_sessions,
				const TEE_UUID *uuid,
				struct tee_ta_session **sess)
{
	TEE_Result res;
	struct tee_ta_ctx *ctx;
	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

	*err = TEE_ORIGIN_TEE;
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->cancel_mask = true;
	condvar_init(&s->refc_cv);
	condvar_init(&s->lock_cv);
	s->lock_thread = THREAD_ID_INVALID;
	s->ref_count = 1;

	/*
	 * We take the global TA mutex here and hold it while doing
	 * RPC to load the TA. This big critical section should be broken
	 * down into smaller pieces.
	 */
	mutex_lock(&tee_ta_mutex);
	s->id = new_session_id(open_sessions);
	if (!s->id) {
		/* Not yet inserted in the list, so free directly */
		mutex_unlock(&tee_ta_mutex);
		free(s);
		return TEE_ERROR_OVERFLOW;
	}
	TAILQ_INSERT_TAIL(open_sessions, s, link);

	/* Look for an already loaded TA */
	ctx = tee_ta_context_find(uuid);
	if (ctx) {
		res = tee_ta_init_session_with_context(ctx, s);
		if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
			goto out;
	}

	/* Look for a pseudo TA */
	res = tee_ta_init_pseudo_ta_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for a user TA */
	res = tee_ta_init_user_ta_session(uuid, s);

out:
	if (res == TEE_SUCCESS) {
		*sess = s;
	} else {
		TAILQ_REMOVE(open_sessions, s, link);
		free(s);
	}
	mutex_unlock(&tee_ta_mutex);
	return res;
}

TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
			       struct tee_ta_session **sess,
			       struct tee_ta_session_head *open_sessions,
			       const TEE_UUID *uuid,
			       const TEE_Identity *clnt_id,
			       uint32_t cancel_req_to,
			       struct tee_ta_param *param)
{
	TEE_Result res;
	struct tee_ta_session *s = NULL;
	struct tee_ta_ctx *ctx;
	bool panicked;
	bool was_busy = false;

	res = tee_ta_init_session(err, open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("init session failed 0x%x", res);
		return res;
	}

	if (!check_params(s, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ctx = s->ctx;

	if (!ctx || ctx->panicked) {
		DMSG("panicked, call tee_ta_close_session()");
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	*sess = s;
	/* Save the identity of the owner of the session */
	s->clnt_id = *clnt_id;

	if (tee_ta_try_set_busy(ctx)) {
		set_invoke_timeout(s, cancel_req_to);
		res = ctx->ops->enter_open_session(s, param, err);
		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
		was_busy = true;
	}

	panicked = ctx->panicked;

	tee_ta_put_session(s);
	if (panicked || (res != TEE_SUCCESS))
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors;
	 * panics and deadlock avoidance are reported as TEE_ORIGIN_TEE.
	 */
	if (panicked || was_busy)
		*err = TEE_ORIGIN_TEE;

	if (res != TEE_SUCCESS)
		EMSG("Failed. Return error 0x%x", res);

	return res;
}

TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id,
				 uint32_t cancel_req_to, uint32_t cmd,
				 struct tee_ta_param *param)
{
	TEE_Result res;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	if (!check_params(sess, param))
		return TEE_ERROR_BAD_PARAMETERS;

	if (!sess->ctx) {
		/* The context has already been destroyed */
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	} else if (sess->ctx->panicked) {
		DMSG("Panicked!");
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	tee_ta_set_busy(sess->ctx);

	set_invoke_timeout(sess, cancel_req_to);
	res = sess->ctx->ops->enter_invoke_cmd(sess, cmd, param, err);

	tee_ta_clear_busy(sess->ctx);

	if (sess->ctx->panicked) {
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	/* A short buffer is not an effective error case */
	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
		DMSG("Error: %x of %d", res, *err);

	return res;
}
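
/*
 * Illustrative call sequence for a hypothetical caller (the variable
 * names are placeholders, not part of this file): a client opens a
 * session, invokes commands, and closes the session:
 *
 *	res = tee_ta_open_session(&err, &s, open_sessions, &uuid,
 *				  &clnt_id, TEE_TIMEOUT_INFINITE, &param);
 *	if (res == TEE_SUCCESS)
 *		res = tee_ta_invoke_command(&err, s, &clnt_id,
 *					    TEE_TIMEOUT_INFINITE, cmd,
 *					    &param);
 *	tee_ta_close_session(s, open_sessions, &clnt_id);
 */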

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id)
{
	*err = TEE_ORIGIN_TEE;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	sess->cancel = true;
	return TEE_SUCCESS;
}

bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}
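
/*
 * Example: with cancel_time = {13 s, 300 ms}, a session that is neither
 * masked nor explicitly cancelled reports cancellation once the system
 * time reaches {13 s, 300 ms}. An infinite timeout is encoded as
 * seconds == UINT32_MAX and never triggers.
 */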

static void update_current_ctx(struct thread_specific_data *tsd)
{
	struct tee_ta_ctx *ctx = NULL;
	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);

	if (s) {
		if (is_pseudo_ta_ctx(s->ctx))
			s = TAILQ_NEXT(s, link_tsd);

		if (s)
			ctx = s->ctx;
	}

	if (tsd->ctx != ctx)
		tee_mmu_set_ctx(ctx);
	/*
	 * If the current context is a user mode context, then it has to
	 * be active too.
	 */
	if (is_user_mode_ctx(ctx) != core_mmu_user_mapping_is_active())
		panic("unexpected active mapping");
}

void tee_ta_push_current_session(struct tee_ta_session *sess)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	TAILQ_INSERT_HEAD(&tsd->sess_stack, sess, link_tsd);
	update_current_ctx(tsd);
}

struct tee_ta_session *tee_ta_pop_current_session(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);

	if (s) {
		TAILQ_REMOVE(&tsd->sess_stack, s, link_tsd);
		update_current_ctx(tsd);
	}
	return s;
}

TEE_Result tee_ta_get_current_session(struct tee_ta_session **sess)
{
	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (!s)
		return TEE_ERROR_BAD_STATE;
	*sess = s;
	return TEE_SUCCESS;
}

struct tee_ta_session *tee_ta_get_calling_session(void)
{
	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s)
		s = TAILQ_NEXT(s, link_tsd);
	return s;
}
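
/*
 * Illustrative sketch (an assumption about a nested-call scenario, not
 * code from this file): the per-thread session stack tracks nested TA
 * calls, so if session A invokes into session B the entry code does
 * roughly:
 *
 *	tee_ta_push_current_session(sess_b);
 *	...run B; tee_ta_get_calling_session() now yields sess_a...
 *	tee_ta_pop_current_session();
 *
 * destroy_session() above uses the same push/pop pairing around
 * dump_ftrace().
 */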

#if defined(CFG_TA_GPROF_SUPPORT)
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
	struct tee_ta_session *s = NULL;
	struct user_ta_ctx *utc = NULL;
	struct sample_buf *sbuf = NULL;
	TEE_Result res = 0;
	size_t idx = 0;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		return;
	sbuf = s->sbuf;
	if (!sbuf || !sbuf->enabled)
		return; /* PC sampling is not enabled */

	idx = (((uint64_t)pc - sbuf->offset)/2 * sbuf->scale)/65536;
	if (idx < sbuf->nsamples) {
		utc = to_user_ta_ctx(s->ctx);
		res = tee_mmu_check_access_rights(&utc->uctx,
						  TEE_MEMORY_ACCESS_READ |
						  TEE_MEMORY_ACCESS_WRITE |
						  TEE_MEMORY_ACCESS_ANY_OWNER,
						  (uaddr_t)&sbuf->samples[idx],
						  sizeof(*sbuf->samples));
		if (res != TEE_SUCCESS)
			return;
		sbuf->samples[idx]++;
	}
	sbuf->count++;
}
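
/*
 * Worked example for the histogram index above: (pc - offset) is
 * counted in 2-byte units and scaled by the fixed-point factor 'scale'
 * (65536 == 1.0). With offset = 0x1000, scale = 0x8000 (i.e. 0.5) and
 * pc = 0x1100: idx = ((0x100 / 2) * 0x8000) / 65536 = 64.
 */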

static void gprof_update_session_utime(bool suspend, struct tee_ta_session *s,
				       uint64_t now)
{
	struct sample_buf *sbuf = NULL;

	sbuf = s->sbuf;
	if (!sbuf)
		return;

	if (suspend) {
		assert(sbuf->usr_entered);
		sbuf->usr += now - sbuf->usr_entered;
		sbuf->usr_entered = 0;
	} else {
		assert(!sbuf->usr_entered);
		if (!now)
			now++; /* 0 is reserved */
		sbuf->usr_entered = now;
	}
}

/*
 * Update user-mode CPU time for the current session
 * @suspend: true if the session is being suspended (leaving user mode),
 * false if it is resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
	struct tee_ta_session *s = NULL;
	uint64_t now = 0;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		return;

	now = read_cntpct();

	gprof_update_session_utime(suspend, s, now);
}

void tee_ta_update_session_utime_suspend(void)
{
	tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
	tee_ta_update_session_utime(false);
}
#endif

#if defined(CFG_FTRACE_SUPPORT)
static void ftrace_update_times(bool suspend)
{
	struct tee_ta_session *s = NULL;
	struct ftrace_buf *fbuf = NULL;
	uint64_t now = 0;
	uint32_t i = 0;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		return;

	now = read_cntpct();

	fbuf = s->fbuf;
	if (!fbuf)
		return;

	if (suspend) {
		fbuf->suspend_time = now;
	} else {
		for (i = 0; i <= fbuf->ret_idx; i++)
			fbuf->begin_time[i] += now - fbuf->suspend_time;
	}
}

void tee_ta_ftrace_update_times_suspend(void)
{
	ftrace_update_times(true);
}

void tee_ta_ftrace_update_times_resume(void)
{
	ftrace_update_times(false);
}
#endif
995