xref: /optee_os/core/kernel/tee_ta_manager.c (revision f9cd31c5310d178dc9a91bf195caba90c8f55042)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/stmm_sp.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <limits.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee/entry_std.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc_storage.h>
#include <trace.h>
#include <types_ext.h>
#include <user_ta_header.h>
#include <utee_types.h>
#include <util.h>

/* This mutex protects the critical section in tee_ta_init_session */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
/* This condvar is used when waiting for a TA context to become initialized */
struct condvar tee_ta_init_cv = CONDVAR_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static short int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

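/*
 * Note: with CFG_CONCURRENT_SINGLE_INSTANCE_TA the single-instance lock
 * below is compiled out (the helpers become no-ops), so several threads
 * may enter a single-instance TA concurrently. Without it, the lock is a
 * recursive, condvar-based lock tied to the owning thread ID.
 */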
#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available. */
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}

static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}
#endif

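/*
 * Mark @ctx busy before entering it. Returns false only when the calling
 * thread already holds the single-instance lock and the context is busy,
 * since waiting in that situation would deadlock. Contexts flagged
 * TA_FLAG_CONCURRENT are never serialized here.
 */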
static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	if (ctx->flags & TA_FLAG_CONCURRENT)
		return true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We're holding the single-instance lock and the
			 * TA is busy. Since waiting now would only cause
			 * a deadlock, we release the lock and return
			 * false.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, so we're
		 * free to wait for the TA to become available.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either it's already true or we should set it to true */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	if (ctx->flags & TA_FLAG_CONCURRENT)
		return;

	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

	if (!ctx->initializing && (ctx->flags & TA_FLAG_SINGLE_INSTANCE))
		unlock_single_instance();

	ctx->initializing = false;

	mutex_unlock(&tee_ta_mutex);
}

static void dec_session_ref_count(struct tee_ta_session *s)
{
	assert(s->ref_count > 0);
	s->ref_count--;
	if (s->ref_count == 1)
		condvar_signal(&s->refc_cv);
}

void tee_ta_put_session(struct tee_ta_session *s)
{
	mutex_lock(&tee_ta_mutex);

	if (s->lock_thread == thread_get_id()) {
		s->lock_thread = THREAD_ID_INVALID;
		condvar_signal(&s->lock_cv);
	}
	dec_session_ref_count(s);

	mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *found = NULL;

	TAILQ_FOREACH(s, open_sessions, link) {
		if (s->id == id) {
			found = s;
			break;
		}
	}

	return found;
}

struct tee_ta_session *tee_ta_find_session(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;

	mutex_lock(&tee_ta_mutex);

	s = tee_ta_find_session_nolock(id, open_sessions);

	mutex_unlock(&tee_ta_mutex);

	return s;
}

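/*
 * Look up session @id in @open_sessions and take a reference on it. With
 * @exclusive set, the calling thread also becomes the session's lock
 * owner, sleeping until any current owner calls tee_ta_put_session().
 * Returns NULL if the session is gone or about to be removed (unlink is
 * set).
 */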
struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	mutex_lock(&tee_ta_mutex);

	while (true) {
		s = tee_ta_find_session_nolock(id, open_sessions);
		if (!s)
			break;
		if (s->unlink) {
			s = NULL;
			break;
		}
		s->ref_count++;
		if (!exclusive)
			break;

		assert(s->lock_thread != thread_get_id());

		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
			condvar_wait(&s->lock_cv, &tee_ta_mutex);

		if (s->unlink) {
			dec_session_ref_count(s);
			s = NULL;
			break;
		}

		s->lock_thread = thread_get_id();
		break;
	}

	mutex_unlock(&tee_ta_mutex);
	return s;
}

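/*
 * Mark @s for removal and wait until this thread holds the last
 * reference (ref_count drops to 1) before unlinking it from
 * @open_sessions. The caller must be the session's lock owner.
 */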
static void tee_ta_unlink_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	mutex_lock(&tee_ta_mutex);

	assert(s->ref_count >= 1);
	assert(s->lock_thread == thread_get_id());
	assert(!s->unlink);

	s->unlink = true;
	condvar_broadcast(&s->lock_cv);

	while (s->ref_count != 1)
		condvar_wait(&s->refc_cv, &tee_ta_mutex);

	TAILQ_REMOVE(open_sessions, s, link);

	mutex_unlock(&tee_ta_mutex);
}

static void destroy_session(struct tee_ta_session *s,
			    struct tee_ta_session_head *open_sessions)
{
#if defined(CFG_FTRACE_SUPPORT)
	if (s->ts_sess.ctx && s->ts_sess.ctx->ops->dump_ftrace) {
		ts_push_current_session(&s->ts_sess);
		s->ts_sess.fbuf = NULL;
		s->ts_sess.ctx->ops->dump_ftrace(s->ts_sess.ctx);
		ts_pop_current_session();
	}
#endif

	tee_ta_unlink_session(s, open_sessions);
#if defined(CFG_TA_GPROF_SUPPORT)
	free(s->ts_sess.sbuf);
#endif
	free(s);
}

static void destroy_context(struct tee_ta_ctx *ctx)
{
	DMSG("Destroy TA ctx (0x%" PRIxVA ")",  (vaddr_t)ctx);

	condvar_destroy(&ctx->busy_cv);
	pgt_flush_ctx(&ctx->ts_ctx);
	ctx->ts_ctx.ops->destroy(&ctx->ts_ctx);
}

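/*
 * Clear every reference to the TA context of @s, both from sessions
 * opened by normal world clients and from sessions opened by other TAs,
 * then unlink the context and destroy it. @s->ts_sess.ctx itself is
 * cleared last, once the context is gone.
 */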
static void destroy_ta_ctx_from_session(struct tee_ta_session *s)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct user_ta_ctx *utc = NULL;
	size_t count = 1; /* start counting the references to the context */

	DMSG("Remove references to context (0x%" PRIxVA ")",
	     (vaddr_t)s->ts_sess.ctx);

	mutex_lock(&tee_ta_mutex);
	nsec_sessions_list_head(&open_sessions);

	/*
	 * The next two loops clear all references to the context that is
	 * about to be destroyed, except the one held by the current
	 * session. That reference is cleared at the end of this function,
	 * once the context has been destroyed.
	 */

	/*
	 * Scan the list of sessions opened by clients in the non-secure
	 * world.
	 */
	TAILQ_FOREACH(sess, open_sessions, link) {
		if (sess->ts_sess.ctx == s->ts_sess.ctx && sess != s) {
			sess->ts_sess.ctx = NULL;
			count++;
		}
	}

	/*
	 * Scan the sessions opened from the secure side by walking all
	 * user TA instances and, for each context, all of its open
	 * sessions.
	 */
	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (is_user_ta_ctx(&ctx->ts_ctx)) {
			utc = to_user_ta_ctx(&ctx->ts_ctx);

			TAILQ_FOREACH(sess, &utc->open_sessions, link) {
				if (sess->ts_sess.ctx == s->ts_sess.ctx &&
				    sess != s) {
					sess->ts_sess.ctx = NULL;
					count++;
				}
			}
		}
	}

	ctx = to_ta_ctx(s->ts_sess.ctx);
	assert(count == ctx->ref_count);

	TAILQ_REMOVE(&tee_ctxes, ctx, link);
	mutex_unlock(&tee_ta_mutex);

	destroy_context(ctx);

	s->ts_sess.ctx = NULL;
}

/*
 * tee_ta_context_find - Find a TA context matching @uuid in the list of
 * loaded TA instances. Returns a pointer to the context, or NULL if not
 * found. The caller must hold tee_ta_mutex.
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx;

	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (memcmp(&ctx->ts_ctx.uuid, uuid, sizeof(TEE_UUID)) == 0)
			return ctx;
	}

	return NULL;
}

/*
 * Check whether the requester (client ID) matches the session's initial
 * client.
 */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
	if (id == KERN_IDENTITY)
		return TEE_SUCCESS;

	if (id == NSAPP_IDENTITY) {
		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
			DMSG("nsec tries to hijack TA session");
			return TEE_ERROR_ACCESS_DENIED;
		}
		return TEE_SUCCESS;
	}

	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
		DMSG("client id mismatch");
		return TEE_ERROR_ACCESS_DENIED;
	}
	return TEE_SUCCESS;
}

/*
 * Check whether the invocation parameters match the TA properties
 *
 * @sess - current session handle
 * @param - already identified memory references hold a valid 'mobj'
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy)
 * - Only SDP flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
			 struct tee_ta_param *param __unused)
{
	/*
	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
	 * are rejected at the OP-TEE core entry. Hence all TAs have the
	 * same permissions regarding memory reference parameters here.
	 */
	return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
			 struct tee_ta_param *param)
{
	int n;

	/*
	 * When CFG_SECURE_DATA_PATH is enabled, the OP-TEE entry allows
	 * both SHM and SDP memory references. Only TAs flagged SDP may
	 * access SDP memory.
	 */
	if (sess->ts_sess.ctx &&
	    to_ta_ctx(sess->ts_sess.ctx)->flags & TA_FLAG_SECURE_DATA_PATH)
		return true;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		struct param_mem *mem = &param->u[n].mem;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!mem->size)
			continue;
		if (mobj_is_sdp_mem(mem->mobj))
			return false;
	}
	return true;
}
#endif

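/*
 * Convert the relative cancellation timeout @cancel_req_to (milliseconds)
 * into an absolute system time stored in @sess->cancel_time. For example,
 * with current time 10 s / 800 ms and cancel_req_to = 2500 ms the
 * deadline becomes 13 s / 300 ms. TEE_TIMEOUT_INFINITE, a failure to read
 * the system time or an arithmetic overflow all result in an infinite
 * timeout (UINT32_MAX seconds).
 */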
static void set_invoke_timeout(struct tee_ta_session *sess,
			       uint32_t cancel_req_to)
{
	TEE_Time current_time;
	TEE_Time cancel_time;

	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
		goto infinite;

	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		goto infinite;

	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
			 &cancel_time.seconds))
		goto infinite;

	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
	if (cancel_time.millis >= 1000) {
		/*
		 * Carry the extra second without discarding the seconds
		 * computed above.
		 */
		if (ADD_OVERFLOW(cancel_time.seconds, 1,
				 &cancel_time.seconds))
			goto infinite;

		cancel_time.millis -= 1000;
	}

	sess->cancel_time = cancel_time;
	return;

infinite:
	sess->cancel_time.seconds = UINT32_MAX;
	sess->cancel_time.millis = UINT32_MAX;
}

/*-----------------------------------------------------------------------------
 * Close a Trusted Application and free available resources
 *---------------------------------------------------------------------------*/
TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
				struct tee_ta_session_head *open_sessions,
				const TEE_Identity *clnt_id)
{
	struct tee_ta_session *sess;
	struct tee_ta_ctx *ctx;
	bool keep_alive;

	DMSG("csess 0x%" PRIxVA " id %u",
	     (vaddr_t)csess, csess ? csess->id : UINT_MAX);

	if (!csess)
		return TEE_ERROR_ITEM_NOT_FOUND;

	sess = tee_ta_get_session(csess->id, true, open_sessions);

	if (!sess) {
		EMSG("session 0x%" PRIxVA " to be removed is not found",
		     (vaddr_t)csess);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
		tee_ta_put_session(sess);
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
	}

	DMSG("Destroy session");

	if (!sess->ts_sess.ctx) {
		destroy_session(sess, open_sessions);
		return TEE_SUCCESS;
	}

	ctx = to_ta_ctx(sess->ts_sess.ctx);
	if (ctx->panicked) {
		destroy_session(sess, open_sessions);
	} else {
		tee_ta_set_busy(ctx);
		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
		ctx->ts_ctx.ops->enter_close_session(&sess->ts_sess);
		destroy_session(sess, open_sessions);
		tee_ta_clear_busy(ctx);
	}

	mutex_lock(&tee_ta_mutex);

	if (ctx->ref_count <= 0)
		panic();

	ctx->ref_count--;
	keep_alive = (ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) &&
			(ctx->flags & TA_FLAG_SINGLE_INSTANCE);
	if (!ctx->ref_count && !keep_alive) {
		TAILQ_REMOVE(&tee_ctxes, ctx, link);
		mutex_unlock(&tee_ta_mutex);

		destroy_context(ctx);
	} else
		mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}

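/*
 * Try to bind @s to an already loaded single-instance TA context matching
 * @uuid. Returns TEE_ERROR_ITEM_NOT_FOUND when a new instance should be
 * loaded instead, and TEE_ERROR_BUSY when the instance exists but does
 * not allow another session. Called with tee_ta_mutex held; may sleep
 * waiting for a context that is still initializing.
 */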
static TEE_Result tee_ta_init_session_with_context(struct tee_ta_session *s,
						   const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx = NULL;

	while (true) {
		ctx = tee_ta_context_find(uuid);
		if (!ctx)
			return TEE_ERROR_ITEM_NOT_FOUND;

		if (!is_user_ta_ctx(&ctx->ts_ctx) ||
		    !to_user_ta_ctx(&ctx->ts_ctx)->is_initializing)
			break;
		/*
		 * The context is still initializing, wait here until it's
		 * fully initialized. Note that we're searching for the
		 * context again since it may have been removed while we
		 * were sleeping.
		 */
		condvar_wait(&tee_ta_init_cv, &tee_ta_mutex);
	}

	/*
	 * If the TA isn't single-instance it should be loaded as a new
	 * instance instead of doing anything with this instance. So tell
	 * the caller that we didn't find the TA; then the caller will
	 * load a new instance.
	 */
	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * The TA is single-instance. If it isn't multi-session we can't
	 * create another session unless its reference count is zero.
	 */
	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
		return TEE_ERROR_BUSY;

	DMSG("Re-open TA %pUl", (void *)&ctx->ts_ctx.uuid);

	ctx->ref_count++;
	s->ts_sess.ctx = &ctx->ts_ctx;
	return TEE_SUCCESS;
}

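/*
 * Allocate a non-zero session ID that isn't already in use. Starts from
 * the ID after the last session in the list (most likely free) and wraps
 * around, skipping 0. Returns 0 if all IDs are taken. Called with
 * tee_ta_mutex held.
 */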
static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *last = NULL;
	uint32_t saved = 0;
	uint32_t id = 1;

	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
	if (last) {
		/* This value is less likely to be already used */
		id = last->id + 1;
		if (!id)
			id++; /* 0 is not valid */
	}

	saved = id;
	do {
		if (!tee_ta_find_session_nolock(id, open_sessions))
			return id;
		id++;
		if (!id)
			id++;
	} while (id != saved);

	return 0;
}

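/*
 * Create a session for the TA identified by @uuid and insert it into
 * @open_sessions. The TA is resolved in order: an already loaded
 * single-instance TA, a stand-alone MM (StMM) secure partition, a pseudo
 * TA, and finally a user TA loaded from storage.
 */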
static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
				struct tee_ta_session_head *open_sessions,
				const TEE_UUID *uuid,
				struct tee_ta_session **sess)
{
	TEE_Result res;
	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

	*err = TEE_ORIGIN_TEE;
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->cancel_mask = true;
	condvar_init(&s->refc_cv);
	condvar_init(&s->lock_cv);
	s->lock_thread = THREAD_ID_INVALID;
	s->ref_count = 1;

	mutex_lock(&tee_ta_mutex);
	s->id = new_session_id(open_sessions);
	if (!s->id) {
		res = TEE_ERROR_OVERFLOW;
		goto err_mutex_unlock;
	}

	TAILQ_INSERT_TAIL(open_sessions, s, link);

	/* Look for an already loaded TA */
	res = tee_ta_init_session_with_context(s, uuid);
	mutex_unlock(&tee_ta_mutex);
	if (res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for a secure partition */
	res = stmm_init_session(uuid, s);
	if (res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for a pseudo TA */
	res = tee_ta_init_pseudo_ta_session(uuid, s);
	if (res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for a user TA */
	res = tee_ta_init_user_ta_session(uuid, s);

out:
	if (!res) {
		*sess = s;
		return TEE_SUCCESS;
	}

	mutex_lock(&tee_ta_mutex);
	TAILQ_REMOVE(open_sessions, s, link);
err_mutex_unlock:
	mutex_unlock(&tee_ta_mutex);
	free(s);
	return res;
}

TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
			       struct tee_ta_session **sess,
			       struct tee_ta_session_head *open_sessions,
			       const TEE_UUID *uuid,
			       const TEE_Identity *clnt_id,
			       uint32_t cancel_req_to,
			       struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_session *s = NULL;
	struct tee_ta_ctx *ctx = NULL;
	bool panicked = false;
	bool was_busy = false;

	res = tee_ta_init_session(err, open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("init session failed 0x%x", res);
		return res;
	}

	if (!check_params(s, param))
		return TEE_ERROR_BAD_PARAMETERS;

	if (s->ts_sess.ctx)
		ctx = to_ta_ctx(s->ts_sess.ctx);

	if (!ctx || ctx->panicked) {
		DMSG("panicked, call tee_ta_close_session()");
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	*sess = s;
	/* Save identity of the owner of the session */
	s->clnt_id = *clnt_id;

	if (tee_ta_try_set_busy(ctx)) {
		s->param = param;
		set_invoke_timeout(s, cancel_req_to);
		res = ctx->ts_ctx.ops->enter_open_session(&s->ts_sess);
		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
		was_busy = true;
	}

	panicked = ctx->panicked;
	s->param = NULL;

	/*
	 * Origin error equal to TEE_ORIGIN_TRUSTED_APP for "regular" error,
	 * apart from panicking. Read it before the session may be destroyed
	 * below.
	 */
	if (panicked || was_busy)
		*err = TEE_ORIGIN_TEE;
	else
		*err = s->err_origin;

	tee_ta_put_session(s);
	if (panicked || (res != TEE_SUCCESS))
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

	if (res != TEE_SUCCESS)
		EMSG("Failed. Return error 0x%x", res);

	return res;
}

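/*
 * Invoke command @cmd in the TA bound to @sess on behalf of @clnt_id. The
 * context is serialized through the busy flag unless the TA is flagged
 * concurrent; a panicking TA gets its context torn down and the caller
 * sees TEE_ERROR_TARGET_DEAD.
 */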
TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id,
				 uint32_t cancel_req_to, uint32_t cmd,
				 struct tee_ta_param *param)
{
	struct tee_ta_ctx *ta_ctx = NULL;
	TEE_Result res = TEE_SUCCESS;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	if (!check_params(sess, param))
		return TEE_ERROR_BAD_PARAMETERS;

	if (!sess->ts_sess.ctx) {
		/* The context has already been destroyed */
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	ta_ctx = to_ta_ctx(sess->ts_sess.ctx);
	if (ta_ctx->panicked) {
		DMSG("Panicked!");
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	tee_ta_set_busy(ta_ctx);

	sess->param = param;
	set_invoke_timeout(sess, cancel_req_to);
	res = ta_ctx->ts_ctx.ops->enter_invoke_cmd(&sess->ts_sess, cmd);

	sess->param = NULL;
	tee_ta_clear_busy(ta_ctx);

	if (ta_ctx->panicked) {
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	*err = sess->err_origin;

	/* A short buffer is not an actual error case */
	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
		DMSG("Error: %x of %d", res, *err);

	return res;
}

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id)
{
	*err = TEE_ORIGIN_TEE;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	sess->cancel = true;
	return TEE_SUCCESS;
}

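/*
 * Report whether @s is cancelled. Order of precedence: a masked session
 * is never cancelled, an explicit cancel request always wins, and
 * otherwise the cancellation deadline is compared against @curr_time (or
 * the system time when @curr_time is NULL).
 */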
bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}

#if defined(CFG_TA_GPROF_SUPPORT)
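/*
 * Record one PC sample in the session's gprof histogram. The bucket index
 * follows the classic gprof scheme: the offset from the sampled region's
 * start is halved (each bucket covers two bytes) and scaled by the
 * fixed-point factor 'scale' (65536 means 1:1).
 */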
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
	struct ts_session *s = ts_get_current_session();
	struct user_ta_ctx *utc = NULL;
	struct sample_buf *sbuf = NULL;
	TEE_Result res = 0;
	size_t idx = 0;

	sbuf = s->sbuf;
	if (!sbuf || !sbuf->enabled)
		return; /* PC sampling is not enabled */

	idx = (((uint64_t)pc - sbuf->offset)/2 * sbuf->scale)/65536;
	if (idx < sbuf->nsamples) {
		utc = to_user_ta_ctx(s->ctx);
		res = vm_check_access_rights(&utc->uctx,
					     TEE_MEMORY_ACCESS_READ |
					     TEE_MEMORY_ACCESS_WRITE |
					     TEE_MEMORY_ACCESS_ANY_OWNER,
					     (uaddr_t)&sbuf->samples[idx],
					     sizeof(*sbuf->samples));
		if (res != TEE_SUCCESS)
			return;
		sbuf->samples[idx]++;
	}
	sbuf->count++;
}

static void gprof_update_session_utime(bool suspend, struct ts_session *s,
				       uint64_t now)
{
	struct sample_buf *sbuf = s->sbuf;

	if (!sbuf)
		return;

	if (suspend) {
		assert(sbuf->usr_entered);
		sbuf->usr += now - sbuf->usr_entered;
		sbuf->usr_entered = 0;
	} else {
		assert(!sbuf->usr_entered);
		if (!now)
			now++; /* 0 is reserved */
		sbuf->usr_entered = now;
	}
}

/*
 * Update user-mode CPU time for the current session
 * @suspend: true if session is being suspended (leaving user mode), false if
 * it is resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
	struct ts_session *s = ts_get_current_session();
	uint64_t now = read_cntpct();

	gprof_update_session_utime(suspend, s, now);
}

void tee_ta_update_session_utime_suspend(void)
{
	tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
	tee_ta_update_session_utime(false);
}
#endif

#if defined(CFG_FTRACE_SUPPORT)
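/*
 * Keep ftrace timestamps consistent across session suspend/resume: the
 * suspend time is recorded on suspend and, on resume, every pending
 * function-entry timestamp is shifted forward by the time spent
 * suspended.
 */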
static void ftrace_update_times(bool suspend)
{
	struct ts_session *s = ts_get_current_session();
	struct ftrace_buf *fbuf = NULL;
	uint64_t now = 0;
	uint32_t i = 0;

	now = read_cntpct();

	fbuf = s->fbuf;
	if (!fbuf)
		return;

	if (suspend) {
		fbuf->suspend_time = now;
	} else {
		for (i = 0; i <= fbuf->ret_idx; i++)
			fbuf->begin_time[i] += now - fbuf->suspend_time;
	}
}

void tee_ta_ftrace_update_times_suspend(void)
{
	ftrace_update_times(true);
}

void tee_ta_ftrace_update_times_resume(void)
{
	ftrace_update_times(false);
}
#endif

bool is_ta_ctx(struct ts_ctx *ctx)
{
	return is_user_ta_ctx(ctx) || is_pseudo_ta_ctx(ctx);
}