xref: /optee_os/core/kernel/tee_ta_manager.c (revision 9fc2442cc66c279cb962c90c4375746fc9b28bb9)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020, Arm Limited
 */

#include <arm.h>
#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/stmm_sp.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee/entry_std.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc_storage.h>
#include <trace.h>
#include <types_ext.h>
#include <user_ta_header.h>
#include <utee_types.h>
#include <util.h>

/* This mutex protects the critical section in tee_ta_init_session */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
/* This condvar is used when waiting for a TA context to become initialized */
struct condvar tee_ta_init_cv = CONDVAR_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static short int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available. */
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}

static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}
#endif

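/*
 * Illustrative sketch (not part of the original file): how the
 * single-instance helpers above are meant to pair up. The caller must
 * hold tee_ta_mutex for the whole sequence.
 *
 *	mutex_lock(&tee_ta_mutex);
 *	lock_single_instance();   (sleeps on tee_ta_cv until available)
 *	... manipulate single-instance TA state ...
 *	unlock_single_instance(); (signals tee_ta_cv when count hits 0)
 *	mutex_unlock(&tee_ta_mutex);
 *
 * The lock is recursive per thread: each lock_single_instance() call
 * increments tee_ta_single_instance_count and must be balanced by an
 * unlock_single_instance() call.
 */
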
struct tee_ta_session *__noprof to_ta_session(struct ts_session *sess)
{
	assert(is_ta_ctx(sess->ctx) || is_stmm_ctx(sess->ctx));
	return container_of(sess, struct tee_ta_session, ts_sess);
}

static struct tee_ta_ctx *ts_to_ta_ctx(struct ts_ctx *ctx)
{
	if (is_ta_ctx(ctx))
		return to_ta_ctx(ctx);

	if (is_stmm_ctx(ctx))
		return &(to_stmm_ctx(ctx)->ta_ctx);

	panic("bad context");
}

static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	if (ctx->flags & TA_FLAG_CONCURRENT)
		return true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We're holding the single-instance lock and the
			 * TA is busy. Waiting now would only cause a
			 * deadlock, so we release the lock and return
			 * false.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, so we're
		 * free to wait for the TA to become available.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either it's already true or we should set it to true */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	if (ctx->flags & TA_FLAG_CONCURRENT)
		return;

	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

	if (!ctx->initializing && (ctx->flags & TA_FLAG_SINGLE_INSTANCE))
		unlock_single_instance();

	ctx->initializing = false;

	mutex_unlock(&tee_ta_mutex);
}

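/*
 * Usage sketch (illustrative only): callers bracket TA entry points with
 * the busy helpers so that a non-concurrent TA serves one request at a
 * time, as tee_ta_close_session() and tee_ta_invoke_command() below do:
 *
 *	tee_ta_set_busy(ctx);		(may sleep on ctx->busy_cv)
 *	res = ts_ctx->ops->enter_invoke_cmd(&sess->ts_sess, cmd);
 *	tee_ta_clear_busy(ctx);		(wakes one waiter, if any)
 */
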
static void dec_session_ref_count(struct tee_ta_session *s)
{
	assert(s->ref_count > 0);
	s->ref_count--;
	if (s->ref_count == 1)
		condvar_signal(&s->refc_cv);
}

void tee_ta_put_session(struct tee_ta_session *s)
{
	mutex_lock(&tee_ta_mutex);

	if (s->lock_thread == thread_get_id()) {
		s->lock_thread = THREAD_ID_INVALID;
		condvar_signal(&s->lock_cv);
	}
	dec_session_ref_count(s);

	mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *found = NULL;

	TAILQ_FOREACH(s, open_sessions, link) {
		if (s->id == id) {
			found = s;
			break;
		}
	}

	return found;
}

struct tee_ta_session *tee_ta_find_session(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;

	mutex_lock(&tee_ta_mutex);

	s = tee_ta_find_session_nolock(id, open_sessions);

	mutex_unlock(&tee_ta_mutex);

	return s;
}

struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	mutex_lock(&tee_ta_mutex);

	while (true) {
		s = tee_ta_find_session_nolock(id, open_sessions);
		if (!s)
			break;
		if (s->unlink) {
			s = NULL;
			break;
		}
		s->ref_count++;
		if (!exclusive)
			break;

		assert(s->lock_thread != thread_get_id());

		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
			condvar_wait(&s->lock_cv, &tee_ta_mutex);

		if (s->unlink) {
			dec_session_ref_count(s);
			s = NULL;
			break;
		}

		s->lock_thread = thread_get_id();
		break;
	}

	mutex_unlock(&tee_ta_mutex);
	return s;
}

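/*
 * Usage sketch (illustrative only, error handling trimmed): a session
 * looked up with tee_ta_get_session() must be released with
 * tee_ta_put_session(). Passing exclusive == true also takes the
 * per-session lock, which the same tee_ta_put_session() call drops.
 *
 *	struct tee_ta_session *s = NULL;
 *
 *	s = tee_ta_get_session(id, true, open_sessions);
 *	if (!s)
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 *	... use the session exclusively ...
 *	tee_ta_put_session(s);
 */
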
static void tee_ta_unlink_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	mutex_lock(&tee_ta_mutex);

	assert(s->ref_count >= 1);
	assert(s->lock_thread == thread_get_id());
	assert(!s->unlink);

	s->unlink = true;
	condvar_broadcast(&s->lock_cv);

	while (s->ref_count != 1)
		condvar_wait(&s->refc_cv, &tee_ta_mutex);

	TAILQ_REMOVE(open_sessions, s, link);

	mutex_unlock(&tee_ta_mutex);
}

static void destroy_session(struct tee_ta_session *s,
			    struct tee_ta_session_head *open_sessions)
{
#if defined(CFG_FTRACE_SUPPORT)
	if (s->ts_sess.ctx && s->ts_sess.ctx->ops->dump_ftrace) {
		ts_push_current_session(&s->ts_sess);
		s->ts_sess.fbuf = NULL;
		s->ts_sess.ctx->ops->dump_ftrace(s->ts_sess.ctx);
		ts_pop_current_session();
	}
#endif

	tee_ta_unlink_session(s, open_sessions);
#if defined(CFG_TA_GPROF_SUPPORT)
	free(s->ts_sess.sbuf);
#endif
	free(s);
}

static void destroy_context(struct tee_ta_ctx *ctx)
{
	DMSG("Destroy TA ctx (0x%" PRIxVA ")", (vaddr_t)ctx);

	condvar_destroy(&ctx->busy_cv);
	pgt_flush_ctx(&ctx->ts_ctx);
	ctx->ts_ctx.ops->destroy(&ctx->ts_ctx);
}

static void destroy_ta_ctx_from_session(struct tee_ta_session *s)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct user_ta_ctx *utc = NULL;
	struct ts_ctx *ts_ctx = s->ts_sess.ctx;
	size_t count = 1; /* start counting the references to the context */

	DMSG("Remove references to context (%#"PRIxVA")", (vaddr_t)ts_ctx);

	mutex_lock(&tee_ta_mutex);
	nsec_sessions_list_head(&open_sessions);

	/*
	 * The next two loops remove all references to the context that is
	 * about to be destroyed, except for the one held by the current
	 * session. That reference is cleared at the end of this function,
	 * once the context has been properly destroyed.
	 */

	/*
	 * Scan the entire list of sessions opened by clients from the
	 * non-secure world.
	 */
	TAILQ_FOREACH(sess, open_sessions, link) {
		if (sess->ts_sess.ctx == ts_ctx && sess != s) {
			sess->ts_sess.ctx = NULL;
			count++;
		}
	}

	/*
	 * Scan all sessions opened from the secure side by iterating over
	 * all available TA instances and, for each context, scanning its
	 * open sessions.
	 */
	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (is_user_ta_ctx(&ctx->ts_ctx)) {
			utc = to_user_ta_ctx(&ctx->ts_ctx);

			TAILQ_FOREACH(sess, &utc->open_sessions, link) {
				if (sess->ts_sess.ctx == ts_ctx &&
				    sess != s) {
					sess->ts_sess.ctx = NULL;
					count++;
				}
			}
		}
	}

	ctx = ts_to_ta_ctx(ts_ctx);
	assert(count == ctx->ref_count);

	TAILQ_REMOVE(&tee_ctxes, ctx, link);
	mutex_unlock(&tee_ta_mutex);

	destroy_context(ctx);
	s->ts_sess.ctx = NULL;
}

/*
 * tee_ta_context_find - Find a TA context in the list of loaded contexts
 * based on a UUID (input)
 * Returns a pointer to the context, or NULL if none is found
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx;

	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (memcmp(&ctx->ts_ctx.uuid, uuid, sizeof(TEE_UUID)) == 0)
			return ctx;
	}

	return NULL;
}

/* Check if the requester (client ID) matches the session's initial client */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
	if (id == KERN_IDENTITY)
		return TEE_SUCCESS;

	if (id == NSAPP_IDENTITY) {
		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
			DMSG("nsec tries to hijack TA session");
			return TEE_ERROR_ACCESS_DENIED;
		}
		return TEE_SUCCESS;
	}

	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
		DMSG("client id mismatch");
		return TEE_ERROR_ACCESS_DENIED;
	}
	return TEE_SUCCESS;
}

/*
 * Check if invocation parameters match TA properties
 *
 * @sess - current session handle
 * @param - already identified memory references hold a valid 'mobj'.
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy)
 * - Only SDP flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
			 struct tee_ta_param *param __unused)
{
	/*
	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
	 * are rejected at OP-TEE core entry. Hence here all TAs have the
	 * same permissions regarding memory reference parameters.
	 */
	return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
			 struct tee_ta_param *param)
{
	int n;

	/*
	 * When CFG_SECURE_DATA_PATH is enabled, OP-TEE entry allows SHM and
	 * SDP memory references. Only TAs flagged SDP can access SDP memory.
	 */
	if (sess->ts_sess.ctx &&
	    ts_to_ta_ctx(sess->ts_sess.ctx)->flags & TA_FLAG_SECURE_DATA_PATH)
		return true;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		struct param_mem *mem = &param->u[n].mem;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!mem->size)
			continue;
		if (mobj_is_sdp_mem(mem->mobj))
			return false;
	}
	return true;
}
#endif

static void set_invoke_timeout(struct tee_ta_session *sess,
				      uint32_t cancel_req_to)
{
	TEE_Time current_time;
	TEE_Time cancel_time;

	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
		goto infinite;

	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		goto infinite;

	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
			 &cancel_time.seconds))
		goto infinite;

	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
	if (cancel_time.millis > 1000) {
		if (ADD_OVERFLOW(cancel_time.seconds, 1,
				 &cancel_time.seconds))
			goto infinite;

		cancel_time.millis -= 1000;
	}

	sess->cancel_time = cancel_time;
	return;

infinite:
	sess->cancel_time.seconds = UINT32_MAX;
	sess->cancel_time.millis = UINT32_MAX;
}

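/*
 * Worked example (illustrative values): with current_time = { 10 s,
 * 800 ms } and cancel_req_to = 2500 ms, the code above computes
 *	cancel_time.seconds = 10 + 2500 / 1000 = 12
 *	cancel_time.millis  = 800 + 2500 % 1000 = 1300
 * and, since 1300 > 1000, carries the millisecond overflow:
 *	cancel_time = { 13 s, 300 ms }
 * that is, exactly 2.5 s after the current time.
 */
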
/*-----------------------------------------------------------------------------
 * Close a Trusted Application and free available resources
 *---------------------------------------------------------------------------*/
TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
				struct tee_ta_session_head *open_sessions,
				const TEE_Identity *clnt_id)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool keep_alive = false;

	DMSG("csess 0x%" PRIxVA " id %u",
	     (vaddr_t)csess, csess ? csess->id : UINT_MAX);

	if (!csess)
		return TEE_ERROR_ITEM_NOT_FOUND;

	sess = tee_ta_get_session(csess->id, true, open_sessions);

	if (!sess) {
		EMSG("session 0x%" PRIxVA " to be removed is not found",
		     (vaddr_t)csess);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
		tee_ta_put_session(sess);
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
	}

	DMSG("Destroy session");

	ts_ctx = sess->ts_sess.ctx;
	if (!ts_ctx) {
		destroy_session(sess, open_sessions);
		return TEE_SUCCESS;
	}

	ctx = ts_to_ta_ctx(ts_ctx);
	if (ctx->panicked) {
		destroy_session(sess, open_sessions);
	} else {
		tee_ta_set_busy(ctx);
		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
		ts_ctx->ops->enter_close_session(&sess->ts_sess);
		destroy_session(sess, open_sessions);
		tee_ta_clear_busy(ctx);
	}

	mutex_lock(&tee_ta_mutex);

	if (ctx->ref_count <= 0)
		panic();

	ctx->ref_count--;
	keep_alive = (ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) &&
			(ctx->flags & TA_FLAG_SINGLE_INSTANCE);
	if (!ctx->ref_count && !keep_alive) {
		TAILQ_REMOVE(&tee_ctxes, ctx, link);
		mutex_unlock(&tee_ta_mutex);

		destroy_context(ctx);
	} else
		mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}

static TEE_Result tee_ta_init_session_with_context(struct tee_ta_session *s,
						   const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx = NULL;

	while (true) {
		ctx = tee_ta_context_find(uuid);
		if (!ctx)
			return TEE_ERROR_ITEM_NOT_FOUND;

		if (!is_user_ta_ctx(&ctx->ts_ctx) ||
		    !to_user_ta_ctx(&ctx->ts_ctx)->uctx.is_initializing)
			break;
		/*
		 * Context is still initializing, wait here until it's
		 * fully initialized. Note that we're searching for the
		 * context again since it may have been removed while we
		 * were sleeping.
		 */
		condvar_wait(&tee_ta_init_cv, &tee_ta_mutex);
	}

	/*
	 * If the TA isn't single instance it should be loaded as a new
	 * instance instead of doing anything with this instance.
	 * So tell the caller that we didn't find the TA, and the caller
	 * will load a new instance.
	 */
	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * The TA is single instance; if it isn't multi session we
	 * can't create another session unless its reference count is zero.
	 */
	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
		return TEE_ERROR_BUSY;

	DMSG("Re-open TA %pUl", (void *)&ctx->ts_ctx.uuid);

	ctx->ref_count++;
	s->ts_sess.ctx = &ctx->ts_ctx;
	s->ts_sess.handle_svc = s->ts_sess.ctx->ops->handle_svc;
	return TEE_SUCCESS;
}

static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *last = NULL;
	uint32_t saved = 0;
	uint32_t id = 1;

	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
	if (last) {
		/* This value is less likely to be already used */
		id = last->id + 1;
		if (!id)
			id++; /* 0 is not valid */
	}

	saved = id;
	do {
		if (!tee_ta_find_session_nolock(id, open_sessions))
			return id;
		id++;
		if (!id)
			id++;
	} while (id != saved);

	return 0;
}

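/*
 * Example (illustrative only): if the last open session has id
 * UINT32_MAX, the first candidate wraps to 0 and is bumped to 1 since 0
 * is reserved as invalid. The do/while loop then walks forward until a
 * free id is found and returns 0 only if all 2^32 - 1 valid ids are in
 * use.
 */
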
static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
				struct tee_ta_session_head *open_sessions,
				const TEE_UUID *uuid,
				struct tee_ta_session **sess)
{
	TEE_Result res;
	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

	*err = TEE_ORIGIN_TEE;
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->cancel_mask = true;
	condvar_init(&s->refc_cv);
	condvar_init(&s->lock_cv);
	s->lock_thread = THREAD_ID_INVALID;
	s->ref_count = 1;

	mutex_lock(&tee_ta_mutex);
	s->id = new_session_id(open_sessions);
	if (!s->id) {
		res = TEE_ERROR_OVERFLOW;
		goto err_mutex_unlock;
	}

	TAILQ_INSERT_TAIL(open_sessions, s, link);

	/* Look for already loaded TA */
	res = tee_ta_init_session_with_context(s, uuid);
	mutex_unlock(&tee_ta_mutex);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for secure partition */
	res = stmm_init_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for pseudo TA */
	res = tee_ta_init_pseudo_ta_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for user TA */
	res = tee_ta_init_user_ta_session(uuid, s);

out:
	if (!res) {
		*sess = s;
		return TEE_SUCCESS;
	}

	mutex_lock(&tee_ta_mutex);
	TAILQ_REMOVE(open_sessions, s, link);
err_mutex_unlock:
	mutex_unlock(&tee_ta_mutex);
	free(s);
	return res;
}

TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
			       struct tee_ta_session **sess,
			       struct tee_ta_session_head *open_sessions,
			       const TEE_UUID *uuid,
			       const TEE_Identity *clnt_id,
			       uint32_t cancel_req_to,
			       struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_session *s = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool panicked = false;
	bool was_busy = false;

	res = tee_ta_init_session(err, open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("init session failed 0x%x", res);
		return res;
	}

	if (!check_params(s, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ts_ctx = s->ts_sess.ctx;
	if (ts_ctx)
		ctx = ts_to_ta_ctx(ts_ctx);

	if (!ctx || ctx->panicked) {
		DMSG("panicked, call tee_ta_close_session()");
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	*sess = s;
	/* Save identity of the owner of the session */
	s->clnt_id = *clnt_id;

	if (tee_ta_try_set_busy(ctx)) {
		s->param = param;
		set_invoke_timeout(s, cancel_req_to);
		res = ts_ctx->ops->enter_open_session(&s->ts_sess);
		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
		was_busy = true;
	}

	panicked = ctx->panicked;
	s->param = NULL;

	tee_ta_put_session(s);
	if (panicked || (res != TEE_SUCCESS))
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors,
	 * and TEE_ORIGIN_TEE when the TA panicked or the instance was busy.
	 */
	if (panicked || was_busy)
		*err = TEE_ORIGIN_TEE;
	else
		*err = s->err_origin;

	if (res != TEE_SUCCESS)
		EMSG("Failed. Return error 0x%x", res);

	return res;
}

TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id,
				 uint32_t cancel_req_to, uint32_t cmd,
				 struct tee_ta_param *param)
{
	struct tee_ta_ctx *ta_ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	TEE_Result res = TEE_SUCCESS;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	if (!check_params(sess, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ts_ctx = sess->ts_sess.ctx;
	if (!ts_ctx) {
		/* The context has already been destroyed */
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	ta_ctx = ts_to_ta_ctx(ts_ctx);
	if (ta_ctx->panicked) {
		DMSG("Panicked!");
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	tee_ta_set_busy(ta_ctx);

	sess->param = param;
	set_invoke_timeout(sess, cancel_req_to);
	res = ts_ctx->ops->enter_invoke_cmd(&sess->ts_sess, cmd);

	sess->param = NULL;
	tee_ta_clear_busy(ta_ctx);

	if (ta_ctx->panicked) {
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	*err = sess->err_origin;

	/* A short buffer is not an effective error case, don't log it */
	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
		DMSG("Error: %x of %d", res, *err);

	return res;
}

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id)
{
	*err = TEE_ORIGIN_TEE;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	sess->cancel = true;
	return TEE_SUCCESS;
}

bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}

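/*
 * Usage sketch (illustrative only): long-running TA services are
 * expected to poll for cancellation, e.g. from a syscall handler;
 * do_some_work() is a hypothetical stand-in and sess is assumed to be
 * the current session.
 *
 *	while (work_left) {
 *		if (tee_ta_session_is_cancelled(sess, NULL))
 *			return TEE_ERROR_CANCEL;
 *		do_some_work();
 *	}
 *
 * Passing a non-NULL TEE_Time lets a caller that has already read the
 * system time avoid a second tee_time_get_sys_time() call.
 */
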
#if defined(CFG_TA_GPROF_SUPPORT)
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
	struct ts_session *s = ts_get_current_session();
	struct user_ta_ctx *utc = NULL;
	struct sample_buf *sbuf = NULL;
	TEE_Result res = 0;
	size_t idx = 0;

	sbuf = s->sbuf;
	if (!sbuf || !sbuf->enabled)
		return; /* PC sampling is not enabled */

	idx = (((uint64_t)pc - sbuf->offset)/2 * sbuf->scale)/65536;
	if (idx < sbuf->nsamples) {
		utc = to_user_ta_ctx(s->ctx);
		res = vm_check_access_rights(&utc->uctx,
					     TEE_MEMORY_ACCESS_READ |
					     TEE_MEMORY_ACCESS_WRITE |
					     TEE_MEMORY_ACCESS_ANY_OWNER,
					     (uaddr_t)&sbuf->samples[idx],
					     sizeof(*sbuf->samples));
		if (res != TEE_SUCCESS)
			return;
		sbuf->samples[idx]++;
	}
	sbuf->count++;
}

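/*
 * Worked example (illustrative values) for the histogram index above:
 * with sbuf->offset = 0x101000, pc = 0x101800 and sbuf->scale = 65536
 * (one bucket per two bytes of text),
 *	idx = ((0x800 / 2) * 65536) / 65536 = 0x400
 * so the sample lands in bucket 1024, mirroring the classic gprof
 * scheme where 'scale' maps half-word distances to bucket indices.
 */
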
static void gprof_update_session_utime(bool suspend, struct ts_session *s,
				       uint64_t now)
{
	struct sample_buf *sbuf = s->sbuf;

	if (!sbuf)
		return;

	if (suspend) {
		assert(sbuf->usr_entered);
		sbuf->usr += now - sbuf->usr_entered;
		sbuf->usr_entered = 0;
	} else {
		assert(!sbuf->usr_entered);
		if (!now)
			now++; /* 0 is reserved */
		sbuf->usr_entered = now;
	}
}

/*
 * Update user-mode CPU time for the current session
 * @suspend: true if session is being suspended (leaving user mode), false if
 * it is resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
	struct ts_session *s = ts_get_current_session();
	uint64_t now = barrier_read_cntpct();

	gprof_update_session_utime(suspend, s, now);
}

void tee_ta_update_session_utime_suspend(void)
{
	tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
	tee_ta_update_session_utime(false);
}
#endif

#if defined(CFG_FTRACE_SUPPORT)
static void ftrace_update_times(bool suspend)
{
	struct ts_session *s = ts_get_current_session_may_fail();
	struct ftrace_buf *fbuf = NULL;
	uint64_t now = 0;
	uint32_t i = 0;

	if (!s)
		return;

	now = barrier_read_cntpct();

	fbuf = s->fbuf;
	if (!fbuf)
		return;

	if (suspend) {
		fbuf->suspend_time = now;
	} else {
		for (i = 0; i <= fbuf->ret_idx; i++)
			fbuf->begin_time[i] += now - fbuf->suspend_time;
	}
}

void tee_ta_ftrace_update_times_suspend(void)
{
	ftrace_update_times(true);
}

void tee_ta_ftrace_update_times_resume(void)
{
	ftrace_update_times(false);
}
#endif

bool is_ta_ctx(struct ts_ctx *ctx)
{
	return is_user_ta_ctx(ctx) || is_pseudo_ta_ctx(ctx);
}
983