// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020, Arm Limited
 */

#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/stmm_sp.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee/entry_std.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc_storage.h>
#include <trace.h>
#include <types_ext.h>
#include <user_ta_header.h>
#include <utee_types.h>
#include <util.h>

/* This mutex protects the critical section in tee_ta_init_session */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
/* This condvar is used when waiting for a TA context to become initialized */
struct condvar tee_ta_init_cv = CONDVAR_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static short int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available. */
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}

static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}
#endif
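
/*
 * Illustrative sketch, not part of the original file: the helpers above
 * must only be called with tee_ta_mutex held, and lock/unlock calls must
 * balance (the lock is recursive per thread via the count). The function
 * name is an assumption made for this example; __maybe_unused is assumed
 * to come from <compiler.h>.
 */
static void __maybe_unused example_single_instance_discipline(void)
{
	mutex_lock(&tee_ta_mutex);
	lock_single_instance();		/* may sleep on tee_ta_cv */
	/* ... operate on the single-instance TA here ... */
	unlock_single_instance();	/* wakes one waiter when count hits 0 */
	mutex_unlock(&tee_ta_mutex);
}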

struct tee_ta_session *__noprof to_ta_session(struct ts_session *sess)
{
	assert(is_ta_ctx(sess->ctx) || is_stmm_ctx(sess->ctx));
	return container_of(sess, struct tee_ta_session, ts_sess);
}

static struct tee_ta_ctx *ts_to_ta_ctx(struct ts_ctx *ctx)
{
	if (is_ta_ctx(ctx))
		return to_ta_ctx(ctx);

	if (is_stmm_ctx(ctx))
		return &(to_stmm_ctx(ctx)->ta_ctx);

	panic("bad context");
}

static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	if (ctx->flags & TA_FLAG_CONCURRENT)
		return true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We're holding the single-instance lock and the
			 * TA is busy. Waiting now would only cause a
			 * deadlock, so release the lock and return false.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, so we're
		 * free to wait for the TA to become available.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either it's already true or we should set it to true */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	if (ctx->flags & TA_FLAG_CONCURRENT)
		return;

	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		unlock_single_instance();

	mutex_unlock(&tee_ta_mutex);
}
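
/*
 * Illustrative sketch, not part of the original file: every entry into a
 * non-concurrent TA is bracketed by the busy flag. The function name and
 * the elided entry call are assumptions for this example.
 */
static TEE_Result __maybe_unused example_busy_bracket(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		return TEE_ERROR_BUSY;	/* would deadlock, see above */

	/* ... enter the TA through ctx->ts_ctx.ops here ... */

	tee_ta_clear_busy(ctx);
	return TEE_SUCCESS;
}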

static void dec_session_ref_count(struct tee_ta_session *s)
{
	assert(s->ref_count > 0);
	s->ref_count--;
	if (s->ref_count == 1)
		condvar_signal(&s->refc_cv);
}

void tee_ta_put_session(struct tee_ta_session *s)
{
	mutex_lock(&tee_ta_mutex);

	if (s->lock_thread == thread_get_id()) {
		s->lock_thread = THREAD_ID_INVALID;
		condvar_signal(&s->lock_cv);
	}
	dec_session_ref_count(s);

	mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *found = NULL;

	TAILQ_FOREACH(s, open_sessions, link) {
		if (s->id == id) {
			found = s;
			break;
		}
	}

	return found;
}

struct tee_ta_session *tee_ta_find_session(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;

	mutex_lock(&tee_ta_mutex);

	s = tee_ta_find_session_nolock(id, open_sessions);

	mutex_unlock(&tee_ta_mutex);

	return s;
}

struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	mutex_lock(&tee_ta_mutex);

	while (true) {
		s = tee_ta_find_session_nolock(id, open_sessions);
		if (!s)
			break;
		if (s->unlink) {
			s = NULL;
			break;
		}
		s->ref_count++;
		if (!exclusive)
			break;

		assert(s->lock_thread != thread_get_id());

		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
			condvar_wait(&s->lock_cv, &tee_ta_mutex);

		if (s->unlink) {
			dec_session_ref_count(s);
			s = NULL;
			break;
		}

		s->lock_thread = thread_get_id();
		break;
	}

	mutex_unlock(&tee_ta_mutex);
	return s;
}
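
/*
 * Illustrative sketch, not part of the original file: taking and releasing
 * an exclusive session reference. The function name is an assumption for
 * this example.
 */
static void __maybe_unused example_session_lookup(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	/* Blocks while another thread holds the session exclusively */
	struct tee_ta_session *s = tee_ta_get_session(id, true, open_sessions);

	if (!s)
		return;	/* not found, or about to be unlinked */

	/* ... s->lock_thread is now this thread ... */

	tee_ta_put_session(s);	/* releases the lock and the reference */
}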

static void tee_ta_unlink_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	mutex_lock(&tee_ta_mutex);

	assert(s->ref_count >= 1);
	assert(s->lock_thread == thread_get_id());
	assert(!s->unlink);

	s->unlink = true;
	condvar_broadcast(&s->lock_cv);

	while (s->ref_count != 1)
		condvar_wait(&s->refc_cv, &tee_ta_mutex);

	TAILQ_REMOVE(open_sessions, s, link);

	mutex_unlock(&tee_ta_mutex);
}

static void destroy_session(struct tee_ta_session *s,
			    struct tee_ta_session_head *open_sessions)
{
#if defined(CFG_FTRACE_SUPPORT)
	if (s->ts_sess.ctx && s->ts_sess.ctx->ops->dump_ftrace) {
		ts_push_current_session(&s->ts_sess);
		s->ts_sess.fbuf = NULL;
		s->ts_sess.ctx->ops->dump_ftrace(s->ts_sess.ctx);
		ts_pop_current_session();
	}
#endif

	tee_ta_unlink_session(s, open_sessions);
#if defined(CFG_TA_GPROF_SUPPORT)
	free(s->ts_sess.sbuf);
#endif
	free(s);
}

static void destroy_context(struct tee_ta_ctx *ctx)
{
	DMSG("Destroy TA ctx (0x%" PRIxVA ")", (vaddr_t)ctx);

	condvar_destroy(&ctx->busy_cv);
	pgt_flush_ctx(&ctx->ts_ctx);
	ctx->ts_ctx.ops->destroy(&ctx->ts_ctx);
}

static void destroy_ta_ctx_from_session(struct tee_ta_session *s)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct user_ta_ctx *utc = NULL;
	struct ts_ctx *ts_ctx = s->ts_sess.ctx;
	size_t count = 1; /* start counting the references to the context */

	DMSG("Remove references to context (%#"PRIxVA")", (vaddr_t)ts_ctx);

	mutex_lock(&tee_ta_mutex);
	nsec_sessions_list_head(&open_sessions);
	/*
	 * The next two loops remove all references to the context that is
	 * about to be destroyed, except for the current session. That
	 * reference is cleared later in this function, once the context
	 * has been destroyed.
	 */

	/*
	 * Scan the entire list of sessions opened by clients from the
	 * non-secure world.
	 */
	TAILQ_FOREACH(sess, open_sessions, link) {
		if (sess->ts_sess.ctx == ts_ctx && sess != s) {
			sess->ts_sess.ctx = NULL;
			count++;
		}
	}

	/*
	 * Scan all sessions opened from the secure side by searching
	 * through all available TA instances and, for each context,
	 * scanning all of its open sessions.
	 */
	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (is_user_ta_ctx(&ctx->ts_ctx)) {
			utc = to_user_ta_ctx(&ctx->ts_ctx);

			TAILQ_FOREACH(sess, &utc->open_sessions, link) {
				if (sess->ts_sess.ctx == ts_ctx &&
				    sess != s) {
					sess->ts_sess.ctx = NULL;
					count++;
				}
			}
		}
	}

	ctx = ts_to_ta_ctx(ts_ctx);
	assert(count == ctx->ref_count);

	TAILQ_REMOVE(&tee_ctxes, ctx, link);
	mutex_unlock(&tee_ta_mutex);

	destroy_context(ctx);
	s->ts_sess.ctx = NULL;
}

/*
 * tee_ta_context_find - Find a TA context in the context list based on a
 * UUID. Returns a pointer to the TA context or NULL if not found.
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx;

	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (memcmp(&ctx->ts_ctx.uuid, uuid, sizeof(TEE_UUID)) == 0)
			return ctx;
	}

	return NULL;
}

/* Check if the requester (client ID) matches the session's initial client */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
	if (id == KERN_IDENTITY)
		return TEE_SUCCESS;

	if (id == NSAPP_IDENTITY) {
		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
			DMSG("nsec tries to hijack TA session");
			return TEE_ERROR_ACCESS_DENIED;
		}
		return TEE_SUCCESS;
	}

	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
		DMSG("client id mismatch");
		return TEE_ERROR_ACCESS_DENIED;
	}
	return TEE_SUCCESS;
}

/*
 * Check if invocation parameters match the TA properties
 *
 * @sess - current session handle
 * @param - already identified memory references hold a valid 'mobj'.
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy)
 * - Only SDP flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
			 struct tee_ta_param *param __unused)
{
	/*
	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
	 * are rejected at OP-TEE core entry. Hence here all TAs have the
	 * same permissions regarding memory reference parameters.
	 */
	return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
			 struct tee_ta_param *param)
{
	int n;

	/*
	 * When CFG_SECURE_DATA_PATH is enabled, OP-TEE entry allows SHM and
	 * SDP memory references. Only TAs flagged SDP can access SDP memory.
	 */
	if (sess->ts_sess.ctx &&
	    ts_to_ta_ctx(sess->ts_sess.ctx)->flags & TA_FLAG_SECURE_DATA_PATH)
		return true;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		struct param_mem *mem = &param->u[n].mem;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!mem->size)
			continue;
		if (mobj_is_sdp_mem(mem->mobj))
			return false;
	}
	return true;
}
#endif

static void set_invoke_timeout(struct tee_ta_session *sess,
			       uint32_t cancel_req_to)
{
	TEE_Time current_time;
	TEE_Time cancel_time;

	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
		goto infinite;

	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		goto infinite;

	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
			 &cancel_time.seconds))
		goto infinite;

	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
	if (cancel_time.millis >= 1000) {
		if (ADD_OVERFLOW(cancel_time.seconds, 1,
				 &cancel_time.seconds))
			goto infinite;

		cancel_time.millis -= 1000;
	}

	sess->cancel_time = cancel_time;
	return;

infinite:
	sess->cancel_time.seconds = UINT32_MAX;
	sess->cancel_time.millis = UINT32_MAX;
}
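
/*
 * Worked example (added for illustration): with current time 10 s / 800 ms
 * and cancel_req_to = 2500 ms, set_invoke_timeout() computes
 * seconds = 10 + 2 = 12 and millis = 800 + 500 = 1300; the carry then
 * yields a cancellation deadline of 13 s / 300 ms. The carry must be added
 * to cancel_time.seconds (not current_time.seconds) and only once, which
 * is what the code above does.
 */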

/*-----------------------------------------------------------------------------
 * Close a Trusted Application and free available resources
 *---------------------------------------------------------------------------*/
TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
				struct tee_ta_session_head *open_sessions,
				const TEE_Identity *clnt_id)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool keep_alive = false;

	DMSG("csess 0x%" PRIxVA " id %u",
	     (vaddr_t)csess, csess ? csess->id : UINT_MAX);

	if (!csess)
		return TEE_ERROR_ITEM_NOT_FOUND;

	sess = tee_ta_get_session(csess->id, true, open_sessions);

	if (!sess) {
		EMSG("session 0x%" PRIxVA " to be removed is not found",
		     (vaddr_t)csess);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
		tee_ta_put_session(sess);
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
	}

	DMSG("Destroy session");

	ts_ctx = sess->ts_sess.ctx;
	if (!ts_ctx) {
		destroy_session(sess, open_sessions);
		return TEE_SUCCESS;
	}

	ctx = ts_to_ta_ctx(ts_ctx);
	if (ctx->panicked) {
		destroy_session(sess, open_sessions);
	} else {
		tee_ta_set_busy(ctx);
		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
		ts_ctx->ops->enter_close_session(&sess->ts_sess);
		destroy_session(sess, open_sessions);
		tee_ta_clear_busy(ctx);
	}

	mutex_lock(&tee_ta_mutex);

	if (ctx->ref_count <= 0)
		panic();

	ctx->ref_count--;
	keep_alive = (ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) &&
			(ctx->flags & TA_FLAG_SINGLE_INSTANCE);
	if (!ctx->ref_count && !keep_alive) {
		TAILQ_REMOVE(&tee_ctxes, ctx, link);
		mutex_unlock(&tee_ta_mutex);

		destroy_context(ctx);
	} else
		mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}

static TEE_Result tee_ta_init_session_with_context(struct tee_ta_session *s,
						   const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx = NULL;

	while (true) {
		ctx = tee_ta_context_find(uuid);
		if (!ctx)
			return TEE_ERROR_ITEM_NOT_FOUND;

		if (!is_user_ta_ctx(&ctx->ts_ctx) ||
		    !to_user_ta_ctx(&ctx->ts_ctx)->uctx.is_initializing)
			break;
		/*
		 * The context is still initializing, wait here until it's
		 * fully initialized. Note that we're searching for the
		 * context again since it may have been removed while we
		 * were sleeping.
		 */
		condvar_wait(&tee_ta_init_cv, &tee_ta_mutex);
	}

	/*
	 * If the TA isn't single-instance it should be loaded as a new
	 * instance instead of doing anything with this instance.
	 * So tell the caller that we didn't find the TA and the
	 * caller will load a new instance.
	 */
	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * The TA is single-instance; if it isn't multi-session we
	 * can't create another session unless its reference count is zero.
	 */
	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
		return TEE_ERROR_BUSY;

	DMSG("Re-open TA %pUl", (void *)&ctx->ts_ctx.uuid);

	ctx->ref_count++;
	s->ts_sess.ctx = &ctx->ts_ctx;
	s->ts_sess.handle_svc = s->ts_sess.ctx->ops->handle_svc;
	return TEE_SUCCESS;
}

static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *last = NULL;
	uint32_t saved = 0;
	uint32_t id = 1;

	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
	if (last) {
		/* This value is unlikely to already be in use */
		id = last->id + 1;
		if (!id)
			id++; /* 0 is not valid */
	}

	saved = id;
	do {
		if (!tee_ta_find_session_nolock(id, open_sessions))
			return id;
		id++;
		if (!id)
			id++;
	} while (id != saved);

	return 0;
}
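
/*
 * Example (added for illustration): if the last open session has
 * id UINT32_MAX, the candidate id wraps to 0 and is bumped to 1; the
 * do/while above then scans until a free id is found, returning 0 only
 * if every valid id is already in use.
 */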

static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
				struct tee_ta_session_head *open_sessions,
				const TEE_UUID *uuid,
				struct tee_ta_session **sess)
{
	TEE_Result res;
	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

	*err = TEE_ORIGIN_TEE;
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->cancel_mask = true;
	condvar_init(&s->refc_cv);
	condvar_init(&s->lock_cv);
	s->lock_thread = THREAD_ID_INVALID;
	s->ref_count = 1;

	mutex_lock(&tee_ta_mutex);
	s->id = new_session_id(open_sessions);
	if (!s->id) {
		res = TEE_ERROR_OVERFLOW;
		goto err_mutex_unlock;
	}

	TAILQ_INSERT_TAIL(open_sessions, s, link);

	/* Look for already loaded TA */
	res = tee_ta_init_session_with_context(s, uuid);
	mutex_unlock(&tee_ta_mutex);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for secure partition */
	res = stmm_init_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for pseudo TA */
	res = tee_ta_init_pseudo_ta_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for user TA */
	res = tee_ta_init_user_ta_session(uuid, s);

out:
	if (!res) {
		*sess = s;
		return TEE_SUCCESS;
	}

	mutex_lock(&tee_ta_mutex);
	TAILQ_REMOVE(open_sessions, s, link);
err_mutex_unlock:
	mutex_unlock(&tee_ta_mutex);
	free(s);
	return res;
}

TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
			       struct tee_ta_session **sess,
			       struct tee_ta_session_head *open_sessions,
			       const TEE_UUID *uuid,
			       const TEE_Identity *clnt_id,
			       uint32_t cancel_req_to,
			       struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_session *s = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool panicked = false;
	bool was_busy = false;

	res = tee_ta_init_session(err, open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("init session failed 0x%x", res);
		return res;
	}

	if (!check_params(s, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ts_ctx = s->ts_sess.ctx;
	if (ts_ctx)
		ctx = ts_to_ta_ctx(ts_ctx);

	if (!ctx || ctx->panicked) {
		DMSG("panicked, call tee_ta_close_session()");
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	*sess = s;
	/* Save identity of the owner of the session */
	s->clnt_id = *clnt_id;

	if (tee_ta_try_set_busy(ctx)) {
		s->param = param;
		set_invoke_timeout(s, cancel_req_to);
		res = ts_ctx->ops->enter_open_session(&s->ts_sess);
		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
		was_busy = true;
	}

	panicked = ctx->panicked;
	s->param = NULL;

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors;
	 * it is TEE_ORIGIN_TEE when the TA panicked or the instance was
	 * busy.
	 */
	if (panicked || was_busy)
		*err = TEE_ORIGIN_TEE;
	else
		*err = s->err_origin;

	tee_ta_put_session(s);
	if (panicked || res != TEE_SUCCESS)
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

	if (res != TEE_SUCCESS)
		EMSG("Failed. Return error 0x%x", res);

	return res;
}

TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id,
				 uint32_t cancel_req_to, uint32_t cmd,
				 struct tee_ta_param *param)
{
	struct tee_ta_ctx *ta_ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	TEE_Result res = TEE_SUCCESS;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	if (!check_params(sess, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ts_ctx = sess->ts_sess.ctx;
	if (!ts_ctx) {
		/* The context has already been destroyed */
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	ta_ctx = ts_to_ta_ctx(ts_ctx);
	if (ta_ctx->panicked) {
		DMSG("Panicked!");
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	tee_ta_set_busy(ta_ctx);

	sess->param = param;
	set_invoke_timeout(sess, cancel_req_to);
	res = ts_ctx->ops->enter_invoke_cmd(&sess->ts_sess, cmd);

	sess->param = NULL;
	tee_ta_clear_busy(ta_ctx);

	if (ta_ctx->panicked) {
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	*err = sess->err_origin;

	/* A short buffer is not treated as an effective error case */
	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
		DMSG("Error: %x of %d", res, *err);

	return res;
}

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id)
{
	*err = TEE_ORIGIN_TEE;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	sess->cancel = true;
	return TEE_SUCCESS;
}

bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}
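
/*
 * Illustrative sketch, not part of the original file: a long-running
 * command handler would poll tee_ta_session_is_cancelled() between
 * bounded units of work. The function name, the loop body and the use of
 * TEE_ERROR_CANCEL are assumptions for this example.
 */
static TEE_Result __maybe_unused example_cancellable_work(
			struct tee_ta_session *s)
{
	size_t n = 0;

	for (n = 0; n < 100; n++) {
		if (tee_ta_session_is_cancelled(s, NULL))
			return TEE_ERROR_CANCEL;
		/* ... do one bounded unit of work ... */
	}

	return TEE_SUCCESS;
}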

#if defined(CFG_TA_GPROF_SUPPORT)
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
	struct ts_session *s = ts_get_current_session();
	struct user_ta_ctx *utc = NULL;
	struct sample_buf *sbuf = NULL;
	TEE_Result res = 0;
	size_t idx = 0;

	sbuf = s->sbuf;
	if (!sbuf || !sbuf->enabled)
		return; /* PC sampling is not enabled */

	idx = (((uint64_t)pc - sbuf->offset)/2 * sbuf->scale)/65536;
	if (idx < sbuf->nsamples) {
		utc = to_user_ta_ctx(s->ctx);
		res = vm_check_access_rights(&utc->uctx,
					     TEE_MEMORY_ACCESS_READ |
					     TEE_MEMORY_ACCESS_WRITE |
					     TEE_MEMORY_ACCESS_ANY_OWNER,
					     (uaddr_t)&sbuf->samples[idx],
					     sizeof(*sbuf->samples));
		if (res != TEE_SUCCESS)
			return;
		sbuf->samples[idx]++;
	}
	sbuf->count++;
}
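
/*
 * Worked example (added for illustration): with sbuf->offset = 0x1000,
 * sbuf->scale = 0x8000 and pc = 0x1400, the index above is
 * ((0x400 / 2) * 0x8000) / 65536 = 0x100, i.e. each histogram slot
 * covers two bytes of text, scaled by scale/65536.
 */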

static void gprof_update_session_utime(bool suspend, struct ts_session *s,
				       uint64_t now)
{
	struct sample_buf *sbuf = s->sbuf;

	if (!sbuf)
		return;

	if (suspend) {
		assert(sbuf->usr_entered);
		sbuf->usr += now - sbuf->usr_entered;
		sbuf->usr_entered = 0;
	} else {
		assert(!sbuf->usr_entered);
		if (!now)
			now++; /* 0 is reserved */
		sbuf->usr_entered = now;
	}
}

/*
 * Update user-mode CPU time for the current session
 * @suspend: true if session is being suspended (leaving user mode), false if
 * it is resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
	struct ts_session *s = ts_get_current_session();
	uint64_t now = barrier_read_counter_timer();

	gprof_update_session_utime(suspend, s, now);
}

void tee_ta_update_session_utime_suspend(void)
{
	tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
	tee_ta_update_session_utime(false);
}
#endif

#if defined(CFG_FTRACE_SUPPORT)
static void ftrace_update_times(bool suspend)
{
	struct ts_session *s = ts_get_current_session_may_fail();
	struct ftrace_buf *fbuf = NULL;
	uint64_t now = 0;
	uint32_t i = 0;

	if (!s)
		return;

	now = barrier_read_counter_timer();

	fbuf = s->fbuf;
	if (!fbuf)
		return;

	if (suspend) {
		fbuf->suspend_time = now;
	} else {
		for (i = 0; i <= fbuf->ret_idx; i++)
			fbuf->begin_time[i] += now - fbuf->suspend_time;
	}
}

void tee_ta_ftrace_update_times_suspend(void)
{
	ftrace_update_times(true);
}

void tee_ta_ftrace_update_times_resume(void)
{
	ftrace_update_times(false);
}
#endif

bool is_ta_ctx(struct ts_ctx *ctx)
{
	return is_user_ta_ctx(ctx) || is_pseudo_ta_ctx(ctx);
}