xref: /optee_os/core/kernel/tee_ta_manager.c (revision 5118efbe82358fd69fda6e0158a30e59f59ba09d)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020, Arm Limited
 */

#include <arm.h>
#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/stmm_sp.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee/entry_std.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc_storage.h>
#include <trace.h>
#include <types_ext.h>
#include <user_ta_header.h>
#include <utee_types.h>
#include <util.h>

/*
 * This mutex protects the critical section in tee_ta_init_session() as
 * well as the TA context list, the session lists and the busy and
 * single-instance state below.
 */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
/* This condvar is used when waiting for a TA context to become initialized */
struct condvar tee_ta_init_cv = CONDVAR_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static short int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

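/*
 * With CFG_CONCURRENT_SINGLE_INSTANCE_TA the single-instance lock hooks
 * below are no-ops, so sessions towards a single-instance TA may run
 * concurrently; only the per-context busy flag serializes entry into TAs
 * without TA_FLAG_CONCURRENT.
 */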
#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available. */
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}

static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}
#endif

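/*
 * Mark a TA context busy before entering it. Returns false instead of
 * blocking when this thread already holds the single-instance lock and
 * the context is busy, since waiting in that case would deadlock.
 */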
static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	if (ctx->flags & TA_FLAG_CONCURRENT)
		return true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We already hold the single-instance lock and
			 * the TA is busy. Waiting here would deadlock,
			 * so release the lock and return false instead.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, we're free to
		 * wait for the TA to become available.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either it's already true or we should set it to true */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	if (ctx->flags & TA_FLAG_CONCURRENT)
		return;

	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

	if (!ctx->initializing && (ctx->flags & TA_FLAG_SINGLE_INSTANCE))
		unlock_single_instance();

	ctx->initializing = false;

	mutex_unlock(&tee_ta_mutex);
}

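/*
 * Must be called with tee_ta_mutex held. Wakes the thread waiting in
 * tee_ta_unlink_session() once only that thread's reference remains
 * (ref_count == 1).
 */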
static void dec_session_ref_count(struct tee_ta_session *s)
{
	assert(s->ref_count > 0);
	s->ref_count--;
	if (s->ref_count == 1)
		condvar_signal(&s->refc_cv);
}

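/*
 * Drop the caller's reference to the session and, if this thread holds
 * the exclusive session lock, release it.
 */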
void tee_ta_put_session(struct tee_ta_session *s)
{
	mutex_lock(&tee_ta_mutex);

	if (s->lock_thread == thread_get_id()) {
		s->lock_thread = THREAD_ID_INVALID;
		condvar_signal(&s->lock_cv);
	}
	dec_session_ref_count(s);

	mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *found = NULL;

	TAILQ_FOREACH(s, open_sessions, link) {
		if (s->id == id) {
			found = s;
			break;
		}
	}

	return found;
}

struct tee_ta_session *tee_ta_find_session(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;

	mutex_lock(&tee_ta_mutex);

	s = tee_ta_find_session_nolock(id, open_sessions);

	mutex_unlock(&tee_ta_mutex);

	return s;
}

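/*
 * Look up a session by ID and take a reference to it. Returns NULL if
 * the session doesn't exist or is being unlinked. With @exclusive set,
 * also wait until no other thread holds the session lock and then
 * record this thread as the lock owner.
 */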
struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	mutex_lock(&tee_ta_mutex);

	while (true) {
		s = tee_ta_find_session_nolock(id, open_sessions);
		if (!s)
			break;
		if (s->unlink) {
			s = NULL;
			break;
		}
		s->ref_count++;
		if (!exclusive)
			break;

		assert(s->lock_thread != thread_get_id());

		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
			condvar_wait(&s->lock_cv, &tee_ta_mutex);

		if (s->unlink) {
			dec_session_ref_count(s);
			s = NULL;
			break;
		}

		s->lock_thread = thread_get_id();
		break;
	}

	mutex_unlock(&tee_ta_mutex);
	return s;
}

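/*
 * Mark the session for removal and wait until this thread holds the
 * only remaining reference before taking it off the list. The caller
 * must hold the exclusive session lock.
 */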
static void tee_ta_unlink_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	mutex_lock(&tee_ta_mutex);

	assert(s->ref_count >= 1);
	assert(s->lock_thread == thread_get_id());
	assert(!s->unlink);

	s->unlink = true;
	condvar_broadcast(&s->lock_cv);

	while (s->ref_count != 1)
		condvar_wait(&s->refc_cv, &tee_ta_mutex);

	TAILQ_REMOVE(open_sessions, s, link);

	mutex_unlock(&tee_ta_mutex);
}

static void destroy_session(struct tee_ta_session *s,
			    struct tee_ta_session_head *open_sessions)
{
#if defined(CFG_FTRACE_SUPPORT)
	if (s->ts_sess.ctx && s->ts_sess.ctx->ops->dump_ftrace) {
		ts_push_current_session(&s->ts_sess);
		s->ts_sess.fbuf = NULL;
		s->ts_sess.ctx->ops->dump_ftrace(s->ts_sess.ctx);
		ts_pop_current_session();
	}
#endif

	tee_ta_unlink_session(s, open_sessions);
#if defined(CFG_TA_GPROF_SUPPORT)
	free(s->ts_sess.sbuf);
#endif
	free(s);
}

static void destroy_context(struct tee_ta_ctx *ctx)
{
	DMSG("Destroy TA ctx (0x%" PRIxVA ")", (vaddr_t)ctx);

	condvar_destroy(&ctx->busy_cv);
	pgt_flush_ctx(&ctx->ts_ctx);
	ctx->ts_ctx.ops->destroy(&ctx->ts_ctx);
}

static void destroy_ta_ctx_from_session(struct tee_ta_session *s)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct user_ta_ctx *utc = NULL;
	size_t count = 1; /* start counting the references to the context */

	DMSG("Remove references to context (0x%" PRIxVA ")",
	     (vaddr_t)s->ts_sess.ctx);

	mutex_lock(&tee_ta_mutex);
	nsec_sessions_list_head(&open_sessions);

	/*
	 * The next two loops remove all references to the context that is
	 * about to be destroyed, except for the current session's
	 * reference. The current session is dealt with at the end of this
	 * function, once the context has been destroyed.
	 */

	/*
	 * Scan the entire list of sessions opened by clients from the
	 * non-secure world.
	 */
	TAILQ_FOREACH(sess, open_sessions, link) {
		if (sess->ts_sess.ctx == s->ts_sess.ctx && sess != s) {
			sess->ts_sess.ctx = NULL;
			count++;
		}
	}

	/*
	 * Scan all sessions opened from the secure side by going through
	 * all loaded TA instances and, for each context, scanning its
	 * open sessions.
	 */
	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (is_user_ta_ctx(&ctx->ts_ctx)) {
			utc = to_user_ta_ctx(&ctx->ts_ctx);

			TAILQ_FOREACH(sess, &utc->open_sessions, link) {
				if (sess->ts_sess.ctx == s->ts_sess.ctx &&
				    sess != s) {
					sess->ts_sess.ctx = NULL;
					count++;
				}
			}
		}
	}

	ctx = to_ta_ctx(s->ts_sess.ctx);
	assert(count == ctx->ref_count);

	TAILQ_REMOVE(&tee_ctxes, ctx, link);
	mutex_unlock(&tee_ta_mutex);

	destroy_context(ctx);

	s->ts_sess.ctx = NULL;
}

/*
 * tee_ta_context_find - Find a loaded TA context based on a UUID (input)
 * Returns a pointer to the TA context or NULL if none is found
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx;

	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (memcmp(&ctx->ts_ctx.uuid, uuid, sizeof(TEE_UUID)) == 0)
			return ctx;
	}

	return NULL;
}

/* Check that the requester (client ID) matches the session's initial client */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
	if (id == KERN_IDENTITY)
		return TEE_SUCCESS;

	if (id == NSAPP_IDENTITY) {
		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
			DMSG("nsec tries to hijack TA session");
			return TEE_ERROR_ACCESS_DENIED;
		}
		return TEE_SUCCESS;
	}

	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
		DMSG("client id mismatch");
		return TEE_ERROR_ACCESS_DENIED;
	}
	return TEE_SUCCESS;
}

/*
 * Check if the invocation parameters match the TA properties
 *
 * @sess - current session handle
 * @param - already identified memory references hold a valid 'mobj'
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy)
 * - Only SDP flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
			 struct tee_ta_param *param __unused)
{
	/*
	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
	 * are rejected at OP-TEE core entry. Hence here all TAs have the
	 * same permissions regarding memory reference parameters.
	 */
	return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
			 struct tee_ta_param *param)
{
	int n;

	/*
	 * When CFG_SECURE_DATA_PATH is enabled, OP-TEE entry allows SHM and
	 * SDP memory references. Only TAs flagged SDP can access SDP memory.
	 */
	if (sess->ts_sess.ctx &&
	    to_ta_ctx(sess->ts_sess.ctx)->flags & TA_FLAG_SECURE_DATA_PATH)
		return true;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		struct param_mem *mem = &param->u[n].mem;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!mem->size)
			continue;
		if (mobj_is_sdp_mem(mem->mobj))
			return false;
	}
	return true;
}
#endif

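/*
 * Convert a relative cancellation timeout in milliseconds into an
 * absolute system time. TEE_TIMEOUT_INFINITE and any arithmetic
 * overflow disable the timeout by setting the largest representable
 * time.
 */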
static void set_invoke_timeout(struct tee_ta_session *sess,
				      uint32_t cancel_req_to)
{
	TEE_Time current_time;
	TEE_Time cancel_time;

	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
		goto infinite;

	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		goto infinite;

	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
			 &cancel_time.seconds))
		goto infinite;

	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
	if (cancel_time.millis > 1000) {
		/* Carry the extra second from the millisecond overflow */
		if (ADD_OVERFLOW(cancel_time.seconds, 1,
				 &cancel_time.seconds))
			goto infinite;

		cancel_time.millis -= 1000;
	}

	sess->cancel_time = cancel_time;
	return;

infinite:
	sess->cancel_time.seconds = UINT32_MAX;
	sess->cancel_time.millis = UINT32_MAX;
}

/*-----------------------------------------------------------------------------
 * Close a Trusted Application session and free the associated resources
 *---------------------------------------------------------------------------*/
TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
				struct tee_ta_session_head *open_sessions,
				const TEE_Identity *clnt_id)
{
	struct tee_ta_session *sess;
	struct tee_ta_ctx *ctx;
	bool keep_alive;

	DMSG("csess 0x%" PRIxVA " id %u",
	     (vaddr_t)csess, csess ? csess->id : UINT_MAX);

	if (!csess)
		return TEE_ERROR_ITEM_NOT_FOUND;

	sess = tee_ta_get_session(csess->id, true, open_sessions);

	if (!sess) {
		EMSG("session 0x%" PRIxVA " to be removed is not found",
		     (vaddr_t)csess);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
		tee_ta_put_session(sess);
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
	}

	DMSG("Destroy session");

	if (!sess->ts_sess.ctx) {
		destroy_session(sess, open_sessions);
		return TEE_SUCCESS;
	}

	ctx = to_ta_ctx(sess->ts_sess.ctx);
	if (ctx->panicked) {
		destroy_session(sess, open_sessions);
	} else {
		tee_ta_set_busy(ctx);
		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
		ctx->ts_ctx.ops->enter_close_session(&sess->ts_sess);
		destroy_session(sess, open_sessions);
		tee_ta_clear_busy(ctx);
	}

	mutex_lock(&tee_ta_mutex);

	if (ctx->ref_count <= 0)
		panic();

	ctx->ref_count--;
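	/*
	 * A single-instance TA with TA_FLAG_INSTANCE_KEEP_ALIVE keeps its
	 * context (and thus its state) loaded even after the last session
	 * has been closed.
	 */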
	keep_alive = (ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) &&
			(ctx->flags & TA_FLAG_SINGLE_INSTANCE);
	if (!ctx->ref_count && !keep_alive) {
		TAILQ_REMOVE(&tee_ctxes, ctx, link);
		mutex_unlock(&tee_ta_mutex);

		destroy_context(ctx);
	} else
		mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}

static TEE_Result tee_ta_init_session_with_context(struct tee_ta_session *s,
						   const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx = NULL;

	while (true) {
		ctx = tee_ta_context_find(uuid);
		if (!ctx)
			return TEE_ERROR_ITEM_NOT_FOUND;

		if (!is_user_ta_ctx(&ctx->ts_ctx) ||
		    !to_user_ta_ctx(&ctx->ts_ctx)->uctx.is_initializing)
			break;
		/*
		 * The context is still initializing, wait here until it's
		 * fully initialized. Note that we're searching for the
		 * context again since it may have been removed while we
		 * were sleeping.
		 */
		condvar_wait(&tee_ta_init_cv, &tee_ta_mutex);
	}

	/*
	 * If the TA isn't single-instance it should be loaded as a new
	 * instance instead of doing anything with this instance. So tell
	 * the caller that we didn't find the TA and the caller will load
	 * a new instance.
	 */
	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * The TA is single-instance; if it isn't multi-session we can't
	 * create another session unless its reference count is zero.
	 */
	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
		return TEE_ERROR_BUSY;

	DMSG("Re-open TA %pUl", (void *)&ctx->ts_ctx.uuid);

	ctx->ref_count++;
	s->ts_sess.ctx = &ctx->ts_ctx;
	return TEE_SUCCESS;
}

static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *last = NULL;
	uint32_t saved = 0;
	uint32_t id = 1;

	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
	if (last) {
		/* This value is less likely to be already used */
		id = last->id + 1;
		if (!id)
			id++; /* 0 is not valid */
	}

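	/*
	 * Search linearly from @id, wrapping around and skipping 0, until
	 * a free session ID is found. Returns 0 if the whole ID space is
	 * in use.
	 */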
	saved = id;
	do {
		if (!tee_ta_find_session_nolock(id, open_sessions))
			return id;
		id++;
		if (!id)
			id++;
	} while (id != saved);

	return 0;
}

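/*
 * Create a new session and resolve its target in this order: an already
 * loaded single-instance TA context, a secure partition (StMM), a
 * pseudo TA and finally a user TA loaded from storage.
 */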
static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
				struct tee_ta_session_head *open_sessions,
				const TEE_UUID *uuid,
				struct tee_ta_session **sess)
{
	TEE_Result res;
	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

	*err = TEE_ORIGIN_TEE;
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->cancel_mask = true;
	condvar_init(&s->refc_cv);
	condvar_init(&s->lock_cv);
	s->lock_thread = THREAD_ID_INVALID;
	s->ref_count = 1;

	mutex_lock(&tee_ta_mutex);
	s->id = new_session_id(open_sessions);
	if (!s->id) {
		res = TEE_ERROR_OVERFLOW;
		goto err_mutex_unlock;
	}

	TAILQ_INSERT_TAIL(open_sessions, s, link);

	/* Look for already loaded TA */
	res = tee_ta_init_session_with_context(s, uuid);
	mutex_unlock(&tee_ta_mutex);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for secure partition */
	res = stmm_init_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for pseudo TA */
	res = tee_ta_init_pseudo_ta_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for user TA */
	res = tee_ta_init_user_ta_session(uuid, s);

out:
	if (!res) {
		*sess = s;
		return TEE_SUCCESS;
	}

	mutex_lock(&tee_ta_mutex);
	TAILQ_REMOVE(open_sessions, s, link);
err_mutex_unlock:
	mutex_unlock(&tee_ta_mutex);
	free(s);
	return res;
}

TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
			       struct tee_ta_session **sess,
			       struct tee_ta_session_head *open_sessions,
			       const TEE_UUID *uuid,
			       const TEE_Identity *clnt_id,
			       uint32_t cancel_req_to,
			       struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_session *s = NULL;
	struct tee_ta_ctx *ctx = NULL;
	bool panicked = false;
	bool was_busy = false;

	res = tee_ta_init_session(err, open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("init session failed 0x%x", res);
		return res;
	}

	if (!check_params(s, param))
		return TEE_ERROR_BAD_PARAMETERS;

	if (s->ts_sess.ctx)
		ctx = to_ta_ctx(s->ts_sess.ctx);

	if (!ctx || ctx->panicked) {
		DMSG("panicked, call tee_ta_close_session()");
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	*sess = s;
	/* Save identity of the owner of the session */
	s->clnt_id = *clnt_id;

	if (tee_ta_try_set_busy(ctx)) {
		s->param = param;
		set_invoke_timeout(s, cancel_req_to);
		res = ctx->ts_ctx.ops->enter_open_session(&s->ts_sess);
		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
		was_busy = true;
	}

	panicked = ctx->panicked;
	s->param = NULL;

	tee_ta_put_session(s);
	if (panicked || (res != TEE_SUCCESS))
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular"
	 * errors; panics and busy contention are reported with origin
	 * TEE_ORIGIN_TEE.
	 */
	if (panicked || was_busy)
		*err = TEE_ORIGIN_TEE;
	else
		*err = s->err_origin;

	if (res != TEE_SUCCESS)
		EMSG("Failed. Return error 0x%x", res);

	return res;
}

TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id,
				 uint32_t cancel_req_to, uint32_t cmd,
				 struct tee_ta_param *param)
{
	struct tee_ta_ctx *ta_ctx = NULL;
	TEE_Result res = TEE_SUCCESS;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	if (!check_params(sess, param))
		return TEE_ERROR_BAD_PARAMETERS;

	if (!sess->ts_sess.ctx) {
		/* The context has already been destroyed */
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	ta_ctx = to_ta_ctx(sess->ts_sess.ctx);
	if (ta_ctx->panicked) {
		DMSG("Panicked !");
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	tee_ta_set_busy(ta_ctx);

	sess->param = param;
	set_invoke_timeout(sess, cancel_req_to);
	res = ta_ctx->ts_ctx.ops->enter_invoke_cmd(&sess->ts_sess, cmd);

	sess->param = NULL;
	tee_ta_clear_busy(ta_ctx);

	if (ta_ctx->panicked) {
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	*err = sess->err_origin;

	/* A short buffer is not a genuine error case, so don't log it */
	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
		DMSG("Error: %x of %d", res, *err);

	return res;
}

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id)
{
	*err = TEE_ORIGIN_TEE;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	sess->cancel = true;
	return TEE_SUCCESS;
}

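/*
 * Report whether the session has been cancelled: a masked session is
 * never cancelled, an explicit cancel request always wins, and a
 * cancel_time of UINT32_MAX seconds means no timeout is armed.
 * Otherwise compare the cancellation deadline against @curr_time, or
 * against the current system time when @curr_time is NULL.
 */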
bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}

#if defined(CFG_TA_GPROF_SUPPORT)
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
	struct ts_session *s = ts_get_current_session();
	struct user_ta_ctx *utc = NULL;
	struct sample_buf *sbuf = NULL;
	TEE_Result res = 0;
	size_t idx = 0;

	sbuf = s->sbuf;
	if (!sbuf || !sbuf->enabled)
		return; /* PC sampling is not enabled */

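	/*
	 * Histogram bucket selection in the style of profil(2): 'scale'
	 * is a 16.16 fixed-point factor applied to half the PC offset
	 * into the sampled region.
	 */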
	idx = (((uint64_t)pc - sbuf->offset)/2 * sbuf->scale)/65536;
	if (idx < sbuf->nsamples) {
		utc = to_user_ta_ctx(s->ctx);
		res = vm_check_access_rights(&utc->uctx,
					     TEE_MEMORY_ACCESS_READ |
					     TEE_MEMORY_ACCESS_WRITE |
					     TEE_MEMORY_ACCESS_ANY_OWNER,
					     (uaddr_t)&sbuf->samples[idx],
					     sizeof(*sbuf->samples));
		if (res != TEE_SUCCESS)
			return;
		sbuf->samples[idx]++;
	}
	sbuf->count++;
}

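/*
 * Accumulate user-mode CPU time: on suspend, add the time since the
 * last resume to the running total; on resume, record the current
 * counter value as the entry timestamp.
 */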
static void gprof_update_session_utime(bool suspend, struct ts_session *s,
				       uint64_t now)
{
	struct sample_buf *sbuf = s->sbuf;

	if (!sbuf)
		return;

	if (suspend) {
		assert(sbuf->usr_entered);
		sbuf->usr += now - sbuf->usr_entered;
		sbuf->usr_entered = 0;
	} else {
		assert(!sbuf->usr_entered);
		if (!now)
			now++; /* 0 is reserved */
		sbuf->usr_entered = now;
	}
}

/*
 * Update the user-mode CPU time for the current session
 * @suspend: true if the session is being suspended (leaving user mode),
 * false if it is being resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
	struct ts_session *s = ts_get_current_session();
	uint64_t now = read_cntpct();

	gprof_update_session_utime(suspend, s, now);
}

void tee_ta_update_session_utime_suspend(void)
{
	tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
	tee_ta_update_session_utime(false);
}
#endif

#if defined(CFG_FTRACE_SUPPORT)
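/*
 * Keep ftrace durations free of suspended time: on suspend, note the
 * counter value; on resume, shift the begin timestamp of every function
 * still on the call stack (up to ret_idx) forward by the time spent
 * suspended.
 */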
static void ftrace_update_times(bool suspend)
{
	struct ts_session *s = ts_get_current_session();
	struct ftrace_buf *fbuf = NULL;
	uint64_t now = 0;
	uint32_t i = 0;

	now = read_cntpct();

	fbuf = s->fbuf;
	if (!fbuf)
		return;

	if (suspend) {
		fbuf->suspend_time = now;
	} else {
		for (i = 0; i <= fbuf->ret_idx; i++)
			fbuf->begin_time[i] += now - fbuf->suspend_time;
	}
}

void tee_ta_ftrace_update_times_suspend(void)
{
	ftrace_update_times(true);
}

void tee_ta_ftrace_update_times_resume(void)
{
	ftrace_update_times(false);
}
#endif

bool is_ta_ctx(struct ts_ctx *ctx)
{
	return is_user_ta_ctx(ctx) || is_pseudo_ta_ctx(ctx);
}
957