// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <types_ext.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <arm.h>
#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_ta.h>
#include <mm/core_mmu.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/pgt_cache.h>	/* pgt_flush_ctx(), used by destroy_context() */
#include <mm/tee_mmu.h>
#include <tee/entry_std.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_storage.h>
#include <tee_api_types.h>
#include <trace.h>
#include <utee_types.h>
#include <util.h>

/*
 * This mutex protects the critical section in tee_ta_init_session() as
 * well as the context list (tee_ctxes), the open-session lists and the
 * busy/lock state of sessions and contexts below.
 */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

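/*
 * When CFG_CONCURRENT_SINGLE_INSTANCE_TA is enabled, sessions towards a
 * single-instance TA may run concurrently, so the single-instance lock
 * isn't needed and the helpers below become no-ops.
 */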
#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available. */
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}

static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}
#endif

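/*
 * Mark a TA context busy before entering it. Concurrent TAs are never
 * serialized. For other TAs we wait on the context's busy condvar, except
 * when this thread already holds the single-instance lock while the
 * context is busy: waiting then could deadlock, so we fail instead.
 */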
static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	if (ctx->flags & TA_FLAG_CONCURRENT)
		return true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->initializing) {
		/*
		 * Context is still initializing and flags cannot be relied
		 * on for user TAs. Wait here until it's initialized.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We're holding the single-instance lock and the
			 * TA is busy. Waiting now would only cause a
			 * deadlock, so release the lock and return false.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, we're free to
		 * wait for the TA to become available.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either it's already true or we should set it to true */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	if (ctx->flags & TA_FLAG_CONCURRENT)
		return;

	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

	if (!ctx->initializing && (ctx->flags & TA_FLAG_SINGLE_INSTANCE))
		unlock_single_instance();

	ctx->initializing = false;

	mutex_unlock(&tee_ta_mutex);
}

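/*
 * Decrease the session's reference counter with tee_ta_mutex held.
 * tee_ta_unlink_session() waits until only its own reference
 * (ref_count == 1) remains, so signal the refc condvar when that point
 * is reached.
 */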
static void dec_session_ref_count(struct tee_ta_session *s)
{
	assert(s->ref_count > 0);
	s->ref_count--;
	if (s->ref_count == 1)
		condvar_signal(&s->refc_cv);
}

void tee_ta_put_session(struct tee_ta_session *s)
{
	mutex_lock(&tee_ta_mutex);

	if (s->lock_thread == thread_get_id()) {
		s->lock_thread = THREAD_ID_INVALID;
		condvar_signal(&s->lock_cv);
	}
	dec_session_ref_count(s);

	mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *found = NULL;

	TAILQ_FOREACH(s, open_sessions, link) {
		if (s->id == id) {
			found = s;
			break;
		}
	}

	return found;
}

struct tee_ta_session *tee_ta_find_session(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;

	mutex_lock(&tee_ta_mutex);

	s = tee_ta_find_session_nolock(id, open_sessions);

	mutex_unlock(&tee_ta_mutex);

	return s;
}

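/*
 * Look up an open session by ID and take a reference on it. With
 * @exclusive set, the session is additionally locked to the calling
 * thread and the call blocks until a previous holder releases it.
 * Returns NULL if the session doesn't exist or is being unlinked.
 */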
struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	mutex_lock(&tee_ta_mutex);

	while (true) {
		s = tee_ta_find_session_nolock(id, open_sessions);
		if (!s)
			break;
		if (s->unlink) {
			s = NULL;
			break;
		}
		s->ref_count++;
		if (!exclusive)
			break;

		assert(s->lock_thread != thread_get_id());

		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
			condvar_wait(&s->lock_cv, &tee_ta_mutex);

		if (s->unlink) {
			dec_session_ref_count(s);
			s = NULL;
			break;
		}

		s->lock_thread = thread_get_id();
		break;
	}

	mutex_unlock(&tee_ta_mutex);
	return s;
}

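/*
 * Remove the session from the open-session list. The caller must hold
 * the exclusive session lock. Waking all waiters and draining every
 * other reference guarantees that no one can observe the session once
 * it has been unlinked.
 */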
static void tee_ta_unlink_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	mutex_lock(&tee_ta_mutex);

	assert(s->ref_count >= 1);
	assert(s->lock_thread == thread_get_id());
	assert(!s->unlink);

	s->unlink = true;
	condvar_broadcast(&s->lock_cv);

	while (s->ref_count != 1)
		condvar_wait(&s->refc_cv, &tee_ta_mutex);

	TAILQ_REMOVE(open_sessions, s, link);

	mutex_unlock(&tee_ta_mutex);
}

static void destroy_session(struct tee_ta_session *s,
			    struct tee_ta_session_head *open_sessions)
{
#if defined(CFG_TA_FTRACE_SUPPORT)
	if (s->ctx && s->ctx->ops->dump_ftrace) {
		tee_ta_push_current_session(s);
		s->ctx->ops->dump_ftrace(s->ctx);
		tee_ta_pop_current_session();
	}
#endif

	tee_ta_unlink_session(s, open_sessions);
#if defined(CFG_TA_GPROF_SUPPORT)
	free(s->sbuf);
#endif
	free(s);
}

static void destroy_context(struct tee_ta_ctx *ctx)
{
	DMSG("Destroy TA ctx (0x%" PRIxVA ")", (vaddr_t)ctx);

	condvar_destroy(&ctx->busy_cv);
	pgt_flush_ctx(ctx);
	ctx->ops->destroy(ctx);
}

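/*
 * Called when a TA context must go away while other sessions towards it
 * may still be open (typically after a panic): strip the context pointer
 * from every other session referencing it, then tear the context down.
 */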
static void destroy_ta_ctx_from_session(struct tee_ta_session *s)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct user_ta_ctx *utc = NULL;
	size_t count = 1; /* start counting the references to the context */

	DMSG("Remove references to context (0x%" PRIxVA ")", (vaddr_t)s->ctx);

	mutex_lock(&tee_ta_mutex);
	nsec_sessions_list_head(&open_sessions);

	/*
	 * The next two loops remove all references to the context which
	 * is about to be destroyed, except the one held by the current
	 * session. That reference is cleared at the end of this function,
	 * once the context has been destroyed.
	 */

	/*
	 * Scan the entire list of opened sessions by the clients from
	 * non-secure world.
	 */
	TAILQ_FOREACH(sess, open_sessions, link) {
		if (sess->ctx == s->ctx && sess != s) {
			sess->ctx = NULL;
			count++;
		}
	}

	/*
	 * Scan all sessions opened from secure side by searching through
	 * all available TA instances and for each context, scan all opened
	 * sessions.
	 */
	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (is_user_ta_ctx(ctx)) {
			utc = to_user_ta_ctx(ctx);

			TAILQ_FOREACH(sess, &utc->open_sessions, link) {
				if (sess->ctx == s->ctx && sess != s) {
					sess->ctx = NULL;
					count++;
				}
			}
		}
	}

	assert(count == s->ctx->ref_count);

	TAILQ_REMOVE(&tee_ctxes, s->ctx, link);
	mutex_unlock(&tee_ta_mutex);

	destroy_context(s->ctx);

	s->ctx = NULL;
}

/*
 * tee_ta_context_find - Find a loaded TA context based on a UUID (input)
 * Returns a pointer to the context, or NULL if it isn't loaded
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx;

	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (memcmp(&ctx->uuid, uuid, sizeof(TEE_UUID)) == 0)
			return ctx;
	}

	return NULL;
}

/* Check if the requester (client ID) matches the session's initial client */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
	if (id == KERN_IDENTITY)
		return TEE_SUCCESS;

	if (id == NSAPP_IDENTITY) {
		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
			DMSG("nsec tries to hijack TA session");
			return TEE_ERROR_ACCESS_DENIED;
		}
		return TEE_SUCCESS;
	}

	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
		DMSG("client id mismatch");
		return TEE_ERROR_ACCESS_DENIED;
	}
	return TEE_SUCCESS;
}

/*
 * Check if the invocation parameters match the TA properties
 *
 * @sess - current session handle
 * @param - already identified memory references hold a valid 'mobj'
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy).
 * - Only SDP-flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
			 struct tee_ta_param *param __unused)
{
	/*
	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
	 * are rejected at OP-TEE core entry. Hence all TAs have the same
	 * permissions regarding memory reference parameters.
	 */
	return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
			 struct tee_ta_param *param)
{
	int n;

	/*
	 * When CFG_SECURE_DATA_PATH is enabled, OP-TEE entry allows SHM and
	 * SDP memory references. Only TAs flagged SDP can access SDP memory.
	 */
	if (sess->ctx && sess->ctx->flags & TA_FLAG_SECURE_DATA_PATH)
		return true;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		struct param_mem *mem = &param->u[n].mem;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!mem->size)
			continue;
		if (mobj_is_sdp_mem(mem->mobj))
			return false;
	}
	return true;
}
#endif

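/*
 * Convert a relative cancellation timeout in milliseconds into an
 * absolute system time stored in the session. TEE_TIMEOUT_INFINITE (and
 * any arithmetic overflow) is encoded as { UINT32_MAX, UINT32_MAX }.
 */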
static void set_invoke_timeout(struct tee_ta_session *sess,
			       uint32_t cancel_req_to)
{
	TEE_Time current_time;
	TEE_Time cancel_time;

	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
		goto infinite;

	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		goto infinite;

	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
			 &cancel_time.seconds))
		goto infinite;

	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
	if (cancel_time.millis >= 1000) {
		/* Carry the extra second without losing the sum above */
		if (ADD_OVERFLOW(cancel_time.seconds, 1,
				 &cancel_time.seconds))
			goto infinite;

		cancel_time.millis -= 1000;
	}

	sess->cancel_time = cancel_time;
	return;

infinite:
	sess->cancel_time.seconds = UINT32_MAX;
	sess->cancel_time.millis = UINT32_MAX;
}

/*-----------------------------------------------------------------------------
 * Close a Trusted Application and free available resources
 *---------------------------------------------------------------------------*/
TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
				struct tee_ta_session_head *open_sessions,
				const TEE_Identity *clnt_id)
{
	struct tee_ta_session *sess;
	struct tee_ta_ctx *ctx;
	bool keep_alive;

	if (!csess)
		return TEE_ERROR_ITEM_NOT_FOUND;

	DMSG("csess 0x%" PRIxVA " id %u", (vaddr_t)csess, csess->id);

	sess = tee_ta_get_session(csess->id, true, open_sessions);

	if (!sess) {
		EMSG("session 0x%" PRIxVA " to be removed is not found",
		     (vaddr_t)csess);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
		tee_ta_put_session(sess);
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
	}

	ctx = sess->ctx;
	DMSG("Destroy session");

	if (!ctx) {
		destroy_session(sess, open_sessions);
		return TEE_SUCCESS;
	}

	if (ctx->panicked) {
		destroy_session(sess, open_sessions);
	} else {
		tee_ta_set_busy(ctx);
		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
		ctx->ops->enter_close_session(sess);
		destroy_session(sess, open_sessions);
		tee_ta_clear_busy(ctx);
	}

	mutex_lock(&tee_ta_mutex);

	if (!ctx->ref_count)
		panic();

	ctx->ref_count--;
	keep_alive = (ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) &&
			(ctx->flags & TA_FLAG_SINGLE_INSTANCE);
	if (!ctx->ref_count && !keep_alive) {
		TAILQ_REMOVE(&tee_ctxes, ctx, link);
		mutex_unlock(&tee_ta_mutex);

		destroy_context(ctx);
	} else
		mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}

static TEE_Result tee_ta_init_session_with_context(struct tee_ta_ctx *ctx,
			struct tee_ta_session *s)
{
	/*
	 * If the TA isn't single-instance it should be loaded as a new
	 * instance instead of doing anything with this instance. So tell
	 * the caller that we didn't find the TA and the caller will load
	 * a new instance.
	 */
	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * The TA is single-instance. If it isn't multi-session we can't
	 * create another session unless its reference count is zero.
	 */
	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
		return TEE_ERROR_BUSY;

	DMSG("Re-open TA %pUl", (void *)&ctx->uuid);

	ctx->ref_count++;
	s->ctx = ctx;
	return TEE_SUCCESS;
}

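/*
 * Allocate a non-zero session ID. Start just above the highest ID
 * currently in the list and search with wrap-around; 0 is returned only
 * if all valid IDs are in use.
 */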
static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *last = NULL;
	uint32_t saved = 0;
	uint32_t id = 1;

	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
	if (last) {
		/* This value is less likely to be already used */
		id = last->id + 1;
		if (!id)
			id++; /* 0 is not valid */
	}

	saved = id;
	do {
		if (!tee_ta_find_session_nolock(id, open_sessions))
			return id;
		id++;
		if (!id)
			id++;
	} while (id != saved);

	return 0;
}

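/*
 * Allocate a session, give it a unique ID, link it into @open_sessions
 * and bind it to a TA context: an already loaded single-instance TA, a
 * pseudo TA, or a freshly loaded user TA, tried in that order.
 */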
static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
				struct tee_ta_session_head *open_sessions,
				const TEE_UUID *uuid,
				struct tee_ta_session **sess)
{
	TEE_Result res;
	struct tee_ta_ctx *ctx;
	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

	*err = TEE_ORIGIN_TEE;
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->cancel_mask = true;
	condvar_init(&s->refc_cv);
	condvar_init(&s->lock_cv);
	s->lock_thread = THREAD_ID_INVALID;
	s->ref_count = 1;

	/*
	 * We take the global TA mutex here and hold it while doing
	 * RPC to load the TA. This big critical section should be broken
	 * down into smaller pieces.
	 */
	mutex_lock(&tee_ta_mutex);
	s->id = new_session_id(open_sessions);
	if (!s->id) {
		/* Not linked yet, don't go through the removal at "out" */
		mutex_unlock(&tee_ta_mutex);
		free(s);
		return TEE_ERROR_OVERFLOW;
	}
	TAILQ_INSERT_TAIL(open_sessions, s, link);

	/* Look for already loaded TA */
	ctx = tee_ta_context_find(uuid);
	if (ctx) {
		res = tee_ta_init_session_with_context(ctx, s);
		if (res != TEE_ERROR_ITEM_NOT_FOUND)
			goto out;
	}

	/* Look for pseudo TA */
	res = tee_ta_init_pseudo_ta_session(uuid, s);
	if (res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for user TA */
	res = tee_ta_init_user_ta_session(uuid, s);

out:
	if (res == TEE_SUCCESS) {
		*sess = s;
	} else {
		TAILQ_REMOVE(open_sessions, s, link);
		free(s);
	}
	mutex_unlock(&tee_ta_mutex);
	return res;
}


TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
			       struct tee_ta_session **sess,
			       struct tee_ta_session_head *open_sessions,
			       const TEE_UUID *uuid,
			       const TEE_Identity *clnt_id,
			       uint32_t cancel_req_to,
			       struct tee_ta_param *param)
{
	TEE_Result res;
	struct tee_ta_session *s = NULL;
	struct tee_ta_ctx *ctx;
	bool panicked;
	bool was_busy = false;

	res = tee_ta_init_session(err, open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("init session failed 0x%x", res);
		return res;
	}

	if (!check_params(s, param)) {
		/* Drop the creation reference before closing the session */
		tee_ta_put_session(s);
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	ctx = s->ctx;

	if (!ctx || ctx->panicked) {
		DMSG("panicked, call tee_ta_close_session()");
		/* Drop the creation reference before closing the session */
		tee_ta_put_session(s);
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	*sess = s;
	/* Save identity of the owner of the session */
	s->clnt_id = *clnt_id;

	if (tee_ta_try_set_busy(ctx)) {
		set_invoke_timeout(s, cancel_req_to);
		res = ctx->ops->enter_open_session(s, param, err);
		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
		was_busy = true;
	}

	panicked = ctx->panicked;

	tee_ta_put_session(s);
	if (panicked || (res != TEE_SUCCESS))
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors;
	 * panics and deadlock avoidance are attributed to the TEE.
	 */
	if (panicked || was_busy)
		*err = TEE_ORIGIN_TEE;

	if (res != TEE_SUCCESS)
		EMSG("Failed. Return error 0x%x", res);

	return res;
}

TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id,
				 uint32_t cancel_req_to, uint32_t cmd,
				 struct tee_ta_param *param)
{
	TEE_Result res;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	if (!check_params(sess, param))
		return TEE_ERROR_BAD_PARAMETERS;

	if (!sess->ctx) {
		/* The context has already been destroyed */
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	} else if (sess->ctx->panicked) {
		DMSG("Panicked!");
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	tee_ta_set_busy(sess->ctx);

	set_invoke_timeout(sess, cancel_req_to);
	res = sess->ctx->ops->enter_invoke_cmd(sess, cmd, param, err);

	tee_ta_clear_busy(sess->ctx);

	if (sess->ctx->panicked) {
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	/* A short buffer is a normal, expected outcome, not a failure */
	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
		DMSG("Error: %x of %d", res, *err);

	return res;
}

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id)
{
	*err = TEE_ORIGIN_TEE;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	sess->cancel = true;
	return TEE_SUCCESS;
}

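/*
 * A session is cancelled when cancellation requests are unmasked and
 * either an explicit cancel is pending or the timeout set with
 * set_invoke_timeout() has expired.
 */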
bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}

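/*
 * Derive the context that should own the current user mapping from the
 * top of the session stack. A pseudo TA at the top is skipped since it
 * runs with the mapping of the TA that called it.
 */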
static void update_current_ctx(struct thread_specific_data *tsd)
{
	struct tee_ta_ctx *ctx = NULL;
	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);

	if (s) {
		if (is_pseudo_ta_ctx(s->ctx))
			s = TAILQ_NEXT(s, link_tsd);

		if (s)
			ctx = s->ctx;
	}

	if (tsd->ctx != ctx)
		tee_mmu_set_ctx(ctx);
	/*
	 * If the context has no user mapping (vm_info == NULL) no user
	 * mapping may be active, and if it has one the mapping must be
	 * active.
	 */
	if (((is_user_ta_ctx(ctx) ?
			to_user_ta_ctx(ctx)->vm_info : NULL) == NULL) ==
					core_mmu_user_mapping_is_active())
		panic("unexpected active mapping");
}

void tee_ta_push_current_session(struct tee_ta_session *sess)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	TAILQ_INSERT_HEAD(&tsd->sess_stack, sess, link_tsd);
	update_current_ctx(tsd);
}

struct tee_ta_session *tee_ta_pop_current_session(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);

	if (s) {
		TAILQ_REMOVE(&tsd->sess_stack, s, link_tsd);
		update_current_ctx(tsd);
	}
	return s;
}

TEE_Result tee_ta_get_current_session(struct tee_ta_session **sess)
{
	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (!s)
		return TEE_ERROR_BAD_STATE;
	*sess = s;
	return TEE_SUCCESS;
}

struct tee_ta_session *tee_ta_get_calling_session(void)
{
	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s)
		s = TAILQ_NEXT(s, link_tsd);
	return s;
}

#if defined(CFG_TA_GPROF_SUPPORT)
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
	struct tee_ta_session *s;
	struct sample_buf *sbuf;
	size_t idx;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		return;
	sbuf = s->sbuf;
	if (!sbuf || !sbuf->enabled)
		return; /* PC sampling is not enabled */

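	/*
	 * gprof-style binning: scale is a 16.16 fixed-point factor
	 * (65536 == 1:1) applied to halved text offsets, hence the /2
	 * and /65536.
	 */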
	idx = (((uint64_t)pc - sbuf->offset) / 2 * sbuf->scale) / 65536;
	if (idx < sbuf->nsamples)
		sbuf->samples[idx]++;
	sbuf->count++;
}

/*
 * Update user-mode CPU time for the current session
 * @suspend: true if session is being suspended (leaving user mode), false
 * if it is resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
	struct tee_ta_session *s;
	struct sample_buf *sbuf;
	uint64_t now;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		return;
	sbuf = s->sbuf;
	if (!sbuf)
		return;
	now = read_cntpct();
	if (suspend) {
		assert(sbuf->usr_entered);
		sbuf->usr += now - sbuf->usr_entered;
		sbuf->usr_entered = 0;
	} else {
		assert(!sbuf->usr_entered);
		if (!now)
			now++; /* 0 is reserved */
		sbuf->usr_entered = now;
	}
}

void tee_ta_update_session_utime_suspend(void)
{
	tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
	tee_ta_update_session_utime(false);
}
#endif
937