xref: /optee_os/core/kernel/tee_ta_manager.c (revision 3dd0e94e9ea7b3baf41bfa7f7182765ec63e02f1)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <types_ext.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <arm.h>
#include <assert.h>
#include <kernel/ftrace.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_ta.h>
#include <mm/core_mmu.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mmu.h>
#include <tee/entry_std.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_storage.h>
#include <tee_api_types.h>
#include <trace.h>
#include <utee_types.h>
#include <util.h>

/* This mutex protects the critical section in tee_ta_init_session */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

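/*
 * Single-instance lock: serializes sessions towards single-instance TAs.
 * When CFG_CONCURRENT_SINGLE_INSTANCE_TA is enabled the helpers below
 * become no-ops and single-instance TAs may be entered concurrently.
 */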
#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available. */
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}

static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}
#endif

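/*
 * Mark a TA context busy before entering it. Concurrent TAs are never
 * marked busy. Returns false instead of blocking when waiting for the
 * context would deadlock against the single-instance lock held by this
 * thread.
 */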
static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	if (ctx->flags & TA_FLAG_CONCURRENT)
		return true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->initializing) {
		/*
		 * Context is still initializing and flags cannot be relied
		 * on for user TAs. Wait here until it's initialized.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We're holding the single-instance lock and the
			 * TA is busy. Since waiting now would only cause a
			 * deadlock, we release the lock and return false.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, so we're
		 * free to wait for the TA to become available.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either it's already true or we should set it to true */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	if (ctx->flags & TA_FLAG_CONCURRENT)
		return;

	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

	if (!ctx->initializing && (ctx->flags & TA_FLAG_SINGLE_INSTANCE))
		unlock_single_instance();

	ctx->initializing = false;

	mutex_unlock(&tee_ta_mutex);
}

static void dec_session_ref_count(struct tee_ta_session *s)
{
	assert(s->ref_count > 0);
	s->ref_count--;
	if (s->ref_count == 1)
		condvar_signal(&s->refc_cv);
}

void tee_ta_put_session(struct tee_ta_session *s)
{
	mutex_lock(&tee_ta_mutex);

	if (s->lock_thread == thread_get_id()) {
		s->lock_thread = THREAD_ID_INVALID;
		condvar_signal(&s->lock_cv);
	}
	dec_session_ref_count(s);

	mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *found = NULL;

	TAILQ_FOREACH(s, open_sessions, link) {
		if (s->id == id) {
			found = s;
			break;
		}
	}

	return found;
}

struct tee_ta_session *tee_ta_find_session(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;

	mutex_lock(&tee_ta_mutex);

	s = tee_ta_find_session_nolock(id, open_sessions);

	mutex_unlock(&tee_ta_mutex);

	return s;
}

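/*
 * Look up a session by ID and take a reference on it. With @exclusive set,
 * the session is also locked to the calling thread until released with
 * tee_ta_put_session(). Returns NULL if the session is not found or is
 * being unlinked.
 */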
struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	mutex_lock(&tee_ta_mutex);

	while (true) {
		s = tee_ta_find_session_nolock(id, open_sessions);
		if (!s)
			break;
		if (s->unlink) {
			s = NULL;
			break;
		}
		s->ref_count++;
		if (!exclusive)
			break;

		assert(s->lock_thread != thread_get_id());

		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
			condvar_wait(&s->lock_cv, &tee_ta_mutex);

		if (s->unlink) {
			dec_session_ref_count(s);
			s = NULL;
			break;
		}

		s->lock_thread = thread_get_id();
		break;
	}

	mutex_unlock(&tee_ta_mutex);
	return s;
}

static void tee_ta_unlink_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	mutex_lock(&tee_ta_mutex);

	assert(s->ref_count >= 1);
	assert(s->lock_thread == thread_get_id());
	assert(!s->unlink);

	s->unlink = true;
	condvar_broadcast(&s->lock_cv);

	while (s->ref_count != 1)
		condvar_wait(&s->refc_cv, &tee_ta_mutex);

	TAILQ_REMOVE(open_sessions, s, link);

	mutex_unlock(&tee_ta_mutex);
}

static void destroy_session(struct tee_ta_session *s,
			    struct tee_ta_session_head *open_sessions)
{
#if defined(CFG_TA_FTRACE_SUPPORT)
	if (s->ctx) {
		tee_ta_push_current_session(s);
		ta_fbuf_dump(s);
		tee_ta_pop_current_session();
	}
#endif

	tee_ta_unlink_session(s, open_sessions);
#if defined(CFG_TA_GPROF_SUPPORT)
	free(s->sbuf);
#endif
	free(s);
}

static void destroy_context(struct tee_ta_ctx *ctx)
{
	DMSG("Destroy TA ctx (0x%" PRIxVA ")",  (vaddr_t)ctx);

	condvar_destroy(&ctx->busy_cv);
	pgt_flush_ctx(ctx);
	ctx->ops->destroy(ctx);
}

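/*
 * Drop every other session's reference to the TA context owned by @s,
 * remove the context from the global list and destroy it. Used when a
 * panicked TA context has to be torn down while sessions towards it may
 * still be open.
 */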
static void destroy_ta_ctx_from_session(struct tee_ta_session *s)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct user_ta_ctx *utc = NULL;
	size_t count = 1; /* start counting the references to the context */

	DMSG("Remove references to context (0x%" PRIxVA ")", (vaddr_t)s->ctx);

	mutex_lock(&tee_ta_mutex);
	nsec_sessions_list_head(&open_sessions);

	/*
	 * The next two loops remove all references to the context that is
	 * about to be destroyed, except the one held by the current
	 * session. That reference is cleared at the end of this function,
	 * once the context has been destroyed.
	 */

	/*
	 * Scan the list of sessions opened by clients in the non-secure
	 * world.
	 */
	TAILQ_FOREACH(sess, open_sessions, link) {
		if (sess->ctx == s->ctx && sess != s) {
			sess->ctx = NULL;
			count++;
		}
	}

	/*
	 * Scan the sessions opened from the secure side by walking all
	 * available TA instances and, for each context, all of its open
	 * sessions.
	 */
	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (is_user_ta_ctx(ctx)) {
			utc = to_user_ta_ctx(ctx);

			TAILQ_FOREACH(sess, &utc->open_sessions, link) {
				if (sess->ctx == s->ctx && sess != s) {
					sess->ctx = NULL;
					count++;
				}
			}
		}
	}

	assert(count == s->ctx->ref_count);

	TAILQ_REMOVE(&tee_ctxes, s->ctx, link);
	mutex_unlock(&tee_ta_mutex);

	destroy_context(s->ctx);

	s->ctx = NULL;
}

/*
 * tee_ta_context_find - Find a loaded TA context based on a UUID (input)
 * Returns a pointer to the context, or NULL if none is found
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx;

	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (memcmp(&ctx->uuid, uuid, sizeof(TEE_UUID)) == 0)
			return ctx;
	}

	return NULL;
}

/* check if requester (client ID) matches session initial client */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
	if (id == KERN_IDENTITY)
		return TEE_SUCCESS;

	if (id == NSAPP_IDENTITY) {
		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
			DMSG("nsec tries to hijack TA session");
			return TEE_ERROR_ACCESS_DENIED;
		}
		return TEE_SUCCESS;
	}

	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
		DMSG("client id mismatch");
		return TEE_ERROR_ACCESS_DENIED;
	}
	return TEE_SUCCESS;
}

/*
 * Check if invocation parameters match the TA properties
 *
 * @sess - current session handle
 * @param - invocation parameters; memory references already hold a valid
 *          'mobj'.
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy).
 * - Only SDP-flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
			 struct tee_ta_param *param __unused)
{
	/*
	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
	 * are rejected at OP-TEE core entry. Hence here all TAs have the
	 * same permissions regarding memory reference parameters.
	 */
	return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
			 struct tee_ta_param *param)
{
	int n;

	/*
	 * When CFG_SECURE_DATA_PATH is enabled, the OP-TEE entry allows
	 * SHM and SDP memory references. Only TAs flagged SDP can access
	 * SDP memory.
	 */
	if (sess->ctx && sess->ctx->flags & TA_FLAG_SECURE_DATA_PATH)
		return true;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		struct param_mem *mem = &param->u[n].mem;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!mem->size)
			continue;
		if (mobj_is_sdp_mem(mem->mobj))
			return false;
	}
	return true;
}
#endif

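/*
 * Convert a relative cancellation timeout in milliseconds into an absolute
 * system time stored in the session. TEE_TIMEOUT_INFINITE, or any failure
 * to compute the deadline, is encoded as UINT32_MAX seconds and millis.
 */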
static void set_invoke_timeout(struct tee_ta_session *sess,
				      uint32_t cancel_req_to)
{
	TEE_Time current_time;
	TEE_Time cancel_time;

	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
		goto infinite;

	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		goto infinite;

	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
			 &cancel_time.seconds))
		goto infinite;

	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
	if (cancel_time.millis > 1000) {
		if (ADD_OVERFLOW(current_time.seconds, 1,
				 &cancel_time.seconds))
			goto infinite;

		cancel_time.seconds++;
		cancel_time.millis -= 1000;
	}

	sess->cancel_time = cancel_time;
	return;

infinite:
	sess->cancel_time.seconds = UINT32_MAX;
	sess->cancel_time.millis = UINT32_MAX;
}

/*-----------------------------------------------------------------------------
 * Close a Trusted Application and free available resources
 *---------------------------------------------------------------------------*/
TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
				struct tee_ta_session_head *open_sessions,
				const TEE_Identity *clnt_id)
{
	struct tee_ta_session *sess;
	struct tee_ta_ctx *ctx;
	bool keep_alive;

	if (!csess)
		return TEE_ERROR_ITEM_NOT_FOUND;

	DMSG("csess 0x%" PRIxVA " id %u", (vaddr_t)csess, csess->id);

	sess = tee_ta_get_session(csess->id, true, open_sessions);

	if (!sess) {
		EMSG("session 0x%" PRIxVA " to be removed is not found",
		     (vaddr_t)csess);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
		tee_ta_put_session(sess);
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
	}

	ctx = sess->ctx;
	DMSG("Destroy session");

	if (!ctx) {
		destroy_session(sess, open_sessions);
		return TEE_SUCCESS;
	}

	if (ctx->panicked) {
		destroy_session(sess, open_sessions);
	} else {
		tee_ta_set_busy(ctx);
		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
		ctx->ops->enter_close_session(sess);
		destroy_session(sess, open_sessions);
		tee_ta_clear_busy(ctx);
	}

	mutex_lock(&tee_ta_mutex);

	if (ctx->ref_count <= 0)
		panic();

	ctx->ref_count--;
	keep_alive = (ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) &&
			(ctx->flags & TA_FLAG_SINGLE_INSTANCE);
	if (!ctx->ref_count && !keep_alive) {
		TAILQ_REMOVE(&tee_ctxes, ctx, link);
		mutex_unlock(&tee_ta_mutex);

		destroy_context(ctx);
	} else
		mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}

static TEE_Result tee_ta_init_session_with_context(struct tee_ta_ctx *ctx,
			struct tee_ta_session *s)
{
	/*
	 * If the TA isn't single-instance it should be loaded as a new
	 * instance instead of doing anything with this instance.
	 * So tell the caller that we didn't find the TA, and the caller
	 * will load a new instance.
	 */
	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * The TA is single-instance. If it isn't multi-session we can't
	 * create another session unless its reference count is zero.
	 */
	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
		return TEE_ERROR_BUSY;

	DMSG("Re-open TA %pUl", (void *)&ctx->uuid);

	ctx->ref_count++;
	s->ctx = ctx;
	return TEE_SUCCESS;
}

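/*
 * Pick a session ID that is not already in use. The search starts just
 * after the ID of the most recently opened session and wraps around,
 * skipping 0 which is reserved as invalid. Returns 0 if all IDs are taken.
 */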
static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *last = NULL;
	uint32_t saved = 0;
	uint32_t id = 1;

	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
	if (last) {
		/* This value is less likely to be already used */
		id = last->id + 1;
		if (!id)
			id++; /* 0 is not valid */
	}

	saved = id;
	do {
		if (!tee_ta_find_session_nolock(id, open_sessions))
			return id;
		id++;
		if (!id)
			id++;
	} while (id != saved);

	return 0;
}

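/*
 * Allocate a session, assign it an ID and bind it to a TA context: an
 * already loaded single-instance TA, a pseudo TA or a freshly loaded user
 * TA, tried in that order.
 */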
static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
				struct tee_ta_session_head *open_sessions,
				const TEE_UUID *uuid,
				struct tee_ta_session **sess)
{
	TEE_Result res;
	struct tee_ta_ctx *ctx;
	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

	*err = TEE_ORIGIN_TEE;
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->cancel_mask = true;
	condvar_init(&s->refc_cv);
	condvar_init(&s->lock_cv);
	s->lock_thread = THREAD_ID_INVALID;
	s->ref_count = 1;

	/*
	 * We take the global TA mutex here and hold it while doing
	 * RPC to load the TA. This big critical section should be broken
	 * down into smaller pieces.
	 */
	mutex_lock(&tee_ta_mutex);
	s->id = new_session_id(open_sessions);
	if (!s->id) {
		res = TEE_ERROR_OVERFLOW;
		goto out;
	}
	TAILQ_INSERT_TAIL(open_sessions, s, link);

	/* Look for already loaded TA */
	ctx = tee_ta_context_find(uuid);
	if (ctx) {
		res = tee_ta_init_session_with_context(ctx, s);
		if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
			goto out;
	}

	/* Look for pseudo TA */
	res = tee_ta_init_pseudo_ta_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for user TA */
	res = tee_ta_init_user_ta_session(uuid, s);

out:
	if (res == TEE_SUCCESS) {
		*sess = s;
	} else {
		TAILQ_REMOVE(open_sessions, s, link);
		free(s);
	}
	mutex_unlock(&tee_ta_mutex);
	return res;
}

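/*
 * Open a session towards the TA identified by @uuid on behalf of @clnt_id,
 * creating or reusing a TA instance as needed and calling the TA's open
 * session entry point. The session is closed again if the TA panics or the
 * entry point fails.
 */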
TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
			       struct tee_ta_session **sess,
			       struct tee_ta_session_head *open_sessions,
			       const TEE_UUID *uuid,
			       const TEE_Identity *clnt_id,
			       uint32_t cancel_req_to,
			       struct tee_ta_param *param)
{
	TEE_Result res;
	struct tee_ta_session *s = NULL;
	struct tee_ta_ctx *ctx;
	bool panicked;
	bool was_busy = false;

	res = tee_ta_init_session(err, open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("init session failed 0x%x", res);
		return res;
	}

	if (!check_params(s, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ctx = s->ctx;

	if (!ctx || ctx->panicked) {
		DMSG("panicked, call tee_ta_close_session()");
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	*sess = s;
	/* Save identity of the owner of the session */
	s->clnt_id = *clnt_id;

	if (tee_ta_try_set_busy(ctx)) {
		set_invoke_timeout(s, cancel_req_to);
		res = ctx->ops->enter_open_session(s, param, err);
		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
		was_busy = true;
	}

	panicked = ctx->panicked;

	tee_ta_put_session(s);
	if (panicked || (res != TEE_SUCCESS))
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

	/*
	 * Origin error equal to TEE_ORIGIN_TRUSTED_APP for "regular" error,
	 * apart from panicking.
	 */
	if (panicked || was_busy)
		*err = TEE_ORIGIN_TEE;

	if (res != TEE_SUCCESS)
		EMSG("Failed. Return error 0x%x", res);

	return res;
}

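/*
 * Invoke command @cmd in the TA bound to @sess on behalf of @clnt_id. The
 * TA context is marked busy for the duration of the call and is torn down
 * if the TA panics during the invocation.
 */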
TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id,
				 uint32_t cancel_req_to, uint32_t cmd,
				 struct tee_ta_param *param)
{
	TEE_Result res;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	if (!check_params(sess, param))
		return TEE_ERROR_BAD_PARAMETERS;

	if (!sess->ctx) {
		/* The context has already been destroyed */
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	} else if (sess->ctx->panicked) {
		DMSG("Panicked !");
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	tee_ta_set_busy(sess->ctx);

	set_invoke_timeout(sess, cancel_req_to);
	res = sess->ctx->ops->enter_invoke_cmd(sess, cmd, param, err);

	tee_ta_clear_busy(sess->ctx);

	if (sess->ctx->panicked) {
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	/* Short buffer is not an effective error case */
	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
		DMSG("Error: %x of %d", res, *err);

	return res;
}

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id)
{
	*err = TEE_ORIGIN_TEE;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	sess->cancel = true;
	return TEE_SUCCESS;
}

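/*
 * Return true if the session has been cancelled, either explicitly or
 * because its cancellation deadline has passed. A set cancel mask or an
 * infinite deadline (UINT32_MAX) suppresses cancellation.
 */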
bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}

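/*
 * Make the active MMU context follow the top of the current thread's
 * session stack. Pseudo TA sessions run in their caller's context, so when
 * the top of the stack is a pseudo TA the session just below it (if any)
 * decides the mapping.
 */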
static void update_current_ctx(struct thread_specific_data *tsd)
{
	struct tee_ta_ctx *ctx = NULL;
	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);

	if (s) {
		if (is_pseudo_ta_ctx(s->ctx))
			s = TAILQ_NEXT(s, link_tsd);

		if (s)
			ctx = s->ctx;
	}

	if (tsd->ctx != ctx)
		tee_mmu_set_ctx(ctx);
	/*
	 * If vm_info == NULL we must not have a user mapping active,
	 * if vm_info != NULL we must have a user mapping active.
	 */
	if (((is_user_ta_ctx(ctx) ?
			to_user_ta_ctx(ctx)->vm_info : NULL) == NULL) ==
					core_mmu_user_mapping_is_active())
		panic("unexpected active mapping");
}

void tee_ta_push_current_session(struct tee_ta_session *sess)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	TAILQ_INSERT_HEAD(&tsd->sess_stack, sess, link_tsd);
	update_current_ctx(tsd);
}

struct tee_ta_session *tee_ta_pop_current_session(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);

	if (s) {
		TAILQ_REMOVE(&tsd->sess_stack, s, link_tsd);
		update_current_ctx(tsd);
	}
	return s;
}

TEE_Result tee_ta_get_current_session(struct tee_ta_session **sess)
{
	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (!s)
		return TEE_ERROR_BAD_STATE;
	*sess = s;
	return TEE_SUCCESS;
}

struct tee_ta_session *tee_ta_get_calling_session(void)
{
	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s)
		s = TAILQ_NEXT(s, link_tsd);
	return s;
}

#if defined(CFG_TA_GPROF_SUPPORT)
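/*
 * Record one PC sample in the session's gprof sample buffer. The index
 * computation follows the usual gprof histogram convention: 'scale' is a
 * 16.16 fixed-point ratio and PC offsets are counted in 2-byte units.
 */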
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
	struct tee_ta_session *s;
	struct sample_buf *sbuf;
	size_t idx;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		return;
	sbuf = s->sbuf;
	if (!sbuf || !sbuf->enabled)
		return; /* PC sampling is not enabled */

	idx = (((uint64_t)pc - sbuf->offset)/2 * sbuf->scale)/65536;
	if (idx < sbuf->nsamples)
		sbuf->samples[idx]++;
	sbuf->count++;
}

/*
 * Update user-mode CPU time for the current session
 * @suspend: true if session is being suspended (leaving user mode), false if
 * it is resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
	struct tee_ta_session *s;
	struct sample_buf *sbuf;
	uint64_t now;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		return;
	sbuf = s->sbuf;
	if (!sbuf)
		return;
	now = read_cntpct();
	if (suspend) {
		assert(sbuf->usr_entered);
		sbuf->usr += now - sbuf->usr_entered;
		sbuf->usr_entered = 0;
	} else {
		assert(!sbuf->usr_entered);
		if (!now)
			now++; /* 0 is reserved */
		sbuf->usr_entered = now;
	}
}

void tee_ta_update_session_utime_suspend(void)
{
	tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
	tee_ta_update_session_utime(false);
}
#endif