xref: /optee_os/core/kernel/tee_ta_manager.c (revision c2ce4186edb0412c8a0069ff4ee2eb2ec66d09fe)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <types_ext.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <arm.h>
#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_ta.h>
#include <mm/core_mmu.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mmu.h>
#include <tee/entry_std.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_storage.h>
#include <tee_api_types.h>
#include <trace.h>
#include <utee_types.h>
#include <util.h>

/*
 * This mutex protects the TA context list below as well as the session
 * and busy/single-instance bookkeeping in this file.
 */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available. */
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}

static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}
#endif

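/*
 * Overview of the busy protocol below: a TA context without
 * TA_FLAG_CONCURRENT is entered by one thread at a time, serialized by
 * ctx->busy. A TA with TA_FLAG_SINGLE_INSTANCE additionally takes the
 * global single-instance lock above, which is recursive for the owning
 * thread. tee_ta_try_set_busy() returns false instead of deadlocking
 * when the calling thread already holds the single-instance lock and
 * the instance is busy.
 */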
static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	if (ctx->flags & TA_FLAG_CONCURRENT)
		return true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We're holding the single-instance lock and the
			 * TA is busy; waiting now would only cause a
			 * deadlock, so release the lock and return false.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, we're free to
		 * wait for the TA to become available.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either it's already true or we should set it to true */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	if (ctx->flags & TA_FLAG_CONCURRENT)
		return;

	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		unlock_single_instance();

	mutex_unlock(&tee_ta_mutex);
}

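/*
 * A thread in tee_ta_unlink_session() keeps one reference on the session
 * and sleeps until ref_count drops back to 1, which is why the condvar is
 * signalled when the count reaches 1 rather than 0.
 */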
static void dec_session_ref_count(struct tee_ta_session *s)
{
	assert(s->ref_count > 0);
	s->ref_count--;
	if (s->ref_count == 1)
		condvar_signal(&s->refc_cv);
}

void tee_ta_put_session(struct tee_ta_session *s)
{
	mutex_lock(&tee_ta_mutex);

	if (s->lock_thread == thread_get_id()) {
		s->lock_thread = THREAD_ID_INVALID;
		condvar_signal(&s->lock_cv);
	}
	dec_session_ref_count(s);

	mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *found = NULL;

	TAILQ_FOREACH(s, open_sessions, link) {
		if (s->id == id) {
			found = s;
			break;
		}
	}

	return found;
}

struct tee_ta_session *tee_ta_find_session(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;

	mutex_lock(&tee_ta_mutex);

	s = tee_ta_find_session_nolock(id, open_sessions);

	mutex_unlock(&tee_ta_mutex);

	return s;
}

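/*
 * Looks up a session by ID and takes a reference on it; with 'exclusive'
 * the per-session lock is taken as well, blocking while another thread
 * holds it. Every successful call must be paired with
 * tee_ta_put_session(), e.g.:
 *
 *	s = tee_ta_get_session(id, true, open_sessions);
 *	if (s) {
 *		... use the session ...
 *		tee_ta_put_session(s);
 *	}
 */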
struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	mutex_lock(&tee_ta_mutex);

	while (true) {
		s = tee_ta_find_session_nolock(id, open_sessions);
		if (!s)
			break;
		if (s->unlink) {
			s = NULL;
			break;
		}
		s->ref_count++;
		if (!exclusive)
			break;

		assert(s->lock_thread != thread_get_id());

		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
			condvar_wait(&s->lock_cv, &tee_ta_mutex);

		if (s->unlink) {
			dec_session_ref_count(s);
			s = NULL;
			break;
		}

		s->lock_thread = thread_get_id();
		break;
	}

	mutex_unlock(&tee_ta_mutex);
	return s;
}

static void tee_ta_unlink_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	mutex_lock(&tee_ta_mutex);

	assert(s->ref_count >= 1);
	assert(s->lock_thread == thread_get_id());
	assert(!s->unlink);

	s->unlink = true;
	condvar_broadcast(&s->lock_cv);

	while (s->ref_count != 1)
		condvar_wait(&s->refc_cv, &tee_ta_mutex);

	TAILQ_REMOVE(open_sessions, s, link);

	mutex_unlock(&tee_ta_mutex);
}

static void destroy_session(struct tee_ta_session *s,
			    struct tee_ta_session_head *open_sessions)
{
	tee_ta_unlink_session(s, open_sessions);
#if defined(CFG_TA_GPROF_SUPPORT)
	free(s->sbuf);
#endif
	free(s);
}

static void destroy_context(struct tee_ta_ctx *ctx)
{
	DMSG("Destroy TA ctx (0x%" PRIxVA ")", (vaddr_t)ctx);

	condvar_destroy(&ctx->busy_cv);
	pgt_flush_ctx(ctx);
	ctx->ops->destroy(ctx);
}

static void destroy_ta_ctx_from_session(struct tee_ta_session *s)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct user_ta_ctx *utc = NULL;
	size_t count = 1; /* start counting the references to the context */

	DMSG("Remove references to context (0x%" PRIxVA ")", (vaddr_t)s->ctx);

	mutex_lock(&tee_ta_mutex);
	nsec_sessions_list_head(&open_sessions);

	/*
	 * The next two loops remove all references to the context that is
	 * about to be destroyed, except the one held by the current
	 * session. That reference is cleared later in this function, once
	 * the context has been properly destroyed.
	 */

	/*
	 * Scan the entire list of sessions opened by clients from the
	 * non-secure world.
	 */
	TAILQ_FOREACH(sess, open_sessions, link) {
		if (sess->ctx == s->ctx && sess != s) {
			sess->ctx = NULL;
			count++;
		}
	}

	/*
	 * Scan all sessions opened from the secure side by walking all
	 * loaded TA instances and, for each context, scanning its open
	 * sessions.
	 */
	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (is_user_ta_ctx(ctx)) {
			utc = to_user_ta_ctx(ctx);

			TAILQ_FOREACH(sess, &utc->open_sessions, link) {
				if (sess->ctx == s->ctx && sess != s) {
					sess->ctx = NULL;
					count++;
				}
			}
		}
	}

	assert(count == s->ctx->ref_count);

	TAILQ_REMOVE(&tee_ctxes, s->ctx, link);
	mutex_unlock(&tee_ta_mutex);

	destroy_context(s->ctx);

	s->ctx = NULL;
}

/*
 * tee_ta_context_find - Find a loaded TA context based on a UUID (input)
 * Returns a pointer to the context, or NULL if it isn't found
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx;

	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (memcmp(&ctx->uuid, uuid, sizeof(TEE_UUID)) == 0)
			return ctx;
	}

	return NULL;
}

/* Check that the requester (client ID) matches the session's initial client */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
	if (id == KERN_IDENTITY)
		return TEE_SUCCESS;

	if (id == NSAPP_IDENTITY) {
		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
			DMSG("nsec tries to hijack TA session");
			return TEE_ERROR_ACCESS_DENIED;
		}
		return TEE_SUCCESS;
	}

	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
		DMSG("client id mismatch");
		return TEE_ERROR_ACCESS_DENIED;
	}
	return TEE_SUCCESS;
}

/*
 * Check whether the invocation parameters match the TA properties
 *
 * @sess - current session handle
 * @param - invocation parameters; memory references have already been
 * vetted and hold a valid 'mobj'
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy)
 * - Only SDP flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
			 struct tee_ta_param *param __unused)
{
	/*
	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
	 * are rejected at the OP-TEE core entry. Hence all TAs have the
	 * same permissions regarding memory reference parameters.
	 */
	return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
			 struct tee_ta_param *param)
{
	int n;

	/*
	 * When CFG_SECURE_DATA_PATH is enabled, OP-TEE entry allows SHM and
	 * SDP memory references. Only TAs flagged SDP can access SDP memory.
	 */
	if (sess->ctx && sess->ctx->flags & TA_FLAG_SECURE_DATA_PATH)
		return true;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		struct param_mem *mem = &param->u[n].mem;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!mem->size)
			continue;
		if (mobj_is_sdp_mem(mem->mobj))
			return false;
	}
	return true;
}
#endif
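
/*
 * For illustration: a TA that needs to receive SDP buffers declares
 * TA_FLAG_SECURE_DATA_PATH among its TA flags; without that flag, any
 * memref parameter backed by SDP memory makes check_params() fail and
 * the callers below reject the request with TEE_ERROR_BAD_PARAMETERS.
 */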

static void set_invoke_timeout(struct tee_ta_session *sess,
				      uint32_t cancel_req_to)
{
	TEE_Time current_time;
	TEE_Time cancel_time;

	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
		goto infinite;

	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		goto infinite;

	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
			 &cancel_time.seconds))
		goto infinite;

	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
	if (cancel_time.millis > 1000) {
		/* Carry the extra second without losing the sum above */
		if (ADD_OVERFLOW(cancel_time.seconds, 1,
				 &cancel_time.seconds))
			goto infinite;

		cancel_time.millis -= 1000;
	}

	sess->cancel_time = cancel_time;
	return;

infinite:
	sess->cancel_time.seconds = UINT32_MAX;
	sess->cancel_time.millis = UINT32_MAX;
}
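
/*
 * Worked example of the arithmetic above: with a current time of
 * {10 s, 800 ms} and cancel_req_to = 2500 ms, cancel_time is first
 * {12 s, 1300 ms} and becomes {13 s, 300 ms} after the millisecond carry.
 */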

/*-----------------------------------------------------------------------------
 * Close a Trusted Application and free available resources
 *---------------------------------------------------------------------------*/
TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
				struct tee_ta_session_head *open_sessions,
				const TEE_Identity *clnt_id)
{
	struct tee_ta_session *sess;
	struct tee_ta_ctx *ctx;
	bool keep_alive;

	if (!csess)
		return TEE_ERROR_ITEM_NOT_FOUND;

	DMSG("csess 0x%" PRIxVA " id %u", (vaddr_t)csess, csess->id);

	sess = tee_ta_get_session(csess->id, true, open_sessions);

	if (!sess) {
		EMSG("session 0x%" PRIxVA " to be removed is not found",
		     (vaddr_t)csess);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
		tee_ta_put_session(sess);
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
	}

	ctx = sess->ctx;
	DMSG("Destroy session");

	if (!ctx) {
		destroy_session(sess, open_sessions);
		return TEE_SUCCESS;
	}

	assert(!ctx->panicked);

	tee_ta_set_busy(ctx);

	set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
	ctx->ops->enter_close_session(sess);

	destroy_session(sess, open_sessions);

	tee_ta_clear_busy(ctx);

	mutex_lock(&tee_ta_mutex);

	if (ctx->ref_count <= 0)
		panic();

	ctx->ref_count--;
	keep_alive = (ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) &&
			(ctx->flags & TA_FLAG_SINGLE_INSTANCE);
	if (!ctx->ref_count && !keep_alive) {
		TAILQ_REMOVE(&tee_ctxes, ctx, link);
		mutex_unlock(&tee_ta_mutex);

		destroy_context(ctx);
	} else
		mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}

static TEE_Result tee_ta_init_session_with_context(struct tee_ta_ctx *ctx,
			struct tee_ta_session *s)
{
	/*
	 * If the TA isn't single-instance it should be loaded as a new
	 * instance instead of doing anything with this instance.
	 * So tell the caller that we didn't find the TA and the caller
	 * will load a new instance.
	 */
	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * The TA is single-instance; if it isn't multi-session we
	 * can't create another session unless its reference count is zero
	 */
	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
		return TEE_ERROR_BUSY;

	DMSG("Re-open TA %pUl", (void *)&ctx->uuid);

	ctx->ref_count++;
	s->ctx = ctx;
	return TEE_SUCCESS;
}

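/*
 * Allocate a session ID that isn't already in use: start just after the
 * most recently allocated ID, skip 0 (which is not a valid ID) and wrap
 * around if needed; 0 is returned only when all other IDs are taken.
 */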
static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *last = NULL;
	uint32_t saved = 0;
	uint32_t id = 1;

	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
	if (last) {
		/* This value is less likely to be already used */
		id = last->id + 1;
		if (!id)
			id++; /* 0 is not valid */
	}

	saved = id;
	do {
		if (!tee_ta_find_session_nolock(id, open_sessions))
			return id;
		id++;
		if (!id)
			id++;
	} while (id != saved);

	return 0;
}

static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
				struct tee_ta_session_head *open_sessions,
				const TEE_UUID *uuid,
				struct tee_ta_session **sess)
{
	TEE_Result res;
	struct tee_ta_ctx *ctx;
	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

	*err = TEE_ORIGIN_TEE;
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->cancel_mask = true;
	condvar_init(&s->refc_cv);
	condvar_init(&s->lock_cv);
	s->lock_thread = THREAD_ID_INVALID;
	s->ref_count = 1;

	/*
	 * We take the global TA mutex here and hold it while doing
	 * RPC to load the TA. This big critical section should be broken
	 * down into smaller pieces.
	 */
	mutex_lock(&tee_ta_mutex);
	s->id = new_session_id(open_sessions);
	if (!s->id) {
		/* The session isn't in open_sessions yet, free it directly */
		mutex_unlock(&tee_ta_mutex);
		free(s);
		return TEE_ERROR_OVERFLOW;
	}
	TAILQ_INSERT_TAIL(open_sessions, s, link);

	/* Look for already loaded TA */
	ctx = tee_ta_context_find(uuid);
	if (ctx) {
		res = tee_ta_init_session_with_context(ctx, s);
		if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
			goto out;
	}

	/* Look for pseudo TA */
	res = tee_ta_init_pseudo_ta_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for user TA */
	res = tee_ta_init_user_ta_session(uuid, s);

out:
	if (res == TEE_SUCCESS) {
		*sess = s;
	} else {
		TAILQ_REMOVE(open_sessions, s, link);
		free(s);
	}
	mutex_unlock(&tee_ta_mutex);
	return res;
}

TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
			       struct tee_ta_session **sess,
			       struct tee_ta_session_head *open_sessions,
			       const TEE_UUID *uuid,
			       const TEE_Identity *clnt_id,
			       uint32_t cancel_req_to,
			       struct tee_ta_param *param)
{
	TEE_Result res;
	struct tee_ta_session *s = NULL;
	struct tee_ta_ctx *ctx;
	bool panicked;
	bool was_busy = false;

	res = tee_ta_init_session(err, open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("init session failed 0x%x", res);
		return res;
	}

	if (!check_params(s, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ctx = s->ctx;

	if (!ctx || ctx->panicked) {
		DMSG("panicked, call tee_ta_close_session()");
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	*sess = s;
	/* Save identity of the owner of the session */
	s->clnt_id = *clnt_id;

	if (tee_ta_try_set_busy(ctx)) {
		set_invoke_timeout(s, cancel_req_to);
		res = ctx->ops->enter_open_session(s, param, err);
		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
		was_busy = true;
	}

	panicked = ctx->panicked;

	tee_ta_put_session(s);
	if (panicked || (res != TEE_SUCCESS))
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for a "regular" error
	 * reported by the TA; a panic or a busy instance is reported with
	 * origin TEE_ORIGIN_TEE instead.
	 */
	if (panicked || was_busy)
		*err = TEE_ORIGIN_TEE;
	else
		*err = TEE_ORIGIN_TRUSTED_APP;

	if (res != TEE_SUCCESS)
		EMSG("Failed. Return error 0x%x", res);

	return res;
}

TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id,
				 uint32_t cancel_req_to, uint32_t cmd,
				 struct tee_ta_param *param)
{
	TEE_Result res;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	if (!check_params(sess, param))
		return TEE_ERROR_BAD_PARAMETERS;

	if (!sess->ctx) {
		/* The context has already been destroyed */
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	} else if (sess->ctx->panicked) {
		DMSG("Panicked!");
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	tee_ta_set_busy(sess->ctx);

	set_invoke_timeout(sess, cancel_req_to);
	res = sess->ctx->ops->enter_invoke_cmd(sess, cmd, param, err);

	tee_ta_clear_busy(sess->ctx);

	if (sess->ctx->panicked) {
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	/* A short buffer is not a real error case, so don't log it as one */
	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
		DMSG("Error: %x of %d", res, *err);

	return res;
}

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id)
{
	*err = TEE_ORIGIN_TEE;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	sess->cancel = true;
	return TEE_SUCCESS;
}

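/*
 * A session counts as cancelled when its cancel flag is set or its
 * cancellation deadline has passed, unless the TA currently masks
 * cancellation. Passing a non-NULL @curr_time avoids re-reading the
 * system timer.
 */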
bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}

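/*
 * A pseudo TA runs in kernel mode and has no user mapping of its own, so
 * when one sits on top of the session stack the context used for the MMU
 * is taken from the next (user TA) session below it, if any.
 */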
static void update_current_ctx(struct thread_specific_data *tsd)
{
	struct tee_ta_ctx *ctx = NULL;
	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);

	if (s) {
		if (is_pseudo_ta_ctx(s->ctx))
			s = TAILQ_NEXT(s, link_tsd);

		if (s)
			ctx = s->ctx;
	}

	if (tsd->ctx != ctx)
		tee_mmu_set_ctx(ctx);
	/*
	 * If the current context has no user memory mapping (vm_info is
	 * NULL) the user mapping must not be active, and if it has one the
	 * user mapping must be active.
	 */
	if (((is_user_ta_ctx(ctx) ?
			to_user_ta_ctx(ctx)->vm_info : NULL) == NULL) ==
					core_mmu_user_mapping_is_active())
		panic("unexpected active mapping");
}

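/*
 * Typical pairing when entering and leaving a TA on the current thread
 * (sketch):
 *
 *	tee_ta_push_current_session(s);
 *	... run the TA ...
 *	tee_ta_pop_current_session();
 */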
void tee_ta_push_current_session(struct tee_ta_session *sess)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	TAILQ_INSERT_HEAD(&tsd->sess_stack, sess, link_tsd);
	update_current_ctx(tsd);
}

struct tee_ta_session *tee_ta_pop_current_session(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);

	if (s) {
		TAILQ_REMOVE(&tsd->sess_stack, s, link_tsd);
		update_current_ctx(tsd);
	}
	return s;
}

TEE_Result tee_ta_get_current_session(struct tee_ta_session **sess)
{
	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (!s)
		return TEE_ERROR_BAD_STATE;
	*sess = s;
	return TEE_SUCCESS;
}

struct tee_ta_session *tee_ta_get_calling_session(void)
{
	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s)
		s = TAILQ_NEXT(s, link_tsd);
	return s;
}

/*
 * dump_state - Display the TA state using the error log level.
 */
static void dump_state(struct tee_ta_ctx *ctx)
{
	struct tee_ta_session *s = NULL;
	bool active __maybe_unused;

	if (!ctx) {
		EMSG("No TA status: null context reference");
		return;
	}

	active = ((tee_ta_get_current_session(&s) == TEE_SUCCESS) &&
		  s && s->ctx == ctx);

	EMSG_RAW("Status of TA %pUl (%p) %s", (void *)&ctx->uuid, (void *)ctx,
		active ? "(active)" : "");
	ctx->ops->dump_state(ctx);
}

void tee_ta_dump_current(void)
{
	struct tee_ta_session *s = NULL;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS) {
		EMSG("no valid session found, cannot log TA status");
		return;
	}

	dump_state(s->ctx);
}

#if defined(CFG_TA_GPROF_SUPPORT)
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
	struct tee_ta_session *s;
	struct sample_buf *sbuf;
	size_t idx;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		return;
	sbuf = s->sbuf;
	if (!sbuf || !sbuf->enabled)
		return; /* PC sampling is not enabled */

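	/*
	 * Index computation in the classic profil()/gprof style: 'scale'
	 * is a 16.16 fixed-point factor applied to the PC offset with
	 * two-byte granularity.
	 */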
	idx = (((uint64_t)pc - sbuf->offset)/2 * sbuf->scale)/65536;
	if (idx < sbuf->nsamples)
		sbuf->samples[idx]++;
	sbuf->count++;
}

/*
 * Update user-mode CPU time for the current session
 * @suspend: true if session is being suspended (leaving user mode), false if
 * it is resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
	struct tee_ta_session *s;
	struct sample_buf *sbuf;
	uint64_t now;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		return;
	sbuf = s->sbuf;
	if (!sbuf)
		return;
	now = read_cntpct();
	if (suspend) {
		assert(sbuf->usr_entered);
		sbuf->usr += now - sbuf->usr_entered;
		sbuf->usr_entered = 0;
	} else {
		assert(!sbuf->usr_entered);
		if (!now)
			now++; /* 0 is reserved */
		sbuf->usr_entered = now;
	}
}

void tee_ta_update_session_utime_suspend(void)
{
	tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
	tee_ta_update_session_utime(false);
}
#endif