// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <types_ext.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <arm.h>
#include <assert.h>
#include <kernel/ftrace.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_ta.h>
#include <mm/core_mmu.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mmu.h>
#include <tee/entry_std.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_storage.h>
#include <tee_api_types.h>
#include <trace.h>
#include <utee_types.h>
#include <util.h>

/*
 * This mutex protects the critical section in tee_ta_init_session() and,
 * more generally, the tee_ctxes list, the per-context busy state and the
 * session reference counts.
 */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

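/*
 * The single-instance lock is one global lock serializing entry into all
 * single-instance TAs that are not flagged TA_FLAG_CONCURRENT. When
 * CFG_CONCURRENT_SINGLE_INSTANCE_TA is enabled the lock degenerates into
 * no-ops below, and concurrency is instead limited only by the
 * per-context busy flag.
 */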
#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available. */
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}

static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}
#endif

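/*
 * Claim exclusive access to a TA context ("busy") unless the TA is
 * flagged concurrent. If we hold the single-instance lock while the
 * context is already busy we must not block: the thread that owns the
 * busy flag may itself be blocked waiting for the single-instance lock
 * that we hold. In that case the claim fails and the caller is expected
 * to back off, roughly:
 *
 *	if (!tee_ta_try_set_busy(ctx))
 *		return TEE_ERROR_BUSY;	// deadlock avoided, client may retry
 *	// ...enter the TA...
 *	tee_ta_clear_busy(ctx);
 */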
static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	if (ctx->flags & TA_FLAG_CONCURRENT)
		return true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We're holding the single-instance lock and the
			 * TA is busy. Waiting now could only deadlock, so
			 * release the lock and return false instead.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, so we're
		 * free to wait for the TA to become available.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either it's already true or we should set it to true */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	if (ctx->flags & TA_FLAG_CONCURRENT)
		return;

	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		unlock_single_instance();

	mutex_unlock(&tee_ta_mutex);
}

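/*
 * Session reference counting: a session is created with ref_count == 1,
 * the reference held by its presence on the open-sessions list.
 * tee_ta_get_session() adds a reference, tee_ta_put_session() drops one.
 * tee_ta_unlink_session() waits until only the list reference remains
 * before removing the entry, which is why dropping to exactly 1 signals
 * refc_cv.
 */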
static void dec_session_ref_count(struct tee_ta_session *s)
{
	assert(s->ref_count > 0);
	s->ref_count--;
	/* Only the list reference remains: wake a waiting unlinker */
	if (s->ref_count == 1)
		condvar_signal(&s->refc_cv);
}

void tee_ta_put_session(struct tee_ta_session *s)
{
	mutex_lock(&tee_ta_mutex);

	if (s->lock_thread == thread_get_id()) {
		s->lock_thread = THREAD_ID_INVALID;
		condvar_signal(&s->lock_cv);
	}
	dec_session_ref_count(s);

	mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *found = NULL;

	TAILQ_FOREACH(s, open_sessions, link) {
		if (s->id == id) {
			found = s;
			break;
		}
	}

	return found;
}

struct tee_ta_session *tee_ta_find_session(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;

	mutex_lock(&tee_ta_mutex);

	s = tee_ta_find_session_nolock(id, open_sessions);

	mutex_unlock(&tee_ta_mutex);

	return s;
}

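/*
 * Look up a session by ID and take a reference to it. With @exclusive the
 * calling thread also takes the per-session lock; either way the caller
 * must drop its reference with tee_ta_put_session(). A session marked for
 * unlinking is treated as not found. Typical use:
 *
 *	s = tee_ta_get_session(id, true, open_sessions);
 *	if (!s)
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 *	// ...use the session...
 *	tee_ta_put_session(s);
 */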
struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	mutex_lock(&tee_ta_mutex);

	while (true) {
		s = tee_ta_find_session_nolock(id, open_sessions);
		if (!s)
			break;
		if (s->unlink) {
			s = NULL;
			break;
		}
		s->ref_count++;
		if (!exclusive)
			break;

		assert(s->lock_thread != thread_get_id());

		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
			condvar_wait(&s->lock_cv, &tee_ta_mutex);

		if (s->unlink) {
			dec_session_ref_count(s);
			s = NULL;
			break;
		}

		s->lock_thread = thread_get_id();
		break;
	}

	mutex_unlock(&tee_ta_mutex);
	return s;
}

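/*
 * Detach a session from its open-sessions list. The session is first
 * marked for unlinking so that no new references can be taken, waiters on
 * the per-session lock are woken up, and then we wait until the list
 * holds the only remaining reference before removing the entry.
 */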
static void tee_ta_unlink_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	mutex_lock(&tee_ta_mutex);

	assert(s->ref_count >= 1);
	assert(s->lock_thread == thread_get_id());
	assert(!s->unlink);

	s->unlink = true;
	condvar_broadcast(&s->lock_cv);

	while (s->ref_count != 1)
		condvar_wait(&s->refc_cv, &tee_ta_mutex);

	TAILQ_REMOVE(open_sessions, s, link);

	mutex_unlock(&tee_ta_mutex);
}

static void destroy_session(struct tee_ta_session *s,
			    struct tee_ta_session_head *open_sessions)
{
#if defined(CFG_TA_FTRACE_SUPPORT)
	if (s->ctx) {
		tee_ta_push_current_session(s);
		ta_fbuf_dump(s);
		tee_ta_pop_current_session();
	}
#endif

	tee_ta_unlink_session(s, open_sessions);
#if defined(CFG_TA_GPROF_SUPPORT)
	free(s->sbuf);
#endif
	free(s);
}

static void destroy_context(struct tee_ta_ctx *ctx)
{
	DMSG("Destroy TA ctx (0x%" PRIxVA ")", (vaddr_t)ctx);

	condvar_destroy(&ctx->busy_cv);
	pgt_flush_ctx(ctx);
	ctx->ops->destroy(ctx);
}

static void destroy_ta_ctx_from_session(struct tee_ta_session *s)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct user_ta_ctx *utc = NULL;
	size_t count = 1; /* start counting the references to the context */

	DMSG("Remove references to context (0x%" PRIxVA ")", (vaddr_t)s->ctx);

	mutex_lock(&tee_ta_mutex);
	nsec_sessions_list_head(&open_sessions);

	/*
	 * The next two loops remove all references to the context that is
	 * about to be destroyed, except for the one held by the current
	 * session. That reference is dropped at the end of this function,
	 * once the context has been properly destroyed.
	 */

	/*
	 * Scan the entire list of sessions opened by clients in the
	 * non-secure world.
	 */
	TAILQ_FOREACH(sess, open_sessions, link) {
		if (sess->ctx == s->ctx && sess != s) {
			sess->ctx = NULL;
			count++;
		}
	}

	/*
	 * Scan the sessions opened from the secure side by walking all
	 * available TA instances and, for each user TA context, all of
	 * its open sessions.
	 */
	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (is_user_ta_ctx(ctx)) {
			utc = to_user_ta_ctx(ctx);

			TAILQ_FOREACH(sess, &utc->open_sessions, link) {
				if (sess->ctx == s->ctx && sess != s) {
					sess->ctx = NULL;
					count++;
				}
			}
		}
	}

	assert(count == s->ctx->ref_count);

	TAILQ_REMOVE(&tee_ctxes, s->ctx, link);
	mutex_unlock(&tee_ta_mutex);

	destroy_context(s->ctx);

	s->ctx = NULL;
}

/*
 * tee_ta_context_find - Find a loaded TA context based on a UUID (input)
 * Returns a pointer to the context, or NULL if the TA isn't loaded
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx;

	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (memcmp(&ctx->uuid, uuid, sizeof(TEE_UUID)) == 0)
			return ctx;
	}

	return NULL;
}

/* Check if the requester (client ID) matches the session's initial client */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
	if (id == KERN_IDENTITY)
		return TEE_SUCCESS;

	if (id == NSAPP_IDENTITY) {
		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
			DMSG("nsec tries to hijack TA session");
			return TEE_ERROR_ACCESS_DENIED;
		}
		return TEE_SUCCESS;
	}

	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
		DMSG("client id mismatch");
		return TEE_ERROR_ACCESS_DENIED;
	}
	return TEE_SUCCESS;
}

/*
 * Check if the invocation parameters match the TA properties
 *
 * @s - current session handle
 * @param - invocation parameters; memory references have already been
 * vetted and hold a valid 'mobj'
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy)
 * - Only SDP flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
			 struct tee_ta_param *param __unused)
{
	/*
	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
	 * are rejected at the OP-TEE core entry. Hence all TAs have the
	 * same permissions regarding memory reference parameters here.
	 */
	return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
			 struct tee_ta_param *param)
{
	int n;

	/*
	 * When CFG_SECURE_DATA_PATH is enabled, the OP-TEE entry allows
	 * both SHM and SDP memory references. Only TAs flagged SDP may
	 * access SDP memory.
	 */
	if (sess->ctx && sess->ctx->flags & TA_FLAG_SECURE_DATA_PATH)
		return true;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		struct param_mem *mem = &param->u[n].mem;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!mem->size)
			continue;
		if (mobj_is_sdp_mem(mem->mobj))
			return false;
	}
	return true;
}
#endif

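/*
 * Convert a relative cancellation timeout (in milliseconds) into an
 * absolute system time stored in the session. For example, with a current
 * time of 10 s / 800 ms and cancel_req_to = 2500 ms: seconds become
 * 10 + 2 = 12 and millis 800 + 500 = 1300, which carries over to
 * 13 s / 300 ms. TEE_TIMEOUT_INFINITE and arithmetic overflow both map to
 * the UINT32_MAX sentinel checked in tee_ta_session_is_cancelled().
 */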
static void set_invoke_timeout(struct tee_ta_session *sess,
			       uint32_t cancel_req_to)
{
	TEE_Time current_time;
	TEE_Time cancel_time;

	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
		goto infinite;

	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		goto infinite;

	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
			 &cancel_time.seconds))
		goto infinite;

	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
	if (cancel_time.millis >= 1000) {
		/* Carry the overflowed millisecond count into seconds */
		if (ADD_OVERFLOW(cancel_time.seconds, 1,
				 &cancel_time.seconds))
			goto infinite;

		cancel_time.millis -= 1000;
	}

	sess->cancel_time = cancel_time;
	return;

infinite:
	sess->cancel_time.seconds = UINT32_MAX;
	sess->cancel_time.millis = UINT32_MAX;
}

/*-----------------------------------------------------------------------------
 * Close a session towards a Trusted Application and free the resources
 * that are no longer needed
 *---------------------------------------------------------------------------*/
TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
				struct tee_ta_session_head *open_sessions,
				const TEE_Identity *clnt_id)
{
	struct tee_ta_session *sess;
	struct tee_ta_ctx *ctx;
	bool keep_alive;

	if (!csess)
		return TEE_ERROR_ITEM_NOT_FOUND;

	DMSG("csess 0x%" PRIxVA " id %u", (vaddr_t)csess, csess->id);

	sess = tee_ta_get_session(csess->id, true, open_sessions);

	if (!sess) {
		EMSG("session 0x%" PRIxVA " to be removed is not found",
		     (vaddr_t)csess);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
		tee_ta_put_session(sess);
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
	}

	ctx = sess->ctx;
	DMSG("Destroy session");

	if (!ctx) {
		destroy_session(sess, open_sessions);
		return TEE_SUCCESS;
	}

	if (ctx->panicked) {
		destroy_session(sess, open_sessions);
	} else {
		tee_ta_set_busy(ctx);
		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
		ctx->ops->enter_close_session(sess);
		destroy_session(sess, open_sessions);
		tee_ta_clear_busy(ctx);
	}

	mutex_lock(&tee_ta_mutex);

	if (!ctx->ref_count)
		panic();

	ctx->ref_count--;
	keep_alive = (ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) &&
			(ctx->flags & TA_FLAG_SINGLE_INSTANCE);
	if (!ctx->ref_count && !keep_alive) {
		TAILQ_REMOVE(&tee_ctxes, ctx, link);
		mutex_unlock(&tee_ta_mutex);

		destroy_context(ctx);
	} else
		mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}

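/*
 * Behavior of an open-session request against an already loaded context,
 * by TA flags:
 *
 *   !TA_FLAG_SINGLE_INSTANCE                  -> load a fresh instance
 *   TA_FLAG_SINGLE_INSTANCE + MULTI_SESSION   -> share the instance
 *   TA_FLAG_SINGLE_INSTANCE only              -> share only when no other
 *                                                session holds it
 *                                                (ref_count is zero),
 *                                                else TEE_ERROR_BUSY
 */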
static TEE_Result tee_ta_init_session_with_context(struct tee_ta_ctx *ctx,
			struct tee_ta_session *s)
{
	/*
	 * If the TA isn't single-instance it should be loaded as a new
	 * instance instead of doing anything with this instance. So tell
	 * the caller that we didn't find the TA; the caller will then
	 * load a new instance.
	 */
	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * The TA is single-instance. If it isn't multi-session we can't
	 * create another session unless its reference count is zero.
	 */
	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
		return TEE_ERROR_BUSY;

	DMSG("Re-open TA %pUl", (void *)&ctx->uuid);

	ctx->ref_count++;
	s->ctx = ctx;
	return TEE_SUCCESS;
}

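/*
 * Allocate a session ID that is unique among the open sessions. The
 * search starts just past the ID of the newest session on the list (the
 * value least likely to be in use, or 1 for an empty list), wraps around
 * skipping 0, and returns 0 only if all 2^32 - 1 valid IDs are taken.
 */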
static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *last = NULL;
	uint32_t saved = 0;
	uint32_t id = 1;

	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
	if (last) {
		/* This value is less likely to be already used */
		id = last->id + 1;
		if (!id)
			id++; /* 0 is not valid */
	}

	saved = id;
	do {
		if (!tee_ta_find_session_nolock(id, open_sessions))
			return id;
		id++;
		if (!id)
			id++;
	} while (id != saved);

	return 0;
}

static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
				struct tee_ta_session_head *open_sessions,
				const TEE_UUID *uuid,
				struct tee_ta_session **sess)
{
	TEE_Result res;
	struct tee_ta_ctx *ctx;
	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

	*err = TEE_ORIGIN_TEE;
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->cancel_mask = true;
	condvar_init(&s->refc_cv);
	condvar_init(&s->lock_cv);
	s->lock_thread = THREAD_ID_INVALID;
	s->ref_count = 1;

	/*
	 * We take the global TA mutex here and hold it while doing
	 * RPC to load the TA. This big critical section should be broken
	 * down into smaller pieces.
	 */
	mutex_lock(&tee_ta_mutex);
	s->id = new_session_id(open_sessions);
	if (!s->id) {
		/*
		 * Session IDs exhausted. The session was never enqueued,
		 * so it must not go through the removal path below.
		 */
		mutex_unlock(&tee_ta_mutex);
		free(s);
		return TEE_ERROR_OVERFLOW;
	}
	TAILQ_INSERT_TAIL(open_sessions, s, link);

	/* Look for an already loaded TA */
	ctx = tee_ta_context_find(uuid);
	if (ctx) {
		res = tee_ta_init_session_with_context(ctx, s);
		if (res != TEE_ERROR_ITEM_NOT_FOUND)
			goto out;
	}

	/* Look for a pseudo TA */
	res = tee_ta_init_pseudo_ta_session(uuid, s);
	if (res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for a user TA */
	res = tee_ta_init_user_ta_session(uuid, s);

out:
	if (res == TEE_SUCCESS) {
		*sess = s;
	} else {
		TAILQ_REMOVE(open_sessions, s, link);
		free(s);
	}
	mutex_unlock(&tee_ta_mutex);
	return res;
}

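/*
 * Open a session towards the TA identified by @uuid on behalf of
 * @clnt_id. The sequence is: create the session and bind it to a
 * (possibly freshly loaded) TA instance, validate the parameters, claim
 * the instance busy, run the TA's open-session entry point, and release
 * the instance again. On failure the half-open session is closed with
 * kernel identity.
 */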
TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
			       struct tee_ta_session **sess,
			       struct tee_ta_session_head *open_sessions,
			       const TEE_UUID *uuid,
			       const TEE_Identity *clnt_id,
			       uint32_t cancel_req_to,
			       struct tee_ta_param *param)
{
	TEE_Result res;
	struct tee_ta_session *s = NULL;
	struct tee_ta_ctx *ctx;
	bool panicked;
	bool was_busy = false;

	res = tee_ta_init_session(err, open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("init session failed 0x%x", res);
		return res;
	}

	if (!check_params(s, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ctx = s->ctx;

	if (!ctx || ctx->panicked) {
		DMSG("panicked, call tee_ta_close_session()");
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	*sess = s;
	/* Save the identity of the owner of the session */
	s->clnt_id = *clnt_id;

	if (tee_ta_try_set_busy(ctx)) {
		set_invoke_timeout(s, cancel_req_to);
		res = ctx->ops->enter_open_session(s, param, err);
		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
		was_busy = true;
	}

	panicked = ctx->panicked;

	tee_ta_put_session(s);
	if (panicked || (res != TEE_SUCCESS))
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular"
	 * errors; panics and busy conflicts are attributed to the TEE.
	 */
	if (panicked || was_busy)
		*err = TEE_ORIGIN_TEE;
	else
		*err = TEE_ORIGIN_TRUSTED_APP;

	if (res != TEE_SUCCESS)
		EMSG("Failed. Return error 0x%x", res);

	return res;
}

TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id,
				 uint32_t cancel_req_to, uint32_t cmd,
				 struct tee_ta_param *param)
{
	TEE_Result res;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	if (!check_params(sess, param))
		return TEE_ERROR_BAD_PARAMETERS;

	if (!sess->ctx) {
		/* The context has already been destroyed */
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	} else if (sess->ctx->panicked) {
		DMSG("Panicked!");
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	tee_ta_set_busy(sess->ctx);

	set_invoke_timeout(sess, cancel_req_to);
	res = sess->ctx->ops->enter_invoke_cmd(sess, cmd, param, err);

	tee_ta_clear_busy(sess->ctx);

	if (sess->ctx->panicked) {
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	/* A short buffer is not an effective error case */
	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
		DMSG("Error: %x of %d", res, *err);

	return res;
}

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id)
{
	*err = TEE_ORIGIN_TEE;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	sess->cancel = true;
	return TEE_SUCCESS;
}

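/*
 * Report whether the session is cancelled. The checks are ordered: a set
 * cancellation mask always wins, then an explicit cancel request, then
 * the cancellation deadline. The UINT32_MAX sentinel written by
 * set_invoke_timeout() means "no deadline", and @curr_time may carry a
 * pre-fetched system time to avoid an extra timer read.
 */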
bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}

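/*
 * Each thread keeps a stack of entered sessions (tsd->sess_stack) and the
 * MMU context must follow the topmost user TA session. Pseudo TAs have no
 * user-mode address space of their own, so when one sits on top, the
 * context of the session below it (its caller) is the one that stays
 * mapped.
 */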
static void update_current_ctx(struct thread_specific_data *tsd)
{
	struct tee_ta_ctx *ctx = NULL;
	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);

	if (s) {
		if (is_pseudo_ta_ctx(s->ctx))
			s = TAILQ_NEXT(s, link_tsd);

		if (s)
			ctx = s->ctx;
	}

	if (tsd->ctx != ctx)
		tee_mmu_set_ctx(ctx);
	/*
	 * If the context has no user memory mapping (vm_info == NULL) we
	 * must not have a user mapping active, if it has one we must have
	 * a user mapping active.
	 */
	if (((is_user_ta_ctx(ctx) ?
			to_user_ta_ctx(ctx)->vm_info : NULL) == NULL) ==
					core_mmu_user_mapping_is_active())
		panic("unexpected active mapping");
}

void tee_ta_push_current_session(struct tee_ta_session *sess)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	TAILQ_INSERT_HEAD(&tsd->sess_stack, sess, link_tsd);
	update_current_ctx(tsd);
}

struct tee_ta_session *tee_ta_pop_current_session(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);

	if (s) {
		TAILQ_REMOVE(&tsd->sess_stack, s, link_tsd);
		update_current_ctx(tsd);
	}
	return s;
}

TEE_Result tee_ta_get_current_session(struct tee_ta_session **sess)
{
	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (!s)
		return TEE_ERROR_BAD_STATE;
	*sess = s;
	return TEE_SUCCESS;
}

struct tee_ta_session *tee_ta_get_calling_session(void)
{
	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s)
		s = TAILQ_NEXT(s, link_tsd);
	return s;
}

/*
 * dump_state - Display the TA state as an error log.
 */
static void dump_state(struct tee_ta_ctx *ctx)
{
	struct tee_ta_session *s = NULL;
	bool active __maybe_unused;

	if (!ctx) {
		EMSG("No TA status: null context reference");
		return;
	}

	active = ((tee_ta_get_current_session(&s) == TEE_SUCCESS) &&
		  s && s->ctx == ctx);

	EMSG_RAW("Status of TA %pUl (%p) %s", (void *)&ctx->uuid, (void *)ctx,
		active ? "(active)" : "");
	ctx->ops->dump_state(ctx);
}

void tee_ta_dump_current(void)
{
	struct tee_ta_session *s = NULL;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS) {
		EMSG("no valid session found, cannot log TA status");
		return;
	}

	dump_state(s->ctx);

	ta_fbuf_dump(s);
}

#if defined(CFG_TA_GPROF_SUPPORT)
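/*
 * Record a PC sample in the session's gprof histogram. Following the
 * classic gprof convention, the text segment starting at sbuf->offset is
 * divided into two-byte slots and sbuf->scale is a 16.16 fixed-point
 * factor mapping slots to histogram bins, hence the division by 65536.
 * Samples falling outside the buffer only bump the total count.
 */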
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
	struct tee_ta_session *s;
	struct sample_buf *sbuf;
	size_t idx;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		return;
	sbuf = s->sbuf;
	if (!sbuf || !sbuf->enabled)
		return; /* PC sampling is not enabled */

	idx = (((uint64_t)pc - sbuf->offset)/2 * sbuf->scale)/65536;
	if (idx < sbuf->nsamples)
		sbuf->samples[idx]++;
	sbuf->count++;
}

/*
 * Update user-mode CPU time for the current session
 * @suspend: true if session is being suspended (leaving user mode), false if
 * it is resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
	struct tee_ta_session *s;
	struct sample_buf *sbuf;
	uint64_t now;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		return;
	sbuf = s->sbuf;
	if (!sbuf)
		return;
	now = read_cntpct();
	if (suspend) {
		assert(sbuf->usr_entered);
		sbuf->usr += now - sbuf->usr_entered;
		sbuf->usr_entered = 0;
	} else {
		assert(!sbuf->usr_entered);
		if (!now)
			now++; /* 0 is reserved */
		sbuf->usr_entered = now;
	}
}

void tee_ta_update_session_utime_suspend(void)
{
	tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
	tee_ta_update_session_utime(false);
}
#endif
964