// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/secure_partition.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_mmu.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee/entry_std.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc_storage.h>
#include <trace.h>
#include <types_ext.h>
#include <user_ta_header.h>
#include <utee_types.h>
#include <util.h>

/*
 * This mutex protects the TA context list, the session lists and the
 * busy/single-instance state below, including the critical section in
 * tee_ta_init_session().
 */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
/* This condvar is used when waiting for a TA context to become initialized */
struct condvar tee_ta_init_cv = CONDVAR_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static short int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

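/*
 * When CFG_CONCURRENT_SINGLE_INSTANCE_TA is enabled the global
 * single-instance lock below is compiled out (no-op stubs) and threads
 * always wait on the per-context busy flag instead. Otherwise the lock is
 * held, recursively via the count above, by the thread currently calling
 * into single-instance TAs, which lets tee_ta_try_set_busy() detect
 * would-be deadlocks.
 */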
#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available. */
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}

static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}
#endif

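/*
 * Try to mark @ctx busy for exclusive use by this thread. TAs with
 * TA_FLAG_CONCURRENT are never marked busy. Returns false only when
 * waiting for the TA could deadlock, that is, when this thread holds the
 * single-instance lock while the TA is busy.
 */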
static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	if (ctx->flags & TA_FLAG_CONCURRENT)
		return true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We're holding the single-instance lock and the
			 * TA is busy. Waiting now could only cause a
			 * deadlock, so release the lock and return false.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, so we're
		 * free to wait for the TA to become available.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either busy was already true (rc == false) or we claim it now */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	if (ctx->flags & TA_FLAG_CONCURRENT)
		return;

	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

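	/*
	 * ctx->initializing is a one-shot flag consumed by the first
	 * clear after the TA finished initializing: the single-instance
	 * unlock is skipped in that case, presumably because no matching
	 * lock was taken while the TA was still being set up.
	 */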
	if (!ctx->initializing && (ctx->flags & TA_FLAG_SINGLE_INSTANCE))
		unlock_single_instance();

	ctx->initializing = false;

	mutex_unlock(&tee_ta_mutex);
}

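/*
 * Drops one reference to the session. The condvar is signaled when the
 * count reaches 1, i.e. when only the thread blocked in
 * tee_ta_unlink_session() still references the session.
 */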
static void dec_session_ref_count(struct tee_ta_session *s)
{
	assert(s->ref_count > 0);
	s->ref_count--;
	if (s->ref_count == 1)
		condvar_signal(&s->refc_cv);
}

void tee_ta_put_session(struct tee_ta_session *s)
{
	mutex_lock(&tee_ta_mutex);

	if (s->lock_thread == thread_get_id()) {
		s->lock_thread = THREAD_ID_INVALID;
		condvar_signal(&s->lock_cv);
	}
	dec_session_ref_count(s);

	mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *found = NULL;

	TAILQ_FOREACH(s, open_sessions, link) {
		if (s->id == id) {
			found = s;
			break;
		}
	}

	return found;
}

struct tee_ta_session *tee_ta_find_session(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;

	mutex_lock(&tee_ta_mutex);

	s = tee_ta_find_session_nolock(id, open_sessions);

	mutex_unlock(&tee_ta_mutex);

	return s;
}

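/*
 * Looks up an open session by id and takes a reference on it. With
 * @exclusive set, the call also blocks until no other thread holds the
 * session lock, making the caller the sole user of the session. Returns
 * NULL if the session doesn't exist or is being removed.
 */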
struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	mutex_lock(&tee_ta_mutex);

	while (true) {
		s = tee_ta_find_session_nolock(id, open_sessions);
		if (!s)
			break;
		if (s->unlink) {
			s = NULL;
			break;
		}
		s->ref_count++;
		if (!exclusive)
			break;

		assert(s->lock_thread != thread_get_id());

		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
			condvar_wait(&s->lock_cv, &tee_ta_mutex);

		if (s->unlink) {
			dec_session_ref_count(s);
			s = NULL;
			break;
		}

		s->lock_thread = thread_get_id();
		break;
	}

	mutex_unlock(&tee_ta_mutex);
	return s;
}

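/*
 * Marks the session for removal and takes it off @open_sessions. Blocks
 * until the caller's reference (taken with tee_ta_get_session()) is the
 * only one left.
 */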
static void tee_ta_unlink_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	mutex_lock(&tee_ta_mutex);

	assert(s->ref_count >= 1);
	assert(s->lock_thread == thread_get_id());
	assert(!s->unlink);

	s->unlink = true;
	condvar_broadcast(&s->lock_cv);

	/* Wait until the only remaining reference is our caller's */
	while (s->ref_count != 1)
		condvar_wait(&s->refc_cv, &tee_ta_mutex);

	TAILQ_REMOVE(open_sessions, s, link);

	mutex_unlock(&tee_ta_mutex);
}

static void destroy_session(struct tee_ta_session *s,
			    struct tee_ta_session_head *open_sessions)
{
#if defined(CFG_FTRACE_SUPPORT)
	if (s->ctx && s->ctx->ops->dump_ftrace) {
		tee_ta_push_current_session(s);
		s->fbuf = NULL;
		s->ctx->ops->dump_ftrace(s->ctx);
		tee_ta_pop_current_session();
	}
#endif

	tee_ta_unlink_session(s, open_sessions);
#if defined(CFG_TA_GPROF_SUPPORT)
	free(s->sbuf);
#endif
	free(s);
}

static void destroy_context(struct tee_ta_ctx *ctx)
{
	DMSG("Destroy TA ctx (0x%" PRIxVA ")", (vaddr_t)ctx);

	condvar_destroy(&ctx->busy_cv);
	pgt_flush_ctx(ctx);
	ctx->ops->destroy(ctx);
}

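/*
 * Tears down the TA context referenced by @s, typically after the TA has
 * panicked. All other sessions still pointing at the context have their
 * ctx pointer cleared first; the reference held by @s itself is cleared
 * last, once the context has been destroyed.
 */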
static void destroy_ta_ctx_from_session(struct tee_ta_session *s)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct user_ta_ctx *utc = NULL;
	size_t count = 1; /* start counting the references to the context */

	DMSG("Remove references to context (0x%" PRIxVA ")", (vaddr_t)s->ctx);

	mutex_lock(&tee_ta_mutex);
	nsec_sessions_list_head(&open_sessions);

	/*
	 * The next two loops remove all references to the context that is
	 * about to be destroyed, except the reference held by the current
	 * session. That one is cleared last, after the context has been
	 * properly destroyed.
	 */

	/*
	 * Scan the entire list of sessions opened by clients from the
	 * non-secure world.
	 */
	TAILQ_FOREACH(sess, open_sessions, link) {
		if (sess->ctx == s->ctx && sess != s) {
			sess->ctx = NULL;
			count++;
		}
	}

	/*
	 * Scan all sessions opened from the secure side by searching
	 * through all available TA instances and, for each context,
	 * scanning all opened sessions.
	 */
	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (is_user_ta_ctx(ctx)) {
			utc = to_user_ta_ctx(ctx);

			TAILQ_FOREACH(sess, &utc->open_sessions, link) {
				if (sess->ctx == s->ctx && sess != s) {
					sess->ctx = NULL;
					count++;
				}
			}
		}
	}

	assert(count == s->ctx->ref_count);

	TAILQ_REMOVE(&tee_ctxes, s->ctx, link);
	mutex_unlock(&tee_ta_mutex);

	destroy_context(s->ctx);

	s->ctx = NULL;
}

/*
 * tee_ta_context_find - Find a loaded TA context based on a UUID (input)
 * Returns a pointer to the context, or NULL if it isn't found
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx;

	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (memcmp(&ctx->uuid, uuid, sizeof(TEE_UUID)) == 0)
			return ctx;
	}

	return NULL;
}

/* Check if the requester (client ID) matches the session's initial client */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
	if (id == KERN_IDENTITY)
		return TEE_SUCCESS;

	if (id == NSAPP_IDENTITY) {
		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
			DMSG("nsec tries to hijack TA session");
			return TEE_ERROR_ACCESS_DENIED;
		}
		return TEE_SUCCESS;
	}

	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
		DMSG("client id mismatch");
		return TEE_ERROR_ACCESS_DENIED;
	}
	return TEE_SUCCESS;
}

/*
 * Check if invocation parameters match the TA properties
 *
 * @sess - current session handle
 * @param - memory reference parameters have already been identified and
 * hold a valid 'mobj'
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy)
 * - Only SDP flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
			 struct tee_ta_param *param __unused)
{
	/*
	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
	 * are rejected at OP-TEE core entry. Hence all TAs have the same
	 * permissions regarding memory reference parameters here.
	 */
	return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
			 struct tee_ta_param *param)
{
	int n;

	/*
	 * When CFG_SECURE_DATA_PATH is enabled, OP-TEE entry allows SHM and
	 * SDP memory references. Only TAs flagged SDP can access SDP memory.
	 */
	if (sess->ctx && sess->ctx->flags & TA_FLAG_SECURE_DATA_PATH)
		return true;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		struct param_mem *mem = &param->u[n].mem;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!mem->size)
			continue;
		if (mobj_is_sdp_mem(mem->mobj))
			return false;
	}
	return true;
}
#endif

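/*
 * Converts the relative cancellation timeout @cancel_req_to (in
 * milliseconds) into an absolute system time stored in the session. A
 * value of TEE_TIMEOUT_INFINITE, a failed time read or an arithmetic
 * overflow all yield the "never" timestamp {UINT32_MAX, UINT32_MAX}.
 */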
static void set_invoke_timeout(struct tee_ta_session *sess,
				      uint32_t cancel_req_to)
{
	TEE_Time current_time;
	TEE_Time cancel_time;

	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
		goto infinite;

	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		goto infinite;

	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
			 &cancel_time.seconds))
		goto infinite;

	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
	if (cancel_time.millis > 1000) {
		/* Carry the millisecond overflow into the seconds field */
		if (ADD_OVERFLOW(cancel_time.seconds, 1,
				 &cancel_time.seconds))
			goto infinite;

		cancel_time.millis -= 1000;
	}

	sess->cancel_time = cancel_time;
	return;

infinite:
	sess->cancel_time.seconds = UINT32_MAX;
	sess->cancel_time.millis = UINT32_MAX;
}

/*-----------------------------------------------------------------------------
 * Close a Trusted Application session and release its resources
 *---------------------------------------------------------------------------*/
TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
				struct tee_ta_session_head *open_sessions,
				const TEE_Identity *clnt_id)
{
	struct tee_ta_session *sess;
	struct tee_ta_ctx *ctx;
	bool keep_alive;

	DMSG("csess 0x%" PRIxVA " id %u",
	     (vaddr_t)csess, csess ? csess->id : UINT_MAX);

	if (!csess)
		return TEE_ERROR_ITEM_NOT_FOUND;

	sess = tee_ta_get_session(csess->id, true, open_sessions);

	if (!sess) {
		EMSG("session 0x%" PRIxVA " to be removed is not found",
		     (vaddr_t)csess);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
		tee_ta_put_session(sess);
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
	}

	ctx = sess->ctx;
	DMSG("Destroy session");

	if (!ctx) {
		destroy_session(sess, open_sessions);
		return TEE_SUCCESS;
	}

	if (ctx->panicked) {
		destroy_session(sess, open_sessions);
	} else {
		tee_ta_set_busy(ctx);
		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
		ctx->ops->enter_close_session(sess);
		destroy_session(sess, open_sessions);
		tee_ta_clear_busy(ctx);
	}

	mutex_lock(&tee_ta_mutex);

	if (ctx->ref_count <= 0)
		panic();

	ctx->ref_count--;
	keep_alive = (ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) &&
			(ctx->flags & TA_FLAG_SINGLE_INSTANCE);
	if (!ctx->ref_count && !keep_alive) {
		TAILQ_REMOVE(&tee_ctxes, ctx, link);
		mutex_unlock(&tee_ta_mutex);

		destroy_context(ctx);
	} else
		mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}

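/*
 * Binds @s to an already loaded single-instance TA context matching
 * @uuid, first waiting for any in-progress initialization of that
 * context to finish. Returns TEE_ERROR_ITEM_NOT_FOUND when the caller
 * should load a fresh instance instead. Called with tee_ta_mutex held.
 */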
static TEE_Result tee_ta_init_session_with_context(struct tee_ta_session *s,
						   const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx = NULL;

	while (true) {
		ctx = tee_ta_context_find(uuid);
		if (!ctx)
			return TEE_ERROR_ITEM_NOT_FOUND;

		if (!is_user_ta_ctx(ctx) ||
		    !to_user_ta_ctx(ctx)->is_initializing)
			break;
		/*
		 * The context is still initializing, wait here until it's
		 * fully initialized. Note that we're searching for the
		 * context again since it may have been removed while we
		 * were sleeping.
		 */
		condvar_wait(&tee_ta_init_cv, &tee_ta_mutex);
	}

	/*
	 * If the TA isn't single-instance it should be loaded as a new
	 * instance instead of reusing this one. So tell the caller that
	 * we didn't find the TA and the caller will load a new instance.
	 */
	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * The TA is single-instance. If it isn't multi-session we can't
	 * create another session unless its reference count is zero.
	 */
	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
		return TEE_ERROR_BUSY;

	DMSG("Re-open TA %pUl", (void *)&ctx->uuid);

	ctx->ref_count++;
	s->ctx = ctx;
	return TEE_SUCCESS;
}

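/*
 * Allocates a non-zero session id, starting the search after the id of
 * the last session in the list and wrapping around if needed. Returns 0
 * only if all valid ids are in use. Called with tee_ta_mutex held.
 */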
static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *last = NULL;
	uint32_t saved = 0;
	uint32_t id = 1;

	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
	if (last) {
		/* This value is unlikely to already be in use */
		id = last->id + 1;
		if (!id)
			id++; /* 0 is not valid */
	}

	saved = id;
	do {
		if (!tee_ta_find_session_nolock(id, open_sessions))
			return id;
		id++;
		if (!id)
			id++;
	} while (id != saved);

	return 0;
}

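/*
 * Creates a session and binds it to a TA context, trying the possible
 * backends in order: an already loaded (single-instance) TA, a secure
 * partition, a pseudo TA and finally a user TA loaded from storage. On
 * any result other than "item not found" the search stops.
 */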
static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
				struct tee_ta_session_head *open_sessions,
				const TEE_UUID *uuid,
				struct tee_ta_session **sess)
{
	TEE_Result res;
	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

	*err = TEE_ORIGIN_TEE;
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->cancel_mask = true;
	condvar_init(&s->refc_cv);
	condvar_init(&s->lock_cv);
	s->lock_thread = THREAD_ID_INVALID;
	s->ref_count = 1;

	mutex_lock(&tee_ta_mutex);
	s->id = new_session_id(open_sessions);
	if (!s->id) {
		res = TEE_ERROR_OVERFLOW;
		goto err_mutex_unlock;
	}

	TAILQ_INSERT_TAIL(open_sessions, s, link);

	/* Look for an already loaded TA */
	res = tee_ta_init_session_with_context(s, uuid);
	mutex_unlock(&tee_ta_mutex);
	/* Only keep searching if the TA wasn't found here */
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for a secure partition */
	mutex_lock(&tee_ta_mutex);
	res = sec_part_init_session(uuid, s);
	mutex_unlock(&tee_ta_mutex);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for a pseudo TA */
	mutex_lock(&tee_ta_mutex);
	res = tee_ta_init_pseudo_ta_session(uuid, s);
	mutex_unlock(&tee_ta_mutex);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for a user TA */
	res = tee_ta_init_user_ta_session(uuid, s);

out:
	if (!res) {
		*sess = s;
		return TEE_SUCCESS;
	}

	mutex_lock(&tee_ta_mutex);
	TAILQ_REMOVE(open_sessions, s, link);
err_mutex_unlock:
	mutex_unlock(&tee_ta_mutex);
	free(s);
	return res;
}

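/*
 * Opens a session towards the TA identified by @uuid on behalf of
 * @clnt_id. On success *@sess refers to the new session. If the TA
 * cannot be entered, or opening it fails, the partially created session
 * is closed again and *@err reports whether the error originated in the
 * TEE or in the TA itself.
 */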
TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
			       struct tee_ta_session **sess,
			       struct tee_ta_session_head *open_sessions,
			       const TEE_UUID *uuid,
			       const TEE_Identity *clnt_id,
			       uint32_t cancel_req_to,
			       struct tee_ta_param *param)
{
	TEE_Result res;
	struct tee_ta_session *s = NULL;
	struct tee_ta_ctx *ctx;
	bool panicked;
	bool was_busy = false;

	res = tee_ta_init_session(err, open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("init session failed 0x%x", res);
		return res;
	}

	if (!check_params(s, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ctx = s->ctx;

	if (!ctx || ctx->panicked) {
		DMSG("panicked, call tee_ta_close_session()");
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	*sess = s;
	/* Save the identity of the session owner */
	s->clnt_id = *clnt_id;

	if (tee_ta_try_set_busy(ctx)) {
		set_invoke_timeout(s, cancel_req_to);
		res = ctx->ops->enter_open_session(s, param, err);
		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
		was_busy = true;
	}

	panicked = ctx->panicked;

	tee_ta_put_session(s);
	if (panicked || (res != TEE_SUCCESS))
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors
	 * reported by the TA; panics and busy contexts are reported with
	 * origin TEE_ORIGIN_TEE instead.
	 */
	if (panicked || was_busy)
		*err = TEE_ORIGIN_TEE;

	if (res != TEE_SUCCESS)
		EMSG("Failed. Return error 0x%x", res);

	return res;
}

TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id,
				 uint32_t cancel_req_to, uint32_t cmd,
				 struct tee_ta_param *param)
{
	TEE_Result res;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	if (!check_params(sess, param))
		return TEE_ERROR_BAD_PARAMETERS;

	if (!sess->ctx) {
		/* The context has already been destroyed */
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	} else if (sess->ctx->panicked) {
		DMSG("Panicked !");
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	tee_ta_set_busy(sess->ctx);

	set_invoke_timeout(sess, cancel_req_to);
	res = sess->ctx->ops->enter_invoke_cmd(sess, cmd, param, err);

	tee_ta_clear_busy(sess->ctx);

	if (sess->ctx->panicked) {
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	/* A short buffer is not a real error case */
	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
		DMSG("Error: %x of %d", res, *err);

	return res;
}

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id)
{
	*err = TEE_ORIGIN_TEE;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	sess->cancel = true;
	return TEE_SUCCESS;
}

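/*
 * Returns true when the session has a pending cancellation. The cancel
 * mask takes precedence, then an explicit cancel request, then the
 * cancellation deadline compared against @curr_time (or against the
 * current system time when @curr_time is NULL).
 */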
bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}

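/*
 * Makes the MMU context match the top of the session stack. A pseudo TA
 * at the top runs in the context of its caller, so in that case the next
 * entry down the stack (if any) decides the active context.
 */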
static void update_current_ctx(struct thread_specific_data *tsd)
{
	struct tee_ta_ctx *ctx = NULL;
	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);

	if (s) {
		if (is_pseudo_ta_ctx(s->ctx))
			s = TAILQ_NEXT(s, link_tsd);

		if (s)
			ctx = s->ctx;
	}

	if (tsd->ctx != ctx)
		tee_mmu_set_ctx(ctx);
	/*
	 * If the current context is a user-mode context its mapping must
	 * be active too.
	 */
	if (is_user_mode_ctx(ctx) != core_mmu_user_mapping_is_active())
		panic("unexpected active mapping");
}

void tee_ta_push_current_session(struct tee_ta_session *sess)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	TAILQ_INSERT_HEAD(&tsd->sess_stack, sess, link_tsd);
	update_current_ctx(tsd);
}

struct tee_ta_session *tee_ta_pop_current_session(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);

	if (s) {
		TAILQ_REMOVE(&tsd->sess_stack, s, link_tsd);
		update_current_ctx(tsd);
	}
	return s;
}

TEE_Result tee_ta_get_current_session(struct tee_ta_session **sess)
{
	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (!s)
		return TEE_ERROR_BAD_STATE;
	*sess = s;
	return TEE_SUCCESS;
}

struct tee_ta_session *tee_ta_get_calling_session(void)
{
	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s)
		s = TAILQ_NEXT(s, link_tsd);
	return s;
}

#if defined(CFG_TA_GPROF_SUPPORT)
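/*
 * gprof-style PC sampling: each sample increments a histogram bucket in
 * the user TA's sample buffer. As in classic gprof, sbuf->scale is a
 * 16.16 fixed-point factor mapping half the PC offset to a bucket index.
 */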
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
	struct tee_ta_session *s = NULL;
	struct user_ta_ctx *utc = NULL;
	struct sample_buf *sbuf = NULL;
	TEE_Result res = 0;
	size_t idx = 0;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		return;
	sbuf = s->sbuf;
	if (!sbuf || !sbuf->enabled)
		return; /* PC sampling is not enabled */

	idx = (((uint64_t)pc - sbuf->offset)/2 * sbuf->scale)/65536;
	if (idx < sbuf->nsamples) {
		utc = to_user_ta_ctx(s->ctx);
		res = tee_mmu_check_access_rights(&utc->uctx,
						  TEE_MEMORY_ACCESS_READ |
						  TEE_MEMORY_ACCESS_WRITE |
						  TEE_MEMORY_ACCESS_ANY_OWNER,
						  (uaddr_t)&sbuf->samples[idx],
						  sizeof(*sbuf->samples));
		if (res != TEE_SUCCESS)
			return;
		sbuf->samples[idx]++;
	}
	sbuf->count++;
}

static void gprof_update_session_utime(bool suspend, struct tee_ta_session *s,
				       uint64_t now)
{
	struct sample_buf *sbuf = NULL;

	sbuf = s->sbuf;
	if (!sbuf)
		return;

	if (suspend) {
		assert(sbuf->usr_entered);
		sbuf->usr += now - sbuf->usr_entered;
		sbuf->usr_entered = 0;
	} else {
		assert(!sbuf->usr_entered);
		if (!now)
			now++; /* 0 is reserved */
		sbuf->usr_entered = now;
	}
}

/*
 * Update user-mode CPU time for the current session
 * @suspend: true if the session is being suspended (leaving user mode),
 * false if it is resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
	struct tee_ta_session *s = NULL;
	uint64_t now = 0;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		return;

	now = read_cntpct();

	gprof_update_session_utime(suspend, s, now);
}

void tee_ta_update_session_utime_suspend(void)
{
	tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
	tee_ta_update_session_utime(false);
}
#endif

#if defined(CFG_FTRACE_SUPPORT)
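/*
 * While a session is suspended the CPU counter keeps running, so on
 * resume every function-entry timestamp still on the ftrace return stack
 * is shifted forward by the time spent suspended.
 */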
static void ftrace_update_times(bool suspend)
{
	struct tee_ta_session *s = NULL;
	struct ftrace_buf *fbuf = NULL;
	uint64_t now = 0;
	uint32_t i = 0;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		return;

	now = read_cntpct();

	fbuf = s->fbuf;
	if (!fbuf)
		return;

	if (suspend) {
		fbuf->suspend_time = now;
	} else {
		for (i = 0; i <= fbuf->ret_idx; i++)
			fbuf->begin_time[i] += now - fbuf->suspend_time;
	}
}

void tee_ta_ftrace_update_times_suspend(void)
{
	ftrace_update_times(true);
}

void tee_ta_ftrace_update_times_resume(void)
{
	ftrace_update_times(false);
}
#endif