xref: /optee_os/core/kernel/tee_ta_manager.c (revision ba2a6adb764f1310ad3c3091d89de84274f86b02)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020, Arm Limited
 */

#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/stmm_sp.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <malloc.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee/entry_std.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc_storage.h>
#include <trace.h>
#include <types_ext.h>
#include <user_ta_header.h>
#include <utee_types.h>
#include <util.h>

#if defined(CFG_TA_STATS)
#define MAX_DUMP_SESS_NUM	(16)
struct tee_ta_dump_stats {
	TEE_UUID uuid;
	uint32_t panicked;	/* True if TA has panicked */
	uint32_t sess_num;	/* Number of opened sessions */
	struct malloc_stats heap;
};

struct tee_ta_dump_ctx {
	TEE_UUID uuid;
	uint32_t panicked;
	bool is_user_ta;
	uint32_t sess_num;
	uint32_t sess_id[MAX_DUMP_SESS_NUM];
};
#endif

/* This mutex protects the critical section in tee_ta_init_session */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
/* This condvar is used when waiting for a TA context to become initialized */
struct condvar tee_ta_init_cv = CONDVAR_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static short int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available. */
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}

static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}
#endif
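
/*
 * Illustrative sketch (not part of the build): the single-instance lock
 * is recursive per thread. Assuming tee_ta_mutex is held, a thread may
 * nest lock/unlock pairs and only the last unlock releases the lock:
 *
 *	mutex_lock(&tee_ta_mutex);
 *	lock_single_instance();     // count 0 -> 1, owner = this thread
 *	lock_single_instance();     // count 1 -> 2, no waiting
 *	unlock_single_instance();   // count 2 -> 1, still owned
 *	unlock_single_instance();   // count 1 -> 0, signals tee_ta_cv
 *	mutex_unlock(&tee_ta_mutex);
 *
 * With CFG_CONCURRENT_SINGLE_INSTANCE_TA the lock degenerates to no-ops
 * and has_single_instance_lock() always reports false.
 */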

struct tee_ta_session *__noprof to_ta_session(struct ts_session *sess)
{
	assert(is_ta_ctx(sess->ctx) || is_stmm_ctx(sess->ctx));
	return container_of(sess, struct tee_ta_session, ts_sess);
}

static struct tee_ta_ctx *ts_to_ta_ctx(struct ts_ctx *ctx)
{
	if (is_ta_ctx(ctx))
		return to_ta_ctx(ctx);

	if (is_stmm_ctx(ctx))
		return &(to_stmm_ctx(ctx)->ta_ctx);

	panic("bad context");
}

static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	if (ctx->flags & TA_FLAG_CONCURRENT)
		return true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We're holding the single-instance lock and the
			 * TA is busy. Waiting now would only cause a
			 * deadlock, so we release the lock and return
			 * false.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, so we're
		 * free to wait for the TA to become available.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either it's already true or we should set it to true */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}
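
/*
 * Illustrative sketch (not part of the build): callers that may already
 * hold the single-instance lock must use the try-variant and treat a
 * false return as TEE_ERROR_BUSY instead of blocking:
 *
 *	if (tee_ta_try_set_busy(ctx)) {
 *		res = do_work(ctx);	// hypothetical work function
 *		tee_ta_clear_busy(ctx);
 *	} else {
 *		res = TEE_ERROR_BUSY;	// deadlock avoided
 *	}
 */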

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	if (ctx->flags & TA_FLAG_CONCURRENT)
		return;

	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		unlock_single_instance();

	mutex_unlock(&tee_ta_mutex);
}

static void dec_session_ref_count(struct tee_ta_session *s)
{
	assert(s->ref_count > 0);
	s->ref_count--;
	if (s->ref_count == 1)
		condvar_signal(&s->refc_cv);
}

void tee_ta_put_session(struct tee_ta_session *s)
{
	mutex_lock(&tee_ta_mutex);

	if (s->lock_thread == thread_get_id()) {
		s->lock_thread = THREAD_ID_INVALID;
		condvar_signal(&s->lock_cv);
	}
	dec_session_ref_count(s);

	mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *found = NULL;

	TAILQ_FOREACH(s, open_sessions, link) {
		if (s->id == id) {
			found = s;
			break;
		}
	}

	return found;
}

struct tee_ta_session *tee_ta_find_session(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;

	mutex_lock(&tee_ta_mutex);

	s = tee_ta_find_session_nolock(id, open_sessions);

	mutex_unlock(&tee_ta_mutex);

	return s;
}

struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	mutex_lock(&tee_ta_mutex);

	while (true) {
		s = tee_ta_find_session_nolock(id, open_sessions);
		if (!s)
			break;
		if (s->unlink) {
			s = NULL;
			break;
		}
		s->ref_count++;
		if (!exclusive)
			break;

		assert(s->lock_thread != thread_get_id());

		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
			condvar_wait(&s->lock_cv, &tee_ta_mutex);

		if (s->unlink) {
			dec_session_ref_count(s);
			s = NULL;
			break;
		}

		s->lock_thread = thread_get_id();
		break;
	}

	mutex_unlock(&tee_ta_mutex);
	return s;
}
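
/*
 * Illustrative sketch (not part of the build): every successful
 * tee_ta_get_session() must be balanced by tee_ta_put_session(). With
 * exclusive == true the calling thread also takes the per-session lock
 * (s->lock_thread), which tee_ta_put_session() releases:
 *
 *	struct tee_ta_session *s = NULL;
 *
 *	s = tee_ta_get_session(id, true, open_sessions);
 *	if (!s)
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 *	// ... use the session exclusively ...
 *	tee_ta_put_session(s);	// drops the lock and the reference
 */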

static void tee_ta_unlink_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	mutex_lock(&tee_ta_mutex);

	assert(s->ref_count >= 1);
	assert(s->lock_thread == thread_get_id());
	assert(!s->unlink);

	s->unlink = true;
	condvar_broadcast(&s->lock_cv);

	while (s->ref_count != 1)
		condvar_wait(&s->refc_cv, &tee_ta_mutex);

	TAILQ_REMOVE(open_sessions, s, link);

	mutex_unlock(&tee_ta_mutex);
}

static void destroy_session(struct tee_ta_session *s,
			    struct tee_ta_session_head *open_sessions)
{
#if defined(CFG_FTRACE_SUPPORT)
	if (s->ts_sess.ctx && s->ts_sess.ctx->ops->dump_ftrace) {
		ts_push_current_session(&s->ts_sess);
		s->ts_sess.fbuf = NULL;
		s->ts_sess.ctx->ops->dump_ftrace(s->ts_sess.ctx);
		ts_pop_current_session();
	}
#endif

	tee_ta_unlink_session(s, open_sessions);
#if defined(CFG_TA_GPROF_SUPPORT)
	free(s->ts_sess.sbuf);
#endif
	free(s);
}

static void destroy_context(struct tee_ta_ctx *ctx)
{
	DMSG("Destroy TA ctx (0x%" PRIxVA ")",  (vaddr_t)ctx);

	condvar_destroy(&ctx->busy_cv);
	ctx->ts_ctx.ops->destroy(&ctx->ts_ctx);
}

static void destroy_ta_ctx_from_session(struct tee_ta_session *s)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct user_ta_ctx *utc = NULL;
	struct ts_ctx *ts_ctx = s->ts_sess.ctx;
	size_t count = 1; /* start counting the references to the context */

	DMSG("Remove references to context (%#"PRIxVA")", (vaddr_t)ts_ctx);

	mutex_lock(&tee_ta_mutex);
	nsec_sessions_list_head(&open_sessions);

	/*
	 * The next two loops remove all references to the context which
	 * is about to be destroyed, except the one held by the current
	 * session. That reference is removed later in this function,
	 * once the context has been properly destroyed.
	 */

	/*
	 * Scan the entire list of sessions opened by clients from the
	 * non-secure world.
	 */
	TAILQ_FOREACH(sess, open_sessions, link) {
		if (sess->ts_sess.ctx == ts_ctx && sess != s) {
			sess->ts_sess.ctx = NULL;
			count++;
		}
	}

	/*
	 * Scan all sessions opened from the secure side by searching
	 * through all available TA instances and, for each context,
	 * scanning all of its open sessions.
	 */
	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (is_user_ta_ctx(&ctx->ts_ctx)) {
			utc = to_user_ta_ctx(&ctx->ts_ctx);

			TAILQ_FOREACH(sess, &utc->open_sessions, link) {
				if (sess->ts_sess.ctx == ts_ctx &&
				    sess != s) {
					sess->ts_sess.ctx = NULL;
					count++;
				}
			}
		}
	}

	ctx = ts_to_ta_ctx(ts_ctx);
	assert(count == ctx->ref_count);

	TAILQ_REMOVE(&tee_ctxes, ctx, link);
	mutex_unlock(&tee_ta_mutex);

	destroy_context(ctx);
	s->ts_sess.ctx = NULL;
}

/*
 * tee_ta_context_find - Find a TA context in the list of registered
 * contexts based on a UUID (input).
 * Returns a pointer to the context, or NULL if not found.
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx;

	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (memcmp(&ctx->ts_ctx.uuid, uuid, sizeof(TEE_UUID)) == 0)
			return ctx;
	}

	return NULL;
}

/* Check if the requester (client ID) matches the session's initial client */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
	if (id == KERN_IDENTITY)
		return TEE_SUCCESS;

	if (id == NSAPP_IDENTITY) {
		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
			DMSG("nsec tries to hijack TA session");
			return TEE_ERROR_ACCESS_DENIED;
		}
		return TEE_SUCCESS;
	}

	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
		DMSG("client id mismatch");
		return TEE_ERROR_ACCESS_DENIED;
	}
	return TEE_SUCCESS;
}
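
/*
 * Illustrative sketch (not part of the build): check_client() receives
 * the requester's identity, where KERN_IDENTITY and NSAPP_IDENTITY are
 * special sentinel values:
 *
 *	// kernel-internal callers always pass
 *	res = check_client(s, KERN_IDENTITY);	// TEE_SUCCESS
 *
 *	// non-secure callers may not touch sessions opened by a TA
 *	res = check_client(s, NSAPP_IDENTITY);	// TEE_ERROR_ACCESS_DENIED
 *						// if the session login is
 *						// TEE_LOGIN_TRUSTED_APP
 */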

/*
 * Check if invocation parameters match the TA properties
 *
 * @sess - current session handle
 * @param - already identified memory references hold a valid 'mobj'.
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy)
 * - Only SDP flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
			 struct tee_ta_param *param __unused)
{
	/*
	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
	 * are rejected at OP-TEE core entry. Hence all TAs have the same
	 * permissions regarding memory reference parameters here.
	 */
	return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
			 struct tee_ta_param *param)
{
	int n;

	/*
	 * When CFG_SECURE_DATA_PATH is enabled, OP-TEE entry allows SHM and
	 * SDP memory references. Only TAs flagged SDP can access SDP memory.
	 */
	if (sess->ts_sess.ctx &&
	    ts_to_ta_ctx(sess->ts_sess.ctx)->flags & TA_FLAG_SECURE_DATA_PATH)
		return true;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		struct param_mem *mem = &param->u[n].mem;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!mem->size)
			continue;
		if (mobj_is_sdp_mem(mem->mobj))
			return false;
	}
	return true;
}
#endif
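
/*
 * Illustrative sketch (not part of the build): with CFG_SECURE_DATA_PATH
 * enabled, an invocation passing an SDP memory reference to a TA that
 * lacks TA_FLAG_SECURE_DATA_PATH is rejected before the TA is entered:
 *
 *	if (!check_params(sess, param))
 *		return TEE_ERROR_BAD_PARAMETERS; // SDP mobj, non-SDP TA
 *
 * Zero-sized memrefs are skipped by the loop above, so an empty
 * reference never triggers the SDP check.
 */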

static void set_invoke_timeout(struct tee_ta_session *sess,
				      uint32_t cancel_req_to)
{
	TEE_Time current_time;
	TEE_Time cancel_time;

	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
		goto infinite;

	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		goto infinite;

	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
			 &cancel_time.seconds))
		goto infinite;

	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
	if (cancel_time.millis > 1000) {
		if (ADD_OVERFLOW(cancel_time.seconds, 1,
				 &cancel_time.seconds))
			goto infinite;

		cancel_time.millis -= 1000;
	}

	sess->cancel_time = cancel_time;
	return;

infinite:
	sess->cancel_time.seconds = UINT32_MAX;
	sess->cancel_time.millis = UINT32_MAX;
}
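
/*
 * Worked example (not part of the build): with current time 100 s 800 ms
 * and cancel_req_to == 1500 ms:
 *
 *	cancel_time.seconds = 100 + 1500 / 1000 = 101
 *	cancel_time.millis  = 800 + 1500 % 1000 = 1300  (> 1000)
 *	-> carry: seconds = 102, millis = 300
 *
 * TEE_TIMEOUT_INFINITE, a failing system-time read, or an overflowing
 * addition all map to the sentinel {UINT32_MAX, UINT32_MAX}, which
 * tee_ta_session_is_cancelled() treats as "never times out".
 */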

/*-----------------------------------------------------------------------------
 * Close a Trusted Application and free available resources
 *---------------------------------------------------------------------------*/
TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
				struct tee_ta_session_head *open_sessions,
				const TEE_Identity *clnt_id)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool keep_alive = false;

	DMSG("csess 0x%" PRIxVA " id %u",
	     (vaddr_t)csess, csess ? csess->id : UINT_MAX);

	if (!csess)
		return TEE_ERROR_ITEM_NOT_FOUND;

	sess = tee_ta_get_session(csess->id, true, open_sessions);

	if (!sess) {
		EMSG("session 0x%" PRIxVA " to be removed is not found",
		     (vaddr_t)csess);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
		tee_ta_put_session(sess);
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
	}

	DMSG("Destroy session");

	ts_ctx = sess->ts_sess.ctx;
	if (!ts_ctx) {
		destroy_session(sess, open_sessions);
		return TEE_SUCCESS;
	}

	ctx = ts_to_ta_ctx(ts_ctx);
	if (ctx->panicked) {
		destroy_session(sess, open_sessions);
	} else {
		tee_ta_set_busy(ctx);
		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
		ts_ctx->ops->enter_close_session(&sess->ts_sess);
		destroy_session(sess, open_sessions);
		tee_ta_clear_busy(ctx);
	}

	mutex_lock(&tee_ta_mutex);

	if (ctx->ref_count <= 0)
		panic();

	ctx->ref_count--;
	keep_alive = (ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) &&
			(ctx->flags & TA_FLAG_SINGLE_INSTANCE);
	if (!ctx->ref_count && !keep_alive) {
		TAILQ_REMOVE(&tee_ctxes, ctx, link);
		mutex_unlock(&tee_ta_mutex);

		destroy_context(ctx);
	} else
		mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}

static TEE_Result tee_ta_init_session_with_context(struct tee_ta_session *s,
						   const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx = NULL;

	while (true) {
		ctx = tee_ta_context_find(uuid);
		if (!ctx)
			return TEE_ERROR_ITEM_NOT_FOUND;

		if (!is_user_ta_ctx(&ctx->ts_ctx) ||
		    !to_user_ta_ctx(&ctx->ts_ctx)->uctx.is_initializing)
			break;
		/*
		 * Context is still initializing, wait here until it's
		 * fully initialized. Note that we're searching for the
		 * context again since it may have been removed while we
		 * were sleeping.
		 */
		condvar_wait(&tee_ta_init_cv, &tee_ta_mutex);
	}

	/*
	 * If the TA isn't single-instance it should be loaded as a new
	 * instance instead of doing anything with this instance. So tell
	 * the caller that we didn't find the TA and the caller will load
	 * a new instance.
	 */
	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * The TA is single-instance. If it isn't multi-session we can't
	 * create another session unless its reference count is zero.
	 */
	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
		return TEE_ERROR_BUSY;

	DMSG("Re-open TA %pUl", (void *)&ctx->ts_ctx.uuid);

	ctx->ref_count++;
	s->ts_sess.ctx = &ctx->ts_ctx;
	s->ts_sess.handle_scall = s->ts_sess.ctx->ops->handle_scall;
	return TEE_SUCCESS;
}

static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *last = NULL;
	uint32_t saved = 0;
	uint32_t id = 1;

	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
	if (last) {
		/* This value is less likely to be already used */
		id = last->id + 1;
		if (!id)
			id++; /* 0 is not valid */
	}

	saved = id;
	do {
		if (!tee_ta_find_session_nolock(id, open_sessions))
			return id;
		id++;
		if (!id)
			id++;
	} while (id != saved);

	return 0;
}
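
/*
 * Illustrative sketch (not part of the build): session IDs start after
 * the ID of the last session in the list and skip 0 on wrap-around, so
 * with sessions {1, 2, UINT32_MAX} the candidate sequence is
 * UINT32_MAX + 1 -> 0 (invalid) -> 1 (taken) -> 2 (taken) -> 3 (free).
 * Only if all 2^32 - 1 valid IDs are in use does the search give up and
 * return 0, which the caller turns into TEE_ERROR_OVERFLOW.
 */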

static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
				struct tee_ta_session_head *open_sessions,
				const TEE_UUID *uuid,
				struct tee_ta_session **sess)
{
	TEE_Result res;
	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

	*err = TEE_ORIGIN_TEE;
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->cancel_mask = true;
	condvar_init(&s->refc_cv);
	condvar_init(&s->lock_cv);
	s->lock_thread = THREAD_ID_INVALID;
	s->ref_count = 1;

	mutex_lock(&tee_ta_mutex);
	s->id = new_session_id(open_sessions);
	if (!s->id) {
		res = TEE_ERROR_OVERFLOW;
		goto err_mutex_unlock;
	}

	TAILQ_INSERT_TAIL(open_sessions, s, link);

	/* Look for already loaded TA */
	res = tee_ta_init_session_with_context(s, uuid);
	mutex_unlock(&tee_ta_mutex);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for secure partition */
	res = stmm_init_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for pseudo TA */
	res = tee_ta_init_pseudo_ta_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for user TA */
	res = tee_ta_init_user_ta_session(uuid, s);

out:
	if (!res) {
		*sess = s;
		return TEE_SUCCESS;
	}

	mutex_lock(&tee_ta_mutex);
	TAILQ_REMOVE(open_sessions, s, link);
err_mutex_unlock:
	mutex_unlock(&tee_ta_mutex);
	free(s);
	return res;
}
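
/*
 * Illustrative sketch (not part of the build): tee_ta_init_session()
 * resolves a UUID in a fixed order, stopping at the first backend that
 * does not answer TEE_ERROR_ITEM_NOT_FOUND:
 *
 *	1. already loaded (single-instance) TA contexts
 *	2. stand-alone MM secure partition (stmm_init_session())
 *	3. pseudo TAs built into the core
 *	4. user TAs loaded from storage
 */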

TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
			       struct tee_ta_session **sess,
			       struct tee_ta_session_head *open_sessions,
			       const TEE_UUID *uuid,
			       const TEE_Identity *clnt_id,
			       uint32_t cancel_req_to,
			       struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_session *s = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool panicked = false;
	bool was_busy = false;

	res = tee_ta_init_session(err, open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("init session failed 0x%x", res);
		return res;
	}

	if (!check_params(s, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ts_ctx = s->ts_sess.ctx;
	if (ts_ctx)
		ctx = ts_to_ta_ctx(ts_ctx);

	if (!ctx || ctx->panicked) {
		DMSG("panicked, call tee_ta_close_session()");
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	*sess = s;
	/* Save identity of the owner of the session */
	s->clnt_id = *clnt_id;

	if (tee_ta_try_set_busy(ctx)) {
		s->param = param;
		set_invoke_timeout(s, cancel_req_to);
		res = ts_ctx->ops->enter_open_session(&s->ts_sess);
		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
		was_busy = true;
	}

	panicked = ctx->panicked;
	s->param = NULL;

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors;
	 * panics and busy conflicts are reported with origin TEE_ORIGIN_TEE.
	 */
	if (panicked || was_busy)
		*err = TEE_ORIGIN_TEE;
	else
		*err = s->err_origin;

	tee_ta_put_session(s);
	if (panicked || res != TEE_SUCCESS)
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

	if (res != TEE_SUCCESS)
		EMSG("Failed. Return error 0x%x", res);

	return res;
}

TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id,
				 uint32_t cancel_req_to, uint32_t cmd,
				 struct tee_ta_param *param)
{
	struct tee_ta_ctx *ta_ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	TEE_Result res = TEE_SUCCESS;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	if (!check_params(sess, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ts_ctx = sess->ts_sess.ctx;
	if (!ts_ctx) {
		/* The context has already been destroyed */
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	ta_ctx = ts_to_ta_ctx(ts_ctx);
	if (ta_ctx->panicked) {
		DMSG("Panicked !");
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	tee_ta_set_busy(ta_ctx);

	sess->param = param;
	set_invoke_timeout(sess, cancel_req_to);
	res = ts_ctx->ops->enter_invoke_cmd(&sess->ts_sess, cmd);

	sess->param = NULL;
	tee_ta_clear_busy(ta_ctx);

	if (ta_ctx->panicked) {
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	*err = sess->err_origin;

	/* A short buffer is not a real error case */
	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
		DMSG("Error: %x of %d", res, *err);

	return res;
}
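
/*
 * Illustrative sketch (not part of the build): a typical invocation from
 * the non-secure entry code looks like this; err tells the client
 * whether a failure originated in the TEE or in the TA itself:
 *
 *	TEE_ErrorOrigin err = TEE_ORIGIN_TEE;
 *
 *	res = tee_ta_invoke_command(&err, sess, NSAPP_IDENTITY,
 *				    TEE_TIMEOUT_INFINITE, cmd, param);
 *	if (res == TEE_ERROR_TARGET_DEAD)
 *		;	// TA panicked, its context was already torn down
 */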

#if defined(CFG_TA_STATS)
static TEE_Result dump_ta_memstats(struct tee_ta_session *s,
				   struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;

	ts_ctx = s->ts_sess.ctx;
	if (!ts_ctx)
		return TEE_ERROR_ITEM_NOT_FOUND;

	if (is_user_ta_ctx(ts_ctx) &&
	    to_user_ta_ctx(ts_ctx)->uctx.is_initializing)
		return TEE_ERROR_BAD_STATE;

	ctx = ts_to_ta_ctx(ts_ctx);

	if (ctx->panicked)
		return TEE_ERROR_TARGET_DEAD;

	if (tee_ta_try_set_busy(ctx)) {
		s->param = param;
		set_invoke_timeout(s, TEE_TIMEOUT_INFINITE);
		res = ts_ctx->ops->dump_mem_stats(&s->ts_sess);
		s->param = NULL;
		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
	}

	return res;
}

static void init_dump_ctx(struct tee_ta_dump_ctx *dump_ctx)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_ctx *ctx = NULL;
	unsigned int n = 0;

	nsec_sessions_list_head(&open_sessions);
	/*
	 * Walk all registered TA contexts and, for each user TA context,
	 * record the IDs of the sessions opened towards it from the
	 * non-secure world.
	 */
	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		unsigned int cnt = 0;

		if (!is_user_ta_ctx(&ctx->ts_ctx))
			continue;

		memcpy(&dump_ctx[n].uuid, &ctx->ts_ctx.uuid,
		       sizeof(ctx->ts_ctx.uuid));
		dump_ctx[n].panicked = ctx->panicked;
		dump_ctx[n].is_user_ta = is_user_ta_ctx(&ctx->ts_ctx);
		TAILQ_FOREACH(sess, open_sessions, link) {
			if (sess->ts_sess.ctx == &ctx->ts_ctx) {
				if (cnt == MAX_DUMP_SESS_NUM)
					break;

				dump_ctx[n].sess_id[cnt] = sess->id;
				cnt++;
			}
		}

		dump_ctx[n].sess_num = cnt;
		n++;
	}
}

static TEE_Result dump_ta_stats(struct tee_ta_dump_ctx *dump_ctx,
				struct tee_ta_dump_stats *dump_stats,
				size_t ta_count)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_param param = { };
	unsigned int i = 0;
	unsigned int j = 0;

	nsec_sessions_list_head(&open_sessions);

	for (i = 0; i < ta_count; i++) {
		struct tee_ta_dump_stats *stats = &dump_stats[i];

		memcpy(&stats->uuid, &dump_ctx[i].uuid,
		       sizeof(dump_ctx[i].uuid));
		stats->panicked = dump_ctx[i].panicked;
		stats->sess_num = dump_ctx[i].sess_num;

		/* Find a session from the dump context */
		for (j = 0, sess = NULL; j < dump_ctx[i].sess_num && !sess; j++)
			sess = tee_ta_get_session(dump_ctx[i].sess_id[j], true,
						  open_sessions);

		if (!sess)
			continue;
		/* If the session exists, get its heap stats */
		memset(&param, 0, sizeof(struct tee_ta_param));
		param.types = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_NONE);
		res = dump_ta_memstats(sess, &param);
		if (res == TEE_SUCCESS) {
			stats->heap.allocated = param.u[0].val.a;
			stats->heap.max_allocated = param.u[0].val.b;
			stats->heap.size = param.u[1].val.a;
			stats->heap.num_alloc_fail = param.u[1].val.b;
			stats->heap.biggest_alloc_fail = param.u[2].val.a;
			stats->heap.biggest_alloc_fail_used = param.u[2].val.b;
		} else {
			memset(&stats->heap, 0, sizeof(stats->heap));
		}
		tee_ta_put_session(sess);
	}

	return TEE_SUCCESS;
}

TEE_Result tee_ta_instance_stats(void *buf, size_t *buf_size)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_dump_stats *dump_stats = NULL;
	struct tee_ta_dump_ctx *dump_ctx = NULL;
	struct tee_ta_ctx *ctx = NULL;
	size_t sz = 0;
	size_t ta_count = 0;

	if (!buf_size)
		return TEE_ERROR_BAD_PARAMETERS;

	mutex_lock(&tee_ta_mutex);

	/* Go through all available TAs and compute the required buffer size */
	TAILQ_FOREACH(ctx, &tee_ctxes, link)
		if (is_user_ta_ctx(&ctx->ts_ctx))
			ta_count++;

	sz = sizeof(struct tee_ta_dump_stats) * ta_count;
	if (!sz) {
		/* sz == 0 means there is no user TA; report no item found */
		res = TEE_ERROR_ITEM_NOT_FOUND;
	} else if (!buf || *buf_size < sz) {
		/*
		 * A NULL buf or a size smaller than required means the
		 * caller is querying the needed buffer size, so update
		 * *buf_size.
		 */
		*buf_size = sz;
		res = TEE_ERROR_SHORT_BUFFER;
	} else if (!IS_ALIGNED_WITH_TYPE(buf, uint32_t)) {
		DMSG("Data alignment");
		res = TEE_ERROR_BAD_PARAMETERS;
	} else {
		dump_stats = (struct tee_ta_dump_stats *)buf;
		dump_ctx = malloc(sizeof(struct tee_ta_dump_ctx) * ta_count);
		if (!dump_ctx)
			res = TEE_ERROR_OUT_OF_MEMORY;
		else
			init_dump_ctx(dump_ctx);
	}
	mutex_unlock(&tee_ta_mutex);

	if (res != TEE_SUCCESS)
		return res;

	/* Dump user TA stats by iterating over dump_ctx[] */
	res = dump_ta_stats(dump_ctx, dump_stats, ta_count);
	if (res == TEE_SUCCESS)
		*buf_size = sz;

	free(dump_ctx);
	return res;
}
#endif
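
/*
 * Illustrative sketch (not part of the build): tee_ta_instance_stats()
 * follows the usual query-then-fetch pattern; a first call with a NULL
 * buffer reports the required size via TEE_ERROR_SHORT_BUFFER:
 *
 *	size_t sz = 0;
 *	void *buf = NULL;
 *
 *	if (tee_ta_instance_stats(NULL, &sz) == TEE_ERROR_SHORT_BUFFER) {
 *		buf = malloc(sz);	// sz > 0 here
 *		if (buf && !tee_ta_instance_stats(buf, &sz))
 *			;	// buf now holds one tee_ta_dump_stats
 *				// entry per user TA
 *	}
 */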

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id)
{
	*err = TEE_ORIGIN_TEE;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	sess->cancel = true;
	return TEE_SUCCESS;
}

bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}
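
/*
 * Illustrative sketch (not part of the build): long-running services
 * poll for cancellation between work units; passing a cached time avoids
 * repeated system-time reads:
 *
 *	TEE_Time t = { };
 *
 *	while (more_work()) {			// hypothetical helper
 *		if (tee_time_get_sys_time(&t) == TEE_SUCCESS &&
 *		    tee_ta_session_is_cancelled(s, &t))
 *			return TEE_ERROR_CANCEL;
 *		do_one_unit();			// hypothetical helper
 *	}
 *
 * Note the mask: while s->cancel_mask is true the session is never
 * reported as cancelled, regardless of s->cancel or the timeout.
 */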

#if defined(CFG_TA_GPROF_SUPPORT)
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
	struct ts_session *s = ts_get_current_session();
	struct user_ta_ctx *utc = NULL;
	struct sample_buf *sbuf = NULL;
	TEE_Result res = 0;
	size_t idx = 0;

	sbuf = s->sbuf;
	if (!sbuf || !sbuf->enabled)
		return; /* PC sampling is not enabled */

	idx = (((uint64_t)pc - sbuf->offset)/2 * sbuf->scale)/65536;
	if (idx < sbuf->nsamples) {
		utc = to_user_ta_ctx(s->ctx);
		res = vm_check_access_rights(&utc->uctx,
					     TEE_MEMORY_ACCESS_READ |
					     TEE_MEMORY_ACCESS_WRITE |
					     TEE_MEMORY_ACCESS_ANY_OWNER,
					     (uaddr_t)&sbuf->samples[idx],
					     sizeof(*sbuf->samples));
		if (res != TEE_SUCCESS)
			return;
		sbuf->samples[idx]++;
	}
	sbuf->count++;
}
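
/*
 * Worked example (not part of the build): the histogram index follows
 * the classic gprof scaling, idx = ((pc - offset) / 2) * scale / 65536.
 * With offset 0x1000, scale 65536 (1:1) and pc 0x1008 the sample lands
 * in bucket ((0x1008 - 0x1000) / 2) * 65536 / 65536 = 4. sbuf->count is
 * incremented for every sample, including those falling outside
 * nsamples, so the TA can detect truncation.
 */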

static void gprof_update_session_utime(bool suspend, struct ts_session *s,
				       uint64_t now)
{
	struct sample_buf *sbuf = s->sbuf;

	if (!sbuf)
		return;

	if (suspend) {
		assert(sbuf->usr_entered);
		sbuf->usr += now - sbuf->usr_entered;
		sbuf->usr_entered = 0;
	} else {
		assert(!sbuf->usr_entered);
		if (!now)
			now++; /* 0 is reserved */
		sbuf->usr_entered = now;
	}
}

/*
 * Update user-mode CPU time for the current session
 * @suspend: true if the session is being suspended (leaving user mode),
 * false if it is resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
	struct ts_session *s = ts_get_current_session();
	uint64_t now = barrier_read_counter_timer();

	gprof_update_session_utime(suspend, s, now);
}

void tee_ta_update_session_utime_suspend(void)
{
	tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
	tee_ta_update_session_utime(false);
}
#endif

#if defined(CFG_FTRACE_SUPPORT)
static void ftrace_update_times(bool suspend)
{
	struct ts_session *s = ts_get_current_session_may_fail();
	struct ftrace_buf *fbuf = NULL;
	uint64_t now = 0;
	uint32_t i = 0;

	if (!s)
		return;

	now = barrier_read_counter_timer();

	fbuf = s->fbuf;
	if (!fbuf)
		return;

	if (suspend) {
		fbuf->suspend_time = now;
	} else {
		for (i = 0; i <= fbuf->ret_idx; i++)
			fbuf->begin_time[i] += now - fbuf->suspend_time;
	}
}

void tee_ta_ftrace_update_times_suspend(void)
{
	ftrace_update_times(true);
}

void tee_ta_ftrace_update_times_resume(void)
{
	ftrace_update_times(false);
}
#endif

bool is_ta_ctx(struct ts_ctx *ctx)
{
	return is_user_ta_ctx(ctx) || is_pseudo_ta_ctx(ctx);
}
1179