xref: /optee_os/core/kernel/tee_ta_manager.c (revision 8fab4371e333936b8d965c1fa6f853774c2c0252)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * Copyright (c) 2020, Arm Limited
5  */
6 
7 #include <assert.h>
8 #include <kernel/mutex.h>
9 #include <kernel/panic.h>
10 #include <kernel/pseudo_ta.h>
11 #include <kernel/stmm_sp.h>
12 #include <kernel/tee_common.h>
13 #include <kernel/tee_misc.h>
14 #include <kernel/tee_ta_manager.h>
15 #include <kernel/tee_time.h>
16 #include <kernel/thread.h>
17 #include <kernel/user_mode_ctx.h>
18 #include <kernel/user_ta.h>
19 #include <malloc.h>
20 #include <mm/core_memprot.h>
21 #include <mm/core_mmu.h>
22 #include <mm/mobj.h>
23 #include <mm/vm.h>
24 #include <stdio.h>
25 #include <stdlib.h>
26 #include <string.h>
27 #include <tee_api_types.h>
28 #include <tee/entry_std.h>
29 #include <tee/tee_obj.h>
30 #include <tee/tee_svc_cryp.h>
31 #include <tee/tee_svc_storage.h>
32 #include <trace.h>
33 #include <types_ext.h>
34 #include <user_ta_header.h>
35 #include <utee_types.h>
36 #include <util.h>
37 
38 #if defined(CFG_TA_STATS)
39 #define MAX_DUMP_SESS_NUM	(16)
40 struct tee_ta_dump_stats {
41 	TEE_UUID uuid;
42 	uint32_t panicked;	/* True if TA has panicked */
43 	uint32_t sess_num;	/* Number of opened sessions */
44 	struct malloc_stats heap;
45 };
46 
47 struct tee_ta_dump_ctx {
48 	TEE_UUID uuid;
49 	uint32_t panicked;
50 	bool is_user_ta;
51 	uint32_t sess_num;
52 	uint32_t sess_id[MAX_DUMP_SESS_NUM];
53 };
54 #endif
55 
56 /* This mutex protects the critical section in tee_ta_init_session */
57 struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
58 /* This condvar is used when waiting for a TA context to become initialized */
59 struct condvar tee_ta_init_cv = CONDVAR_INITIALIZER;
60 struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);
61 
62 #ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
63 static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
64 static short int tee_ta_single_instance_thread = THREAD_ID_INVALID;
65 static size_t tee_ta_single_instance_count;
66 #endif
67 
68 #ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
69 static void lock_single_instance(void)
70 {
71 }
72 
73 static void unlock_single_instance(void)
74 {
75 }
76 
77 static bool has_single_instance_lock(void)
78 {
79 	return false;
80 }
81 #else
82 static void lock_single_instance(void)
83 {
84 	/* Requires tee_ta_mutex to be held */
85 	if (tee_ta_single_instance_thread != thread_get_id()) {
86 		/* Wait until the single-instance lock is available. */
87 		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
88 			condvar_wait(&tee_ta_cv, &tee_ta_mutex);
89 
90 		tee_ta_single_instance_thread = thread_get_id();
91 		assert(tee_ta_single_instance_count == 0);
92 	}
93 
94 	tee_ta_single_instance_count++;
95 }
96 
97 static void unlock_single_instance(void)
98 {
99 	/* Requires tee_ta_mutex to be held */
100 	assert(tee_ta_single_instance_thread == thread_get_id());
101 	assert(tee_ta_single_instance_count > 0);
102 
103 	tee_ta_single_instance_count--;
104 	if (tee_ta_single_instance_count == 0) {
105 		tee_ta_single_instance_thread = THREAD_ID_INVALID;
106 		condvar_signal(&tee_ta_cv);
107 	}
108 }
109 
110 static bool has_single_instance_lock(void)
111 {
112 	/* Requires tee_ta_mutex to be held */
113 	return tee_ta_single_instance_thread == thread_get_id();
114 }
115 #endif
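
/*
 * Editor's note: a minimal usage sketch, not upstream code. It assumes the
 * caller does not already hold tee_ta_mutex. lock_single_instance() may
 * sleep on tee_ta_cv, so both helpers must be called with tee_ta_mutex
 * held, as tee_ta_try_set_busy() below does.
 */
static void __maybe_unused single_instance_lock_sketch(void)
{
	mutex_lock(&tee_ta_mutex);
	lock_single_instance();	/* Returns once this thread owns the lock */

	/* ... section serialized against other single-instance TA calls ... */

	unlock_single_instance();	/* Signals tee_ta_cv when count drops to 0 */
	mutex_unlock(&tee_ta_mutex);
}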
116 
117 struct tee_ta_session *__noprof to_ta_session(struct ts_session *sess)
118 {
119 	assert(is_ta_ctx(sess->ctx) || is_stmm_ctx(sess->ctx));
120 	return container_of(sess, struct tee_ta_session, ts_sess);
121 }
122 
123 static struct tee_ta_ctx *ts_to_ta_ctx(struct ts_ctx *ctx)
124 {
125 	if (is_ta_ctx(ctx))
126 		return to_ta_ctx(ctx);
127 
128 	if (is_stmm_ctx(ctx))
129 		return &(to_stmm_ctx(ctx)->ta_ctx);
130 
131 	panic("bad context");
132 }
133 
134 static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
135 {
136 	bool rc = true;
137 
138 	if (ctx->flags & TA_FLAG_CONCURRENT)
139 		return true;
140 
141 	mutex_lock(&tee_ta_mutex);
142 
143 	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
144 		lock_single_instance();
145 
146 	if (has_single_instance_lock()) {
147 		if (ctx->busy) {
148 			/*
149 			 * We're holding the single-instance lock and the
150 			 * TA is busy. Waiting now would only cause a
151 			 * deadlock, so we release the lock and return false.
152 			 */
153 			rc = false;
154 			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
155 				unlock_single_instance();
156 		}
157 	} else {
158 		/*
159 		 * We're not holding the single-instance lock, we're free to
160 		 * wait for the TA to become available.
161 		 */
162 		while (ctx->busy)
163 			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
164 	}
165 
166 	/* Either it's already true or we should set it to true */
167 	ctx->busy = true;
168 
169 	mutex_unlock(&tee_ta_mutex);
170 	return rc;
171 }
172 
173 static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
174 {
175 	if (!tee_ta_try_set_busy(ctx))
176 		panic();
177 }
178 
179 static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
180 {
181 	if (ctx->flags & TA_FLAG_CONCURRENT)
182 		return;
183 
184 	mutex_lock(&tee_ta_mutex);
185 
186 	assert(ctx->busy);
187 	ctx->busy = false;
188 	condvar_signal(&ctx->busy_cv);
189 
190 	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
191 		unlock_single_instance();
192 
193 	mutex_unlock(&tee_ta_mutex);
194 }
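
/*
 * Editor's note: hypothetical sketch, not upstream code. It shows the
 * intended pairing of the busy helpers around a TA entry: the try-variant
 * lets the caller return TEE_ERROR_BUSY instead of deadlocking when it
 * already holds the single-instance lock.
 */
static TEE_Result __maybe_unused enter_ta_sketch(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		return TEE_ERROR_BUSY;	/* Deadlock avoided */

	/* ... enter the TA: open_session/invoke_command/close_session ... */

	tee_ta_clear_busy(ctx);
	return TEE_SUCCESS;
}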
195 
196 static void dec_session_ref_count(struct tee_ta_session *s)
197 {
198 	assert(s->ref_count > 0);
199 	s->ref_count--;
200 	if (s->ref_count == 1)
201 		condvar_signal(&s->refc_cv);
202 }
203 
204 void tee_ta_put_session(struct tee_ta_session *s)
205 {
206 	mutex_lock(&tee_ta_mutex);
207 
208 	if (s->lock_thread == thread_get_id()) {
209 		s->lock_thread = THREAD_ID_INVALID;
210 		condvar_signal(&s->lock_cv);
211 	}
212 	dec_session_ref_count(s);
213 
214 	mutex_unlock(&tee_ta_mutex);
215 }
216 
217 static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
218 			struct tee_ta_session_head *open_sessions)
219 {
220 	struct tee_ta_session *s = NULL;
221 	struct tee_ta_session *found = NULL;
222 
223 	TAILQ_FOREACH(s, open_sessions, link) {
224 		if (s->id == id) {
225 			found = s;
226 			break;
227 		}
228 	}
229 
230 	return found;
231 }
232 
233 struct tee_ta_session *tee_ta_find_session(uint32_t id,
234 			struct tee_ta_session_head *open_sessions)
235 {
236 	struct tee_ta_session *s = NULL;
237 
238 	mutex_lock(&tee_ta_mutex);
239 
240 	s = tee_ta_find_session_nolock(id, open_sessions);
241 
242 	mutex_unlock(&tee_ta_mutex);
243 
244 	return s;
245 }
246 
247 struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
248 			struct tee_ta_session_head *open_sessions)
249 {
250 	struct tee_ta_session *s;
251 
252 	mutex_lock(&tee_ta_mutex);
253 
254 	while (true) {
255 		s = tee_ta_find_session_nolock(id, open_sessions);
256 		if (!s)
257 			break;
258 		if (s->unlink) {
259 			s = NULL;
260 			break;
261 		}
262 		s->ref_count++;
263 		if (!exclusive)
264 			break;
265 
266 		assert(s->lock_thread != thread_get_id());
267 
268 		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
269 			condvar_wait(&s->lock_cv, &tee_ta_mutex);
270 
271 		if (s->unlink) {
272 			dec_session_ref_count(s);
273 			s = NULL;
274 			break;
275 		}
276 
277 		s->lock_thread = thread_get_id();
278 		break;
279 	}
280 
281 	mutex_unlock(&tee_ta_mutex);
282 	return s;
283 }
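
/*
 * Editor's note: hypothetical sketch, not upstream code. With exclusive
 * set, tee_ta_get_session() takes both a reference and the per-session
 * lock; tee_ta_put_session() releases both.
 */
static void __maybe_unused session_get_put_sketch(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;

	s = tee_ta_get_session(id, true /* exclusive */, open_sessions);
	if (!s)
		return;	/* No such id, or the session is being unlinked */

	/* ... no other thread can lock this session here ... */

	tee_ta_put_session(s);	/* Drops the lock and the reference */
}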
284 
285 static void tee_ta_unlink_session(struct tee_ta_session *s,
286 			struct tee_ta_session_head *open_sessions)
287 {
288 	mutex_lock(&tee_ta_mutex);
289 
290 	assert(s->ref_count >= 1);
291 	assert(s->lock_thread == thread_get_id());
292 	assert(!s->unlink);
293 
294 	s->unlink = true;
295 	condvar_broadcast(&s->lock_cv);
296 
297 	while (s->ref_count != 1)
298 		condvar_wait(&s->refc_cv, &tee_ta_mutex);
299 
300 	TAILQ_REMOVE(open_sessions, s, link);
301 
302 	mutex_unlock(&tee_ta_mutex);
303 }
304 
305 static void destroy_session(struct tee_ta_session *s,
306 			    struct tee_ta_session_head *open_sessions)
307 {
308 #if defined(CFG_FTRACE_SUPPORT)
309 	if (s->ts_sess.ctx && s->ts_sess.ctx->ops->dump_ftrace) {
310 		ts_push_current_session(&s->ts_sess);
311 		s->ts_sess.fbuf = NULL;
312 		s->ts_sess.ctx->ops->dump_ftrace(s->ts_sess.ctx);
313 		ts_pop_current_session();
314 	}
315 #endif
316 
317 	tee_ta_unlink_session(s, open_sessions);
318 #if defined(CFG_TA_GPROF_SUPPORT)
319 	free(s->ts_sess.sbuf);
320 #endif
321 	free(s);
322 }
323 
324 static void destroy_context(struct tee_ta_ctx *ctx)
325 {
326 	DMSG("Destroy TA ctx (0x%" PRIxVA ")",  (vaddr_t)ctx);
327 
328 	condvar_destroy(&ctx->busy_cv);
329 	ctx->ts_ctx.ops->destroy(&ctx->ts_ctx);
330 }
331 
332 static void destroy_ta_ctx_from_session(struct tee_ta_session *s)
333 {
334 	struct tee_ta_session *sess = NULL;
335 	struct tee_ta_session_head *open_sessions = NULL;
336 	struct tee_ta_ctx *ctx = NULL;
337 	struct user_ta_ctx *utc = NULL;
338 	struct ts_ctx *ts_ctx = s->ts_sess.ctx;
339 	size_t count = 1; /* The current session already holds one reference */
340 
341 	DMSG("Remove references to context (%#"PRIxVA")", (vaddr_t)ts_ctx);
342 
343 	mutex_lock(&tee_ta_mutex);
344 	nsec_sessions_list_head(&open_sessions);
345 
346 	/*
347 	 * The next two loops remove all references to the context that is
348 	 * about to be destroyed, except for the reference held by the
349 	 * current session. That one is cleared later in this function,
350 	 * after the context has been destroyed.
351 	 */
352 
353 	/*
354 	 * Scan the entire list of sessions opened by clients from the
355 	 * non-secure world.
356 	 */
357 	TAILQ_FOREACH(sess, open_sessions, link) {
358 		if (sess->ts_sess.ctx == ts_ctx && sess != s) {
359 			sess->ts_sess.ctx = NULL;
360 			count++;
361 		}
362 	}
363 
364 	/*
365 	 * Scan all sessions opened from secure side by searching through
366 	 * all available TA instances and for each context, scan all opened
367 	 * sessions.
368 	 */
369 	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
370 		if (is_user_ta_ctx(&ctx->ts_ctx)) {
371 			utc = to_user_ta_ctx(&ctx->ts_ctx);
372 
373 			TAILQ_FOREACH(sess, &utc->open_sessions, link) {
374 				if (sess->ts_sess.ctx == ts_ctx &&
375 				    sess != s) {
376 					sess->ts_sess.ctx = NULL;
377 					count++;
378 				}
379 			}
380 		}
381 	}
382 
383 	ctx = ts_to_ta_ctx(ts_ctx);
384 	assert(count == ctx->ref_count);
385 
386 	TAILQ_REMOVE(&tee_ctxes, ctx, link);
387 	mutex_unlock(&tee_ta_mutex);
388 
389 	destroy_context(ctx);
390 	s->ts_sess.ctx = NULL;
391 }
392 
393 /*
394  * tee_ta_context_find - Find a TA context in the list of loaded TAs
395  * based on a UUID (input). Returns a pointer to the context or NULL.
396  */
397 static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
398 {
399 	struct tee_ta_ctx *ctx;
400 
401 	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
402 		if (memcmp(&ctx->ts_ctx.uuid, uuid, sizeof(TEE_UUID)) == 0)
403 			return ctx;
404 	}
405 
406 	return NULL;
407 }
408 
409 /* Check if the requester (client ID) matches the session's initial client */
410 static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
411 {
412 	if (id == KERN_IDENTITY)
413 		return TEE_SUCCESS;
414 
415 	if (id == NSAPP_IDENTITY) {
416 		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
417 			DMSG("nsec tries to hijack TA session");
418 			return TEE_ERROR_ACCESS_DENIED;
419 		}
420 		return TEE_SUCCESS;
421 	}
422 
423 	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
424 		DMSG("client id mismatch");
425 		return TEE_ERROR_ACCESS_DENIED;
426 	}
427 	return TEE_SUCCESS;
428 }
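
/*
 * Editor's note: illustrative sketch, not upstream code. A TEE_Identity
 * pairs a login method with a UUID; the identity of a TA client, as
 * rejected for NSAPP_IDENTITY above, would be built like this:
 */
static void __maybe_unused ta_client_identity_sketch(TEE_Identity *id,
						     const TEE_UUID *ta_uuid)
{
	id->login = TEE_LOGIN_TRUSTED_APP;
	id->uuid = *ta_uuid;
}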
429 
430 /*
431  * Check if invocation parameters match TA properties
432  *
433  * @s - current session handle
434  * @param - already identified memory references hold a valid 'mobj'.
435  *
436  * Policy:
437  * - All TAs can access 'non-secure' shared memory.
438  * - All TAs can access TEE private memory (seccpy)
439  * - Only SDP flagged TAs can accept SDP memory references.
440  */
441 #ifndef CFG_SECURE_DATA_PATH
442 static bool check_params(struct tee_ta_session *sess __unused,
443 			 struct tee_ta_param *param __unused)
444 {
445 	/*
446 	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
447 	 * are rejected at OP-TEE core entry. Hence all TAs here have the
448 	 * same permissions regarding memory reference parameters.
449 	 */
450 	return true;
451 }
452 #else
453 static bool check_params(struct tee_ta_session *sess,
454 			 struct tee_ta_param *param)
455 {
456 	int n;
457 
458 	/*
459 	 * When CFG_SECURE_DATA_PATH is enabled, OP-TEE entry allows SHM and
460 	 * SDP memory references. Only TAs flagged SDP can access SDP memory.
461 	 */
462 	if (sess->ts_sess.ctx &&
463 	    ts_to_ta_ctx(sess->ts_sess.ctx)->flags & TA_FLAG_SECURE_DATA_PATH)
464 		return true;
465 
466 	for (n = 0; n < TEE_NUM_PARAMS; n++) {
467 		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
468 		struct param_mem *mem = &param->u[n].mem;
469 
470 		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
471 		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
472 		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
473 			continue;
474 		if (!mem->size)
475 			continue;
476 		if (mobj_is_sdp_mem(mem->mobj))
477 			return false;
478 	}
479 	return true;
480 }
481 #endif
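
/*
 * Editor's note: illustrative sketch, not upstream code. param->types
 * packs four parameter types of four bits each; TEE_PARAM_TYPE_GET()
 * extracts the type of parameter n, exactly as check_params() does above.
 */
static bool __maybe_unused has_memref_param_sketch(struct tee_ta_param *param)
{
	unsigned int n = 0;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		switch (TEE_PARAM_TYPE_GET(param->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_INPUT:
		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			return true;
		default:
			break;
		}
	}

	return false;
}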
482 
483 static void set_invoke_timeout(struct tee_ta_session *sess,
484 				      uint32_t cancel_req_to)
485 {
486 	TEE_Time current_time;
487 	TEE_Time cancel_time;
488 
489 	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
490 		goto infinite;
491 
492 	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
493 		goto infinite;
494 
495 	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
496 			 &cancel_time.seconds))
497 		goto infinite;
498 
499 	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
500 	if (cancel_time.millis > 1000) {
501 		if (ADD_OVERFLOW(current_time.seconds, 1,
502 				 &cancel_time.seconds))
503 			goto infinite;
504 
505 		cancel_time.seconds++;
506 		cancel_time.millis -= 1000;
507 	}
508 
509 	sess->cancel_time = cancel_time;
510 	return;
511 
512 infinite:
513 	sess->cancel_time.seconds = UINT32_MAX;
514 	sess->cancel_time.millis = UINT32_MAX;
515 }
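
/*
 * Editor's note: worked example, not upstream code. For cancel_req_to of
 * 2500 ms at a current time of 10 s / 900 ms: seconds = 10 + 2500 / 1000
 * = 12 and millis = 900 + 2500 % 1000 = 1400; the carry branch above then
 * yields a deadline of 13 s / 400 ms.
 */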
516 
517 /*-----------------------------------------------------------------------------
518  * Close a Trusted Application and free available resources
519  *---------------------------------------------------------------------------*/
520 TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
521 				struct tee_ta_session_head *open_sessions,
522 				const TEE_Identity *clnt_id)
523 {
524 	struct tee_ta_session *sess = NULL;
525 	struct tee_ta_ctx *ctx = NULL;
526 	struct ts_ctx *ts_ctx = NULL;
527 	bool keep_alive = false;
528 
529 	DMSG("csess 0x%" PRIxVA " id %u",
530 	     (vaddr_t)csess, csess ? csess->id : UINT_MAX);
531 
532 	if (!csess)
533 		return TEE_ERROR_ITEM_NOT_FOUND;
534 
535 	sess = tee_ta_get_session(csess->id, true, open_sessions);
536 
537 	if (!sess) {
538 		EMSG("session 0x%" PRIxVA " to be removed is not found",
539 		     (vaddr_t)csess);
540 		return TEE_ERROR_ITEM_NOT_FOUND;
541 	}
542 
543 	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
544 		tee_ta_put_session(sess);
545 		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
546 	}
547 
548 	DMSG("Destroy session");
549 
550 	ts_ctx = sess->ts_sess.ctx;
551 	if (!ts_ctx) {
552 		destroy_session(sess, open_sessions);
553 		return TEE_SUCCESS;
554 	}
555 
556 	ctx = ts_to_ta_ctx(ts_ctx);
557 	if (ctx->panicked) {
558 		destroy_session(sess, open_sessions);
559 	} else {
560 		tee_ta_set_busy(ctx);
561 		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
562 		ts_ctx->ops->enter_close_session(&sess->ts_sess);
563 		destroy_session(sess, open_sessions);
564 		tee_ta_clear_busy(ctx);
565 	}
566 
567 	mutex_lock(&tee_ta_mutex);
568 
569 	if (ctx->ref_count <= 0)
570 		panic();
571 
572 	ctx->ref_count--;
573 	keep_alive = (ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) &&
574 			(ctx->flags & TA_FLAG_SINGLE_INSTANCE);
575 	if (!ctx->ref_count && !keep_alive) {
576 		TAILQ_REMOVE(&tee_ctxes, ctx, link);
577 		mutex_unlock(&tee_ta_mutex);
578 
579 		destroy_context(ctx);
580 	} else
581 		mutex_unlock(&tee_ta_mutex);
582 
583 	return TEE_SUCCESS;
584 }
585 
586 static TEE_Result tee_ta_init_session_with_context(struct tee_ta_session *s,
587 						   const TEE_UUID *uuid)
588 {
589 	struct tee_ta_ctx *ctx = NULL;
590 
591 	while (true) {
592 		ctx = tee_ta_context_find(uuid);
593 		if (!ctx)
594 			return TEE_ERROR_ITEM_NOT_FOUND;
595 
596 		if (!is_user_ta_ctx(&ctx->ts_ctx) ||
597 		    !to_user_ta_ctx(&ctx->ts_ctx)->uctx.is_initializing)
598 			break;
599 		/*
600 		 * Context is still initializing, wait here until it's
601 		 * fully initialized. Note that we're searching for the
602 		 * context again since it may have been removed while we
603 		 * were sleeping.
604 		 */
605 		condvar_wait(&tee_ta_init_cv, &tee_ta_mutex);
606 	}
607 
608 	/*
609 	 * If TA isn't single instance it should be loaded as new
610 	 * instance instead of doing anything with this instance.
611 	 * So tell the caller that we didn't find the TA; the
612 	 * caller will then load a new instance.
613 	 */
614 	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
615 		return TEE_ERROR_ITEM_NOT_FOUND;
616 
617 	/*
618 	 * The TA is single-instance; if it isn't multi-session we
619 	 * can't create another session unless its reference count is zero.
620 	 */
621 	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
622 		return TEE_ERROR_BUSY;
623 
624 	DMSG("Re-open TA %pUl", (void *)&ctx->ts_ctx.uuid);
625 
626 	ctx->ref_count++;
627 	s->ts_sess.ctx = &ctx->ts_ctx;
628 	s->ts_sess.handle_svc = s->ts_sess.ctx->ops->handle_svc;
629 	return TEE_SUCCESS;
630 }
631 
632 static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
633 {
634 	struct tee_ta_session *last = NULL;
635 	uint32_t saved = 0;
636 	uint32_t id = 1;
637 
638 	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
639 	if (last) {
640 		/* This value is unlikely to already be in use */
641 		id = last->id + 1;
642 		if (!id)
643 			id++; /* 0 is not valid */
644 	}
645 
646 	saved = id;
647 	do {
648 		if (!tee_ta_find_session_nolock(id, open_sessions))
649 			return id;
650 		id++;
651 		if (!id)
652 			id++;
653 	} while (id != saved);
654 
655 	return 0;
656 }
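
/*
 * Editor's note: worked example, not upstream code. If the newest session
 * has id UINT32_MAX, the candidate wraps to 0 and is bumped to 1 since 0
 * is reserved as invalid; the do/while then probes at most the full
 * 32-bit id space before giving up and returning 0.
 */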
657 
658 static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
659 				struct tee_ta_session_head *open_sessions,
660 				const TEE_UUID *uuid,
661 				struct tee_ta_session **sess)
662 {
663 	TEE_Result res;
664 	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));
665 
666 	*err = TEE_ORIGIN_TEE;
667 	if (!s)
668 		return TEE_ERROR_OUT_OF_MEMORY;
669 
670 	s->cancel_mask = true;
671 	condvar_init(&s->refc_cv);
672 	condvar_init(&s->lock_cv);
673 	s->lock_thread = THREAD_ID_INVALID;
674 	s->ref_count = 1;
675 
676 	mutex_lock(&tee_ta_mutex);
677 	s->id = new_session_id(open_sessions);
678 	if (!s->id) {
679 		res = TEE_ERROR_OVERFLOW;
680 		goto err_mutex_unlock;
681 	}
682 
683 	TAILQ_INSERT_TAIL(open_sessions, s, link);
684 
685 	/* Look for already loaded TA */
686 	res = tee_ta_init_session_with_context(s, uuid);
687 	mutex_unlock(&tee_ta_mutex);
688 	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
689 		goto out;
690 
691 	/* Look for secure partition */
692 	res = stmm_init_session(uuid, s);
693 	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
694 		goto out;
695 
696 	/* Look for pseudo TA */
697 	res = tee_ta_init_pseudo_ta_session(uuid, s);
698 	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
699 		goto out;
700 
701 	/* Look for user TA */
702 	res = tee_ta_init_user_ta_session(uuid, s);
703 
704 out:
705 	if (!res) {
706 		*sess = s;
707 		return TEE_SUCCESS;
708 	}
709 
710 	mutex_lock(&tee_ta_mutex);
711 	TAILQ_REMOVE(open_sessions, s, link);
712 err_mutex_unlock:
713 	mutex_unlock(&tee_ta_mutex);
714 	free(s);
715 	return res;
716 }
717 
718 TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
719 			       struct tee_ta_session **sess,
720 			       struct tee_ta_session_head *open_sessions,
721 			       const TEE_UUID *uuid,
722 			       const TEE_Identity *clnt_id,
723 			       uint32_t cancel_req_to,
724 			       struct tee_ta_param *param)
725 {
726 	TEE_Result res = TEE_SUCCESS;
727 	struct tee_ta_session *s = NULL;
728 	struct tee_ta_ctx *ctx = NULL;
729 	struct ts_ctx *ts_ctx = NULL;
730 	bool panicked = false;
731 	bool was_busy = false;
732 
733 	res = tee_ta_init_session(err, open_sessions, uuid, &s);
734 	if (res != TEE_SUCCESS) {
735 		DMSG("init session failed 0x%x", res);
736 		return res;
737 	}
738 
739 	if (!check_params(s, param))
740 		return TEE_ERROR_BAD_PARAMETERS;
741 
742 	ts_ctx = s->ts_sess.ctx;
743 	if (ts_ctx)
744 		ctx = ts_to_ta_ctx(ts_ctx);
745 
746 	if (!ctx || ctx->panicked) {
747 		DMSG("panicked, call tee_ta_close_session()");
748 		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
749 		*err = TEE_ORIGIN_TEE;
750 		return TEE_ERROR_TARGET_DEAD;
751 	}
752 
753 	*sess = s;
754 	/* Save identity of the owner of the session */
755 	s->clnt_id = *clnt_id;
756 
757 	if (tee_ta_try_set_busy(ctx)) {
758 		s->param = param;
759 		set_invoke_timeout(s, cancel_req_to);
760 		res = ts_ctx->ops->enter_open_session(&s->ts_sess);
761 		tee_ta_clear_busy(ctx);
762 	} else {
763 		/* Deadlock avoided */
764 		res = TEE_ERROR_BUSY;
765 		was_busy = true;
766 	}
767 
768 	panicked = ctx->panicked;
769 	s->param = NULL;
770 
771 	/*
772 	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors,
773 	 * except when the TA panicked or the instance was busy.
774 	 */
775 	if (panicked || was_busy)
776 		*err = TEE_ORIGIN_TEE;
777 	else
778 		*err = s->err_origin;
779 
780 	tee_ta_put_session(s);
781 	if (panicked || res != TEE_SUCCESS)
782 		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
783 
784 	if (res != TEE_SUCCESS)
785 		EMSG("Failed. Return error 0x%x", res);
786 
787 	return res;
788 }
789 
790 TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
791 				 struct tee_ta_session *sess,
792 				 const TEE_Identity *clnt_id,
793 				 uint32_t cancel_req_to, uint32_t cmd,
794 				 struct tee_ta_param *param)
795 {
796 	struct tee_ta_ctx *ta_ctx = NULL;
797 	struct ts_ctx *ts_ctx = NULL;
798 	TEE_Result res = TEE_SUCCESS;
799 
800 	if (check_client(sess, clnt_id) != TEE_SUCCESS)
801 		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
802 
803 	if (!check_params(sess, param))
804 		return TEE_ERROR_BAD_PARAMETERS;
805 
806 	ts_ctx = sess->ts_sess.ctx;
807 	if (!ts_ctx) {
808 		/* The context has already been destroyed */
809 		*err = TEE_ORIGIN_TEE;
810 		return TEE_ERROR_TARGET_DEAD;
811 	}
812 
813 	ta_ctx = ts_to_ta_ctx(ts_ctx);
814 	if (ta_ctx->panicked) {
815 		DMSG("Panicked !");
816 		destroy_ta_ctx_from_session(sess);
817 		*err = TEE_ORIGIN_TEE;
818 		return TEE_ERROR_TARGET_DEAD;
819 	}
820 
821 	tee_ta_set_busy(ta_ctx);
822 
823 	sess->param = param;
824 	set_invoke_timeout(sess, cancel_req_to);
825 	res = ts_ctx->ops->enter_invoke_cmd(&sess->ts_sess, cmd);
826 
827 	sess->param = NULL;
828 	tee_ta_clear_busy(ta_ctx);
829 
830 	if (ta_ctx->panicked) {
831 		destroy_ta_ctx_from_session(sess);
832 		*err = TEE_ORIGIN_TEE;
833 		return TEE_ERROR_TARGET_DEAD;
834 	}
835 
836 	*err = sess->err_origin;
837 
838 	/* Short buffer is not an effective error case */
839 	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
840 		DMSG("Error: %x of %d", res, *err);
841 
842 	return res;
843 }
844 
845 #if defined(CFG_TA_STATS)
846 static TEE_Result dump_ta_memstats(struct tee_ta_session *s,
847 				   struct tee_ta_param *param)
848 {
849 	TEE_Result res = TEE_SUCCESS;
850 	struct tee_ta_ctx *ctx = NULL;
851 	struct ts_ctx *ts_ctx = NULL;
852 
853 	ts_ctx = s->ts_sess.ctx;
854 	if (!ts_ctx)
855 		return TEE_ERROR_ITEM_NOT_FOUND;
856 
857 	ctx = ts_to_ta_ctx(ts_ctx);
858 
859 	if (ctx->panicked)
860 		return TEE_ERROR_TARGET_DEAD;
861 
862 	if (tee_ta_try_set_busy(ctx)) {
863 		s->param = param;
864 		set_invoke_timeout(s, TEE_TIMEOUT_INFINITE);
865 		res = ts_ctx->ops->dump_mem_stats(&s->ts_sess);
866 		s->param = NULL;
867 		tee_ta_clear_busy(ctx);
868 	} else {
869 		/* Deadlock avoided */
870 		res = TEE_ERROR_BUSY;
871 	}
872 
873 	return res;
874 }
875 
876 static void init_dump_ctx(struct tee_ta_dump_ctx *dump_ctx)
877 {
878 	struct tee_ta_session *sess = NULL;
879 	struct tee_ta_session_head *open_sessions = NULL;
880 	struct tee_ta_ctx *ctx = NULL;
881 	unsigned int n = 0;
882 
883 	nsec_sessions_list_head(&open_sessions);
884 	/*
885 	 * Walk all available TA instances and, for each user TA context,
886 	 * record the IDs of the sessions opened against it by clients from
887 	 * the non-secure world.
888 	 */
889 	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
890 		unsigned int cnt = 0;
891 
892 		if (!is_user_ta_ctx(&ctx->ts_ctx))
893 			continue;
894 
895 		memcpy(&dump_ctx[n].uuid, &ctx->ts_ctx.uuid,
896 		       sizeof(ctx->ts_ctx.uuid));
897 		dump_ctx[n].panicked = ctx->panicked;
898 		dump_ctx[n].is_user_ta = is_user_ta_ctx(&ctx->ts_ctx);
899 		TAILQ_FOREACH(sess, open_sessions, link) {
900 			if (sess->ts_sess.ctx == &ctx->ts_ctx) {
901 				if (cnt == MAX_DUMP_SESS_NUM)
902 					break;
903 
904 				dump_ctx[n].sess_id[cnt] = sess->id;
905 				cnt++;
906 			}
907 		}
908 
909 		dump_ctx[n].sess_num = cnt;
910 		n++;
911 	}
912 }
913 
914 static TEE_Result dump_ta_stats(struct tee_ta_dump_ctx *dump_ctx,
915 				struct tee_ta_dump_stats *dump_stats,
916 				size_t ta_count)
917 {
918 	TEE_Result res = TEE_SUCCESS;
919 	struct tee_ta_session *sess = NULL;
920 	struct tee_ta_session_head *open_sessions = NULL;
921 	struct tee_ta_param param = { };
922 	unsigned int i = 0;
923 	unsigned int j = 0;
924 
925 	nsec_sessions_list_head(&open_sessions);
926 
927 	for (i = 0; i < ta_count; i++) {
928 		struct tee_ta_dump_stats *stats = &dump_stats[i];
929 
930 		memcpy(&stats->uuid, &dump_ctx[i].uuid,
931 		       sizeof(dump_ctx[i].uuid));
932 		stats->panicked = dump_ctx[i].panicked;
933 		stats->sess_num = dump_ctx[i].sess_num;
934 
935 		/* Find a session from dump context */
936 		for (j = 0, sess = NULL; j < dump_ctx[i].sess_num && !sess; j++)
937 			sess = tee_ta_get_session(dump_ctx[i].sess_id[j], true,
938 						  open_sessions);
939 
940 		if (!sess)
941 			continue;
942 		/* If a session exists, get its heap stats */
943 		memset(&param, 0, sizeof(struct tee_ta_param));
944 		param.types = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_OUTPUT,
945 					      TEE_PARAM_TYPE_VALUE_OUTPUT,
946 					      TEE_PARAM_TYPE_VALUE_OUTPUT,
947 					      TEE_PARAM_TYPE_NONE);
948 		res = dump_ta_memstats(sess, &param);
949 		if (res == TEE_SUCCESS) {
950 			stats->heap.allocated = param.u[0].val.a;
951 			stats->heap.max_allocated = param.u[0].val.b;
952 			stats->heap.size = param.u[1].val.a;
953 			stats->heap.num_alloc_fail = param.u[1].val.b;
954 			stats->heap.biggest_alloc_fail = param.u[2].val.a;
955 			stats->heap.biggest_alloc_fail_used = param.u[2].val.b;
956 		} else {
957 			memset(&stats->heap, 0, sizeof(stats->heap));
958 		}
959 		tee_ta_put_session(sess);
960 	}
961 
962 	return TEE_SUCCESS;
963 }
964 
965 TEE_Result tee_ta_instance_stats(void *buf, uint32_t *buf_size)
966 {
967 	TEE_Result res = TEE_SUCCESS;
968 	struct tee_ta_dump_stats *dump_stats = NULL;
969 	struct tee_ta_dump_ctx *dump_ctx = NULL;
970 	struct tee_ta_ctx *ctx = NULL;
971 	size_t sz = 0;
972 	size_t ta_count = 0;
973 
974 	if (!buf_size)
975 		return TEE_ERROR_BAD_PARAMETERS;
976 
977 	mutex_lock(&tee_ta_mutex);
978 
979 	/* Go through all available TAs and compute the required buffer size. */
980 	TAILQ_FOREACH(ctx, &tee_ctxes, link)
981 		if (is_user_ta_ctx(&ctx->ts_ctx))
982 			ta_count++;
983 
984 	sz = sizeof(struct tee_ta_dump_stats) * ta_count;
985 	if (!buf || *buf_size < sz) {
986 		/*
987 		 * A NULL buf, or a passed size smaller than the actual
988 		 * size, means the caller is querying the required buffer
989 		 * size, so update *buf_size.
990 		 */
991 		*buf_size = sz;
992 		res = TEE_ERROR_SHORT_BUFFER;
993 	} else if (!IS_ALIGNED_WITH_TYPE(buf, uint32_t)) {
994 		DMSG("Data alignment");
995 		res = TEE_ERROR_BAD_PARAMETERS;
996 	} else {
997 		dump_stats = (struct tee_ta_dump_stats *)buf;
998 		dump_ctx = malloc(sizeof(*dump_ctx) * ta_count);
999 		if (!dump_ctx)
1000 			res = TEE_ERROR_OUT_OF_MEMORY;
1001 		else
1002 			init_dump_ctx(dump_ctx);
1003 	}
1004 	mutex_unlock(&tee_ta_mutex);
1005 
1006 	if (res != TEE_SUCCESS)
1007 		return res;
1008 
1009 	/* Dump user TA stats by iterating over dump_ctx[] */
1010 	res = dump_ta_stats(dump_ctx, dump_stats, ta_count);
1011 	if (res == TEE_SUCCESS)
1012 		*buf_size = sz;
1013 
1014 	free(dump_ctx);
1015 	return res;
1016 }
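
/*
 * Editor's note: hypothetical caller sketch, not upstream code. It shows
 * the two-call protocol of tee_ta_instance_stats(): query the required
 * size first, then fetch the stats into a suitably aligned buffer.
 */
static TEE_Result __maybe_unused ta_stats_caller_sketch(void)
{
	TEE_Result res = TEE_SUCCESS;
	uint32_t size = 0;
	void *buf = NULL;

	res = tee_ta_instance_stats(NULL, &size);
	if (res != TEE_ERROR_SHORT_BUFFER)
		return res;

	buf = malloc(size);	/* malloc() output satisfies the alignment check */
	if (!buf)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = tee_ta_instance_stats(buf, &size);
	/* ... on success, buf holds one tee_ta_dump_stats per user TA ... */

	free(buf);
	return res;
}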
1017 #endif
1018 
1019 TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
1020 				 struct tee_ta_session *sess,
1021 				 const TEE_Identity *clnt_id)
1022 {
1023 	*err = TEE_ORIGIN_TEE;
1024 
1025 	if (check_client(sess, clnt_id) != TEE_SUCCESS)
1026 		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
1027 
1028 	sess->cancel = true;
1029 	return TEE_SUCCESS;
1030 }
1031 
1032 bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
1033 {
1034 	TEE_Time current_time;
1035 
1036 	if (s->cancel_mask)
1037 		return false;
1038 
1039 	if (s->cancel)
1040 		return true;
1041 
1042 	if (s->cancel_time.seconds == UINT32_MAX)
1043 		return false;
1044 
1045 	if (curr_time != NULL)
1046 		current_time = *curr_time;
1047 	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
1048 		return false;
1049 
1050 	if (current_time.seconds > s->cancel_time.seconds ||
1051 	    (current_time.seconds == s->cancel_time.seconds &&
1052 	     current_time.millis >= s->cancel_time.millis)) {
1053 		return true;
1054 	}
1055 
1056 	return false;
1057 }
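
/*
 * Editor's note: hypothetical sketch, not upstream code. A long-running
 * operation would poll the cancellation state between bounded chunks of
 * work; passing NULL lets the helper read the system time itself.
 */
static TEE_Result __maybe_unused cancellable_loop_sketch(struct tee_ta_session *s,
							 size_t chunks)
{
	size_t n = 0;

	for (n = 0; n < chunks; n++) {
		if (tee_ta_session_is_cancelled(s, NULL))
			return TEE_ERROR_CANCEL;
		/* ... process chunk n ... */
	}

	return TEE_SUCCESS;
}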
1058 
1059 #if defined(CFG_TA_GPROF_SUPPORT)
1060 void tee_ta_gprof_sample_pc(vaddr_t pc)
1061 {
1062 	struct ts_session *s = ts_get_current_session();
1063 	struct user_ta_ctx *utc = NULL;
1064 	struct sample_buf *sbuf = NULL;
1065 	TEE_Result res = 0;
1066 	size_t idx = 0;
1067 
1068 	sbuf = s->sbuf;
1069 	if (!sbuf || !sbuf->enabled)
1070 		return; /* PC sampling is not enabled */
1071 
1072 	idx = (((uint64_t)pc - sbuf->offset)/2 * sbuf->scale)/65536;
1073 	if (idx < sbuf->nsamples) {
1074 		utc = to_user_ta_ctx(s->ctx);
1075 		res = vm_check_access_rights(&utc->uctx,
1076 					     TEE_MEMORY_ACCESS_READ |
1077 					     TEE_MEMORY_ACCESS_WRITE |
1078 					     TEE_MEMORY_ACCESS_ANY_OWNER,
1079 					     (uaddr_t)&sbuf->samples[idx],
1080 					     sizeof(*sbuf->samples));
1081 		if (res != TEE_SUCCESS)
1082 			return;
1083 		sbuf->samples[idx]++;
1084 	}
1085 	sbuf->count++;
1086 }
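
/*
 * Editor's note: worked example, not upstream code. The index expression
 * above is the classic gprof histogram scaling: with offset 0x1000,
 * pc 0x1234 and scale 0x10000 (1:1 in 16.16 fixed point), idx =
 * ((0x234 / 2) * 0x10000) / 65536 = 0x11a, i.e. one counter per two
 * bytes of text.
 */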
1087 
1088 static void gprof_update_session_utime(bool suspend, struct ts_session *s,
1089 				       uint64_t now)
1090 {
1091 	struct sample_buf *sbuf = s->sbuf;
1092 
1093 	if (!sbuf)
1094 		return;
1095 
1096 	if (suspend) {
1097 		assert(sbuf->usr_entered);
1098 		sbuf->usr += now - sbuf->usr_entered;
1099 		sbuf->usr_entered = 0;
1100 	} else {
1101 		assert(!sbuf->usr_entered);
1102 		if (!now)
1103 			now++; /* 0 is reserved */
1104 		sbuf->usr_entered = now;
1105 	}
1106 }
1107 
1108 /*
1109  * Update user-mode CPU time for the current session
1110  * @suspend: true if session is being suspended (leaving user mode), false if
1111  * it is resumed (entering user mode)
1112  */
1113 static void tee_ta_update_session_utime(bool suspend)
1114 {
1115 	struct ts_session *s = ts_get_current_session();
1116 	uint64_t now = barrier_read_counter_timer();
1117 
1118 	gprof_update_session_utime(suspend, s, now);
1119 }
1120 
1121 void tee_ta_update_session_utime_suspend(void)
1122 {
1123 	tee_ta_update_session_utime(true);
1124 }
1125 
1126 void tee_ta_update_session_utime_resume(void)
1127 {
1128 	tee_ta_update_session_utime(false);
1129 }
1130 #endif
1131 
1132 #if defined(CFG_FTRACE_SUPPORT)
1133 static void ftrace_update_times(bool suspend)
1134 {
1135 	struct ts_session *s = ts_get_current_session_may_fail();
1136 	struct ftrace_buf *fbuf = NULL;
1137 	uint64_t now = 0;
1138 	uint32_t i = 0;
1139 
1140 	if (!s)
1141 		return;
1142 
1143 	now = barrier_read_counter_timer();
1144 
1145 	fbuf = s->fbuf;
1146 	if (!fbuf)
1147 		return;
1148 
1149 	if (suspend) {
1150 		fbuf->suspend_time = now;
1151 	} else {
1152 		for (i = 0; i <= fbuf->ret_idx; i++)
1153 			fbuf->begin_time[i] += now - fbuf->suspend_time;
1154 	}
1155 }
1156 
1157 void tee_ta_ftrace_update_times_suspend(void)
1158 {
1159 	ftrace_update_times(true);
1160 }
1161 
1162 void tee_ta_ftrace_update_times_resume(void)
1163 {
1164 	ftrace_update_times(false);
1165 }
1166 #endif
1167 
1168 bool is_ta_ctx(struct ts_ctx *ctx)
1169 {
1170 	return is_user_ta_ctx(ctx) || is_pseudo_ta_ctx(ctx);
1171 }
1172