xref: /optee_os/core/kernel/tee_ta_manager.c (revision 941a58d78c99c4754fbd4ec3079ec9e1d596af8f)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020, Arm Limited
 */

#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/stmm_sp.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <malloc.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <pta_stats.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee/entry_std.h>
#include <tee/tee_obj.h>
#include <trace.h>
#include <types_ext.h>
#include <user_ta_header.h>
#include <utee_types.h>
#include <util.h>

#if defined(CFG_TA_STATS)
#define MAX_DUMP_SESS_NUM	(16)

struct tee_ta_dump_ctx {
	TEE_UUID uuid;
	uint32_t panicked;
	bool is_user_ta;
	uint32_t sess_num;
	uint32_t sess_id[MAX_DUMP_SESS_NUM];
};
#endif

/* This mutex protects the critical section in tee_ta_init_session */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
/* This condvar is used when waiting for a TA context to become initialized */
struct condvar tee_ta_init_cv = CONDVAR_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static short int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

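/*
 * Single-instance TA serialization: with CFG_CONCURRENT_SINGLE_INSTANCE_TA
 * the helpers below degenerate to no-ops and single-instance TAs may run
 * concurrently. Otherwise the lock is recursive per thread: the first
 * lock_single_instance() on a thread claims ownership and later calls only
 * bump tee_ta_single_instance_count.
 */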
#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available. */
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}

static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}
#endif

struct tee_ta_session *__noprof to_ta_session(struct ts_session *sess)
{
	assert(is_ta_ctx(sess->ctx) || is_stmm_ctx(sess->ctx));
	return container_of(sess, struct tee_ta_session, ts_sess);
}

static struct tee_ta_ctx *ts_to_ta_ctx(struct ts_ctx *ctx)
{
	if (is_ta_ctx(ctx))
		return to_ta_ctx(ctx);

	if (is_stmm_ctx(ctx))
		return &(to_stmm_ctx(ctx)->ta_ctx);

	panic("bad context");
}

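/*
 * Busy-flag protocol: a non-concurrent TA executes at most one request at a
 * time. tee_ta_try_set_busy() blocks until the context is free, except when
 * the calling thread already holds the single-instance lock and the TA is
 * busy, in which case waiting could deadlock and false is returned instead.
 */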
static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	if (ctx->flags & TA_FLAG_CONCURRENT)
		return true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We're holding the single-instance lock and the
			 * TA is busy. Since waiting now would only cause
			 * a deadlock, we release the lock and return false.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, so we're
		 * free to wait for the TA to become available.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either it's already true or we should set it to true */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	if (ctx->flags & TA_FLAG_CONCURRENT)
		return;

	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		unlock_single_instance();

	mutex_unlock(&tee_ta_mutex);
}

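/*
 * The session holds one base reference from creation until
 * tee_ta_unlink_session() removes it. Signalling refc_cv at ref_count == 1
 * wakes the thread in tee_ta_unlink_session() that waits for all other
 * users of the session to drop their references.
 */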
static void dec_session_ref_count(struct tee_ta_session *s)
{
	assert(s->ref_count > 0);
	s->ref_count--;
	if (s->ref_count == 1)
		condvar_signal(&s->refc_cv);
}

void tee_ta_put_session(struct tee_ta_session *s)
{
	mutex_lock(&tee_ta_mutex);

	if (s->lock_thread == thread_get_id()) {
		s->lock_thread = THREAD_ID_INVALID;
		condvar_signal(&s->lock_cv);
	}
	dec_session_ref_count(s);

	mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *found = NULL;

	TAILQ_FOREACH(s, open_sessions, link) {
		if (s->id == id) {
			found = s;
			break;
		}
	}

	return found;
}

struct tee_ta_session *tee_ta_find_session(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;

	mutex_lock(&tee_ta_mutex);

	s = tee_ta_find_session_nolock(id, open_sessions);

	mutex_unlock(&tee_ta_mutex);

	return s;
}

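/*
 * Look up a session by ID and take a reference on it. With @exclusive the
 * caller additionally becomes the session's lock owner, blocking until any
 * previous owner releases it via tee_ta_put_session(). NULL is returned if
 * the session doesn't exist or is being torn down (s->unlink set).
 */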
struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	mutex_lock(&tee_ta_mutex);

	while (true) {
		s = tee_ta_find_session_nolock(id, open_sessions);
		if (!s)
			break;
		if (s->unlink) {
			s = NULL;
			break;
		}
		s->ref_count++;
		if (!exclusive)
			break;

		assert(s->lock_thread != thread_get_id());

		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
			condvar_wait(&s->lock_cv, &tee_ta_mutex);

		if (s->unlink) {
			dec_session_ref_count(s);
			s = NULL;
			break;
		}

		s->lock_thread = thread_get_id();
		break;
	}

	mutex_unlock(&tee_ta_mutex);
	return s;
}

static void tee_ta_unlink_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	mutex_lock(&tee_ta_mutex);

	assert(s->ref_count >= 1);
	assert(s->lock_thread == thread_get_id());
	assert(!s->unlink);

	s->unlink = true;
	condvar_broadcast(&s->lock_cv);

	while (s->ref_count != 1)
		condvar_wait(&s->refc_cv, &tee_ta_mutex);

	TAILQ_REMOVE(open_sessions, s, link);

	mutex_unlock(&tee_ta_mutex);
}

static void destroy_session(struct tee_ta_session *s,
			    struct tee_ta_session_head *open_sessions)
{
#if defined(CFG_FTRACE_SUPPORT)
	if (s->ts_sess.ctx && s->ts_sess.ctx->ops->dump_ftrace) {
		ts_push_current_session(&s->ts_sess);
		s->ts_sess.fbuf = NULL;
		s->ts_sess.ctx->ops->dump_ftrace(s->ts_sess.ctx);
		ts_pop_current_session();
	}
#endif

	tee_ta_unlink_session(s, open_sessions);
#if defined(CFG_TA_GPROF_SUPPORT)
	free(s->ts_sess.sbuf);
#endif
	free(s);
}

static void destroy_context(struct tee_ta_ctx *ctx)
{
	DMSG("Destroy TA ctx (0x%" PRIxVA ")",  (vaddr_t)ctx);

	condvar_destroy(&ctx->busy_cv);
	ctx->ts_ctx.ops->destroy(&ctx->ts_ctx);
}

/*
 * tee_ta_context_find - Find a TA context based on a UUID (input)
 * Returns a pointer to the context, or NULL if not found
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx;

	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (memcmp(&ctx->ts_ctx.uuid, uuid, sizeof(TEE_UUID)) == 0)
			return ctx;
	}

	return NULL;
}

/* Check that the requester (client ID) matches the session's initial client */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
	if (id == KERN_IDENTITY)
		return TEE_SUCCESS;

	if (id == NSAPP_IDENTITY) {
		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
			DMSG("nsec tries to hijack TA session");
			return TEE_ERROR_ACCESS_DENIED;
		}
		return TEE_SUCCESS;
	}

	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
		DMSG("client id mismatch");
		return TEE_ERROR_ACCESS_DENIED;
	}
	return TEE_SUCCESS;
}

/*
 * Check if the invocation parameters match the TA properties
 *
 * @sess - current session handle
 * @param - already identified memory references hold a valid 'mobj'.
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy)
 * - Only SDP flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
			 struct tee_ta_param *param __unused)
{
	/*
	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
	 * are rejected at OP-TEE core entry. Hence here all TAs have the
	 * same permissions regarding memory reference parameters.
	 */
	return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
			 struct tee_ta_param *param)
{
	int n;

	/*
	 * When CFG_SECURE_DATA_PATH is enabled, OP-TEE entry allows SHM and
	 * SDP memory references. Only TAs flagged SDP can access SDP memory.
	 */
	if (sess->ts_sess.ctx &&
	    ts_to_ta_ctx(sess->ts_sess.ctx)->flags & TA_FLAG_SECURE_DATA_PATH)
		return true;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		struct param_mem *mem = &param->u[n].mem;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!mem->size)
			continue;
		if (mobj_is_sdp_mem(mem->mobj))
			return false;
	}
	return true;
}
#endif

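/*
 * Convert a relative cancellation timeout (ms) into an absolute TEE_Time
 * deadline stored in the session. For example, with cancel_req_to = 2500 ms
 * and a current time of 10 s / 800 ms, the deadline becomes 13 s / 300 ms
 * after the millisecond carry is folded into the seconds field. On
 * TEE_TIMEOUT_INFINITE or any overflow, the deadline is set to
 * UINT32_MAX/UINT32_MAX, which tee_ta_session_is_cancelled() treats as
 * "never expires".
 */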
static void set_invoke_timeout(struct tee_ta_session *sess,
				      uint32_t cancel_req_to)
{
	TEE_Time current_time;
	TEE_Time cancel_time;

	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
		goto infinite;

	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		goto infinite;

	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
			 &cancel_time.seconds))
		goto infinite;

	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
	if (cancel_time.millis > 1000) {
		/* Carry the millisecond overflow into the seconds field */
		if (ADD_OVERFLOW(cancel_time.seconds, 1,
				 &cancel_time.seconds))
			goto infinite;

		cancel_time.millis -= 1000;
	}

	sess->cancel_time = cancel_time;
	return;

infinite:
	sess->cancel_time.seconds = UINT32_MAX;
	sess->cancel_time.millis = UINT32_MAX;
}

/*-----------------------------------------------------------------------------
 * Close a Trusted Application and free available resources
 *---------------------------------------------------------------------------*/
TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
				struct tee_ta_session_head *open_sessions,
				const TEE_Identity *clnt_id)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool keep_crashed = false;
	bool keep_alive = false;

	DMSG("csess 0x%" PRIxVA " id %u",
	     (vaddr_t)csess, csess ? csess->id : UINT_MAX);

	if (!csess)
		return TEE_ERROR_ITEM_NOT_FOUND;

	sess = tee_ta_get_session(csess->id, true, open_sessions);

	if (!sess) {
		EMSG("session 0x%" PRIxVA " to be removed is not found",
		     (vaddr_t)csess);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
		tee_ta_put_session(sess);
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
	}

	DMSG("Destroy session");

	ts_ctx = sess->ts_sess.ctx;
	if (!ts_ctx) {
		destroy_session(sess, open_sessions);
		return TEE_SUCCESS;
	}

	ctx = ts_to_ta_ctx(ts_ctx);
	if (ctx->panicked) {
		destroy_session(sess, open_sessions);
	} else {
		tee_ta_set_busy(ctx);
		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
		ts_ctx->ops->enter_close_session(&sess->ts_sess);
		destroy_session(sess, open_sessions);
		tee_ta_clear_busy(ctx);
	}

	mutex_lock(&tee_ta_mutex);

	if (ctx->ref_count <= 0)
		panic();

	ctx->ref_count--;
	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		keep_alive = ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE;
	if (keep_alive)
		keep_crashed = ctx->flags & TA_FLAG_INSTANCE_KEEP_CRASHED;
	if (!ctx->ref_count &&
	    ((ctx->panicked && !keep_crashed) || !keep_alive)) {
		if (!ctx->is_releasing) {
			TAILQ_REMOVE(&tee_ctxes, ctx, link);
			ctx->is_releasing = true;
		}
		mutex_unlock(&tee_ta_mutex);

		destroy_context(ctx);
	} else
		mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}

static TEE_Result tee_ta_init_session_with_context(struct tee_ta_session *s,
						   const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx = NULL;

	while (true) {
		ctx = tee_ta_context_find(uuid);
		if (!ctx)
			return TEE_ERROR_ITEM_NOT_FOUND;

		if (!ctx->is_initializing)
			break;
		/*
		 * Context is still initializing, wait here until it's
		 * fully initialized. Note that we're searching for the
		 * context again since it may have been removed while we
		 * were sleeping.
		 */
		condvar_wait(&tee_ta_init_cv, &tee_ta_mutex);
	}

	/*
	 * If the trusted service is not a single instance service (e.g. is
	 * a multi-instance TA) it should be loaded as a new instance instead
	 * of doing anything with this instance. So tell the caller that we
	 * didn't find the TA so that the caller will load a new instance.
	 */
	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * The trusted service is single instance; if it isn't multi-session
	 * we can't create another session unless its reference count is zero.
	 */
	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
		return TEE_ERROR_BUSY;

	DMSG("Re-open trusted service %pUl", (void *)&ctx->ts_ctx.uuid);

	ctx->ref_count++;
	s->ts_sess.ctx = &ctx->ts_ctx;
	s->ts_sess.handle_scall = s->ts_sess.ctx->ops->handle_scall;
	return TEE_SUCCESS;
}

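/*
 * Allocate a session ID that isn't currently in use. Starting from the last
 * session's ID + 1 makes a collision unlikely; the do/while then scans the
 * full 32-bit space (skipping the reserved value 0) and returns 0 only if
 * every other ID is taken.
 */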
static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *last = NULL;
	uint32_t saved = 0;
	uint32_t id = 1;

	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
	if (last) {
		/* This value is less likely to be already used */
		id = last->id + 1;
		if (!id)
			id++; /* 0 is not valid */
	}

	saved = id;
	do {
		if (!tee_ta_find_session_nolock(id, open_sessions))
			return id;
		id++;
		if (!id)
			id++;
	} while (id != saved);

	return 0;
}

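/*
 * Create a session and bind it to a service instance. Lookup order: an
 * already loaded single-instance TA context, then a stand-alone MM secure
 * partition (stmm), then a pseudo TA, and finally a user TA loaded from
 * storage. Each probe returns TEE_ERROR_ITEM_NOT_FOUND to fall through to
 * the next one; any other error aborts the session setup.
 */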
static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
				struct tee_ta_session_head *open_sessions,
				const TEE_UUID *uuid,
				struct tee_ta_session **sess)
{
	TEE_Result res;
	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

	*err = TEE_ORIGIN_TEE;
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->cancel_mask = true;
	condvar_init(&s->refc_cv);
	condvar_init(&s->lock_cv);
	s->lock_thread = THREAD_ID_INVALID;
	s->ref_count = 1;

	mutex_lock(&tee_ta_mutex);
	s->id = new_session_id(open_sessions);
	if (!s->id) {
		res = TEE_ERROR_OVERFLOW;
		goto err_mutex_unlock;
	}

	TAILQ_INSERT_TAIL(open_sessions, s, link);

	/* Look for already loaded TA */
	res = tee_ta_init_session_with_context(s, uuid);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND) {
		mutex_unlock(&tee_ta_mutex);
		goto out;
	}

	/* Look for secure partition */
	res = stmm_init_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND) {
		mutex_unlock(&tee_ta_mutex);
		if (res == TEE_SUCCESS)
			res = stmm_complete_session(s);

		goto out;
	}

	/* Look for pseudo TA */
	res = tee_ta_init_pseudo_ta_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND) {
		mutex_unlock(&tee_ta_mutex);
		goto out;
	}

	/* Look for user TA */
	res = tee_ta_init_user_ta_session(uuid, s);
	mutex_unlock(&tee_ta_mutex);
	if (res == TEE_SUCCESS)
		res = tee_ta_complete_user_ta_session(s);

out:
	if (!res) {
		*sess = s;
		return TEE_SUCCESS;
	}

	mutex_lock(&tee_ta_mutex);
	TAILQ_REMOVE(open_sessions, s, link);
err_mutex_unlock:
	mutex_unlock(&tee_ta_mutex);
	free(s);
	return res;
}

static void release_ta_ctx(struct tee_ta_ctx *ctx)
{
	bool was_releasing = false;

	mutex_lock(&tee_ta_mutex);
	was_releasing = ctx->is_releasing;
	ctx->is_releasing = true;
	if (!was_releasing) {
		DMSG("Releasing panicked TA ctx");
		TAILQ_REMOVE(&tee_ctxes, ctx, link);
	}
	mutex_unlock(&tee_ta_mutex);

	if (!was_releasing)
		ctx->ts_ctx.ops->release_state(&ctx->ts_ctx);
}

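/*
 * Typical caller flow (e.g. the non-secure entry points in
 * tee/entry_std.c): tee_ta_open_session() to create the session,
 * tee_ta_invoke_command() any number of times, tee_ta_cancel_command()
 * from another thread if needed, then tee_ta_close_session().
 */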
TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
			       struct tee_ta_session **sess,
			       struct tee_ta_session_head *open_sessions,
			       const TEE_UUID *uuid,
			       const TEE_Identity *clnt_id,
			       uint32_t cancel_req_to,
			       struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_session *s = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool panicked = false;
	bool was_busy = false;

	res = tee_ta_init_session(err, open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("init session failed 0x%x", res);
		return res;
	}

	if (!check_params(s, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ts_ctx = s->ts_sess.ctx;
	ctx = ts_to_ta_ctx(ts_ctx);

	if (tee_ta_try_set_busy(ctx)) {
		if (!ctx->panicked) {
			/* Save identity of the owner of the session */
			s->clnt_id = *clnt_id;
			s->param = param;
			set_invoke_timeout(s, cancel_req_to);
			res = ts_ctx->ops->enter_open_session(&s->ts_sess);
			s->param = NULL;
		}

		panicked = ctx->panicked;
		if (panicked) {
			release_ta_ctx(ctx);
			res = TEE_ERROR_TARGET_DEAD;
		}

		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
		was_busy = true;
	}

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors,
	 * except when the TA panicked or was busy.
	 */
	if (panicked || was_busy)
		*err = TEE_ORIGIN_TEE;
	else
		*err = s->err_origin;

	tee_ta_put_session(s);
	if (panicked || res != TEE_SUCCESS)
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

	if (!res)
		*sess = s;
	else
		EMSG("Failed for TA %pUl. Return error %#"PRIx32, uuid, res);

	return res;
}

TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id,
				 uint32_t cancel_req_to, uint32_t cmd,
				 struct tee_ta_param *param)
{
	struct tee_ta_ctx *ta_ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	TEE_Result res = TEE_SUCCESS;
	bool panicked = false;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	if (!check_params(sess, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ts_ctx = sess->ts_sess.ctx;
	ta_ctx = ts_to_ta_ctx(ts_ctx);

	tee_ta_set_busy(ta_ctx);

	if (!ta_ctx->panicked) {
		sess->param = param;
		set_invoke_timeout(sess, cancel_req_to);
		res = ts_ctx->ops->enter_invoke_cmd(&sess->ts_sess, cmd);
		sess->param = NULL;
	}

	panicked = ta_ctx->panicked;
	if (panicked) {
		release_ta_ctx(ta_ctx);
		res = TEE_ERROR_TARGET_DEAD;
	}

	tee_ta_clear_busy(ta_ctx);

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors,
	 * except when the TA panicked.
	 */
	if (panicked)
		*err = TEE_ORIGIN_TEE;
	else
		*err = sess->err_origin;

	/* A short buffer is not treated as an effective error case */
	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
		DMSG("Error: %x of %d", res, *err);

	return res;
}

#if defined(CFG_TA_STATS)
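/*
 * CFG_TA_STATS: dump per-TA heap statistics. The TA is entered through its
 * dump_mem_stats() op under the usual busy protocol, so a busy
 * non-concurrent TA yields TEE_ERROR_BUSY rather than a deadlock.
 */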
static TEE_Result dump_ta_memstats(struct tee_ta_session *s,
				   struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool panicked = false;

	ts_ctx = s->ts_sess.ctx;
	if (!ts_ctx)
		return TEE_ERROR_ITEM_NOT_FOUND;

	ctx = ts_to_ta_ctx(ts_ctx);

	if (ctx->is_initializing)
		return TEE_ERROR_BAD_STATE;

	if (tee_ta_try_set_busy(ctx)) {
		if (!ctx->panicked) {
			s->param = param;
			set_invoke_timeout(s, TEE_TIMEOUT_INFINITE);
			res = ts_ctx->ops->dump_mem_stats(&s->ts_sess);
			s->param = NULL;
		}

		panicked = ctx->panicked;
		if (panicked) {
			release_ta_ctx(ctx);
			res = TEE_ERROR_TARGET_DEAD;
		}

		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
	}

	return res;
}

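/*
 * Snapshot all user TA contexts into dump_ctx[]: for each context, record
 * its UUID, panic state and the IDs of up to MAX_DUMP_SESS_NUM sessions
 * found on the list returned by nsec_sessions_list_head(). The caller is
 * assumed to hold tee_ta_mutex and to have sized dump_ctx[] to the current
 * number of user TA contexts.
 */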
static void init_dump_ctx(struct tee_ta_dump_ctx *dump_ctx)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_ctx *ctx = NULL;
	unsigned int n = 0;

	nsec_sessions_list_head(&open_sessions);
	/*
	 * Scan all available TA instances and, for each context, record
	 * the sessions opened on it.
	 */
	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		unsigned int cnt = 0;

		if (!is_user_ta_ctx(&ctx->ts_ctx))
			continue;

		memcpy(&dump_ctx[n].uuid, &ctx->ts_ctx.uuid,
		       sizeof(ctx->ts_ctx.uuid));
		dump_ctx[n].panicked = ctx->panicked;
		dump_ctx[n].is_user_ta = is_user_ta_ctx(&ctx->ts_ctx);
		TAILQ_FOREACH(sess, open_sessions, link) {
			if (sess->ts_sess.ctx == &ctx->ts_ctx) {
				if (cnt == MAX_DUMP_SESS_NUM)
					break;

				dump_ctx[n].sess_id[cnt] = sess->id;
				cnt++;
			}
		}

		dump_ctx[n].sess_num = cnt;
		n++;
	}
}

static TEE_Result dump_ta_stats(struct tee_ta_dump_ctx *dump_ctx,
				struct pta_stats_ta *dump_stats,
				size_t ta_count)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_param param = { };
	unsigned int i = 0;
	unsigned int j = 0;

	nsec_sessions_list_head(&open_sessions);

	for (i = 0; i < ta_count; i++) {
		struct pta_stats_ta *stats = &dump_stats[i];

		memcpy(&stats->uuid, &dump_ctx[i].uuid,
		       sizeof(dump_ctx[i].uuid));
		stats->panicked = dump_ctx[i].panicked;
		stats->sess_num = dump_ctx[i].sess_num;

		/* Find a session from the dump context */
		for (j = 0, sess = NULL; j < dump_ctx[i].sess_num && !sess; j++)
			sess = tee_ta_get_session(dump_ctx[i].sess_id[j], true,
						  open_sessions);

		if (!sess)
			continue;
		/* If the session exists, get its heap stats */
		memset(&param, 0, sizeof(struct tee_ta_param));
		param.types = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_NONE);
		res = dump_ta_memstats(sess, &param);
		if (res == TEE_SUCCESS) {
			stats->heap.allocated = param.u[0].val.a;
			stats->heap.max_allocated = param.u[0].val.b;
			stats->heap.size = param.u[1].val.a;
			stats->heap.num_alloc_fail = param.u[1].val.b;
			stats->heap.biggest_alloc_fail = param.u[2].val.a;
			stats->heap.biggest_alloc_fail_used = param.u[2].val.b;
		} else {
			memset(&stats->heap, 0, sizeof(stats->heap));
		}
		tee_ta_put_session(sess);
	}

	return TEE_SUCCESS;
}

TEE_Result tee_ta_instance_stats(void *buf, size_t *buf_size)
{
	TEE_Result res = TEE_SUCCESS;
	struct pta_stats_ta *dump_stats = NULL;
	struct tee_ta_dump_ctx *dump_ctx = NULL;
	struct tee_ta_ctx *ctx = NULL;
	size_t sz = 0;
	size_t ta_count = 0;

	if (!buf_size)
		return TEE_ERROR_BAD_PARAMETERS;

	mutex_lock(&tee_ta_mutex);

	/* Go through all available TAs and compute the required buffer size */
	TAILQ_FOREACH(ctx, &tee_ctxes, link)
		if (is_user_ta_ctx(&ctx->ts_ctx))
			ta_count++;

	sz = sizeof(struct pta_stats_ta) * ta_count;
	if (!sz) {
		/* sz == 0 means there is no user TA, return no item found */
		res = TEE_ERROR_ITEM_NOT_FOUND;
	} else if (!buf || *buf_size < sz) {
		/*
		 * A NULL buf or a size smaller than required means the
		 * caller is querying the buffer size, so update *buf_size.
		 */
		*buf_size = sz;
		res = TEE_ERROR_SHORT_BUFFER;
	} else if (!IS_ALIGNED_WITH_TYPE(buf, uint32_t)) {
		DMSG("Data alignment");
		res = TEE_ERROR_BAD_PARAMETERS;
	} else {
		dump_stats = (struct pta_stats_ta *)buf;
		dump_ctx = malloc(sizeof(struct tee_ta_dump_ctx) * ta_count);
		if (!dump_ctx)
			res = TEE_ERROR_OUT_OF_MEMORY;
		else
			init_dump_ctx(dump_ctx);
	}
	mutex_unlock(&tee_ta_mutex);

	if (res != TEE_SUCCESS)
		return res;

	/* Dump user TA stats by iterating over dump_ctx[] */
	res = dump_ta_stats(dump_ctx, dump_stats, ta_count);
	if (res == TEE_SUCCESS)
		*buf_size = sz;

	free(dump_ctx);
	return res;
}
#endif

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id)
{
	*err = TEE_ORIGIN_TEE;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	sess->cancel = true;
	return TEE_SUCCESS;
}

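/*
 * A session is considered cancelled when its cancel flag is set (via
 * tee_ta_cancel_command()) or when its cancellation deadline has passed.
 * The check is skipped entirely while the TA masks cancellation
 * (s->cancel_mask, the initial state of every new session).
 */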
bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}

#if defined(CFG_TA_GPROF_SUPPORT)
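/*
 * Record one PC sample in the session's gprof histogram. Following the
 * usual gprof/profil() convention, the bucket index is computed as
 * ((pc - offset) / 2) * scale / 65536, i.e. scale is a 16.16 fixed-point
 * factor mapping halved text offsets to histogram slots. Out-of-range PCs
 * only bump the total sample count.
 */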
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
	struct ts_session *s = ts_get_current_session();
	struct user_ta_ctx *utc = NULL;
	struct sample_buf *sbuf = NULL;
	TEE_Result res = 0;
	size_t idx = 0;

	sbuf = s->sbuf;
	if (!sbuf || !sbuf->enabled)
		return; /* PC sampling is not enabled */

	idx = (((uint64_t)pc - sbuf->offset)/2 * sbuf->scale)/65536;
	if (idx < sbuf->nsamples) {
		utc = to_user_ta_ctx(s->ctx);
		res = vm_check_access_rights(&utc->uctx,
					     TEE_MEMORY_ACCESS_READ |
					     TEE_MEMORY_ACCESS_WRITE |
					     TEE_MEMORY_ACCESS_ANY_OWNER,
					     (uaddr_t)&sbuf->samples[idx],
					     sizeof(*sbuf->samples));
		if (res != TEE_SUCCESS)
			return;
		sbuf->samples[idx]++;
	}
	sbuf->count++;
}

static void gprof_update_session_utime(bool suspend, struct ts_session *s,
				       uint64_t now)
{
	struct sample_buf *sbuf = s->sbuf;

	if (!sbuf)
		return;

	if (suspend) {
		assert(sbuf->usr_entered);
		sbuf->usr += now - sbuf->usr_entered;
		sbuf->usr_entered = 0;
	} else {
		assert(!sbuf->usr_entered);
		if (!now)
			now++; /* 0 is reserved */
		sbuf->usr_entered = now;
	}
}

/*
 * Update user-mode CPU time for the current session
 * @suspend: true if session is being suspended (leaving user mode), false if
 * it is resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
	struct ts_session *s = ts_get_current_session();
	uint64_t now = barrier_read_counter_timer();

	gprof_update_session_utime(suspend, s, now);
}

void tee_ta_update_session_utime_suspend(void)
{
	tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
	tee_ta_update_session_utime(false);
}
#endif

#if defined(CFG_FTRACE_SUPPORT)
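/*
 * Compensate ftrace timestamps for time spent suspended: on suspend the
 * current counter value is recorded, and on resume every pending
 * begin_time[] entry (up to ret_idx) is shifted forward by the time the
 * session was suspended, so function durations exclude suspended time.
 */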
static void ftrace_update_times(bool suspend)
{
	struct ts_session *s = ts_get_current_session_may_fail();
	struct ftrace_buf *fbuf = NULL;
	uint64_t now = 0;
	uint32_t i = 0;

	if (!s)
		return;

	now = barrier_read_counter_timer();

	fbuf = s->fbuf;
	if (!fbuf)
		return;

	if (suspend) {
		fbuf->suspend_time = now;
	} else {
		for (i = 0; i <= fbuf->ret_idx; i++)
			fbuf->begin_time[i] += now - fbuf->suspend_time;
	}
}

void tee_ta_ftrace_update_times_suspend(void)
{
	ftrace_update_times(true);
}

void tee_ta_ftrace_update_times_resume(void)
{
	ftrace_update_times(false);
}
#endif

bool __noprof is_ta_ctx(struct ts_ctx *ctx)
{
	return is_user_ta_ctx(ctx) || is_pseudo_ta_ctx(ctx);
}
1147