xref: /optee_os/core/kernel/tee_ta_manager.c (revision a1d5c81f8834a9d2c6f4372cce2e59e70e709121)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/secure_partition.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/tee_mmu.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee/entry_std.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc_storage.h>
#include <trace.h>
#include <types_ext.h>
#include <user_ta_header.h>
#include <utee_types.h>
#include <util.h>

/* This mutex protects the critical section in tee_ta_init_session */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static short int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

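/*
 * Single-instance lock: when CFG_CONCURRENT_SINGLE_INSTANCE_TA is set
 * the lock is compiled out (the stubs below do nothing) and sessions
 * may enter a single-instance TA concurrently. Otherwise the lock
 * serializes entry, tracking the owning thread and a nesting count
 * under tee_ta_mutex.
 */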
#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available. */
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}

static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}
#endif

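/*
 * Mark the TA context busy before entering the TA. Returns false
 * instead of blocking when this thread already holds the
 * single-instance lock and the context is busy, since waiting in that
 * state could deadlock against the thread that currently owns the
 * context.
 */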
static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	if (ctx->flags & TA_FLAG_CONCURRENT)
		return true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->initializing) {
		/*
		 * Context is still initializing and flags cannot be relied
		 * on for user TAs. Wait here until it's initialized.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We're holding the single-instance lock and the
			 * TA is busy. Waiting now would only cause a
			 * deadlock, so we release the lock and return
			 * false.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, we're free to
		 * wait for the TA to become available.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either it's already true or we should set it to true */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	if (ctx->flags & TA_FLAG_CONCURRENT)
		return;

	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

	if (!ctx->initializing && (ctx->flags & TA_FLAG_SINGLE_INSTANCE))
		unlock_single_instance();

	ctx->initializing = false;

	mutex_unlock(&tee_ta_mutex);
}
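
/*
 * Typical usage (sketch, mirroring tee_ta_invoke_command() below): a
 * non-concurrent TA is only entered between a busy/clear pair:
 *
 *	tee_ta_set_busy(ctx);
 *	set_invoke_timeout(sess, cancel_req_to);
 *	res = ctx->ops->enter_invoke_cmd(sess, cmd, param, err);
 *	tee_ta_clear_busy(ctx);
 */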

static void dec_session_ref_count(struct tee_ta_session *s)
{
	assert(s->ref_count > 0);
	s->ref_count--;
	if (s->ref_count == 1)
		condvar_signal(&s->refc_cv);
}

void tee_ta_put_session(struct tee_ta_session *s)
{
	mutex_lock(&tee_ta_mutex);

	if (s->lock_thread == thread_get_id()) {
		s->lock_thread = THREAD_ID_INVALID;
		condvar_signal(&s->lock_cv);
	}
	dec_session_ref_count(s);

	mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *found = NULL;

	TAILQ_FOREACH(s, open_sessions, link) {
		if (s->id == id) {
			found = s;
			break;
		}
	}

	return found;
}

struct tee_ta_session *tee_ta_find_session(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;

	mutex_lock(&tee_ta_mutex);

	s = tee_ta_find_session_nolock(id, open_sessions);

	mutex_unlock(&tee_ta_mutex);

	return s;
}

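/*
 * Look up a session by ID and take a reference on it. With @exclusive
 * set the call also blocks until no other thread holds the session
 * lock. The reference (and lock) is released with
 * tee_ta_put_session(). Returns NULL if the session doesn't exist or
 * is marked for removal (unlink).
 */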
struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	mutex_lock(&tee_ta_mutex);

	while (true) {
		s = tee_ta_find_session_nolock(id, open_sessions);
		if (!s)
			break;
		if (s->unlink) {
			s = NULL;
			break;
		}
		s->ref_count++;
		if (!exclusive)
			break;

		assert(s->lock_thread != thread_get_id());

		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
			condvar_wait(&s->lock_cv, &tee_ta_mutex);

		if (s->unlink) {
			dec_session_ref_count(s);
			s = NULL;
			break;
		}

		s->lock_thread = thread_get_id();
		break;
	}

	mutex_unlock(&tee_ta_mutex);
	return s;
}

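/*
 * Mark the session for removal and wait until this thread holds the
 * last reference (ref_count == 1) before unlinking, so no other
 * thread can still be using the session when it is freed.
 */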
static void tee_ta_unlink_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	mutex_lock(&tee_ta_mutex);

	assert(s->ref_count >= 1);
	assert(s->lock_thread == thread_get_id());
	assert(!s->unlink);

	s->unlink = true;
	condvar_broadcast(&s->lock_cv);

	while (s->ref_count != 1)
		condvar_wait(&s->refc_cv, &tee_ta_mutex);

	TAILQ_REMOVE(open_sessions, s, link);

	mutex_unlock(&tee_ta_mutex);
}

static void destroy_session(struct tee_ta_session *s,
			    struct tee_ta_session_head *open_sessions)
{
#if defined(CFG_FTRACE_SUPPORT)
	if (s->ctx && s->ctx->ops->dump_ftrace) {
		tee_ta_push_current_session(s);
		s->fbuf = NULL;
		s->ctx->ops->dump_ftrace(s->ctx);
		tee_ta_pop_current_session();
	}
#endif

	tee_ta_unlink_session(s, open_sessions);
#if defined(CFG_TA_GPROF_SUPPORT)
	free(s->sbuf);
#endif
	free(s);
}

static void destroy_context(struct tee_ta_ctx *ctx)
{
	DMSG("Destroy TA ctx (0x%" PRIxVA ")", (vaddr_t)ctx);

	condvar_destroy(&ctx->busy_cv);
	pgt_flush_ctx(ctx);
	ctx->ops->destroy(ctx);
}

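/*
 * Used for a panicked TA: clears every other session's reference to
 * the dying context, unlinks the context from the global list,
 * destroys it and finally clears the current session's own pointer.
 */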
static void destroy_ta_ctx_from_session(struct tee_ta_session *s)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct user_ta_ctx *utc = NULL;
	size_t count = 1; /* start counting the references to the context */

	DMSG("Remove references to context (0x%" PRIxVA ")", (vaddr_t)s->ctx);

	mutex_lock(&tee_ta_mutex);
	nsec_sessions_list_head(&open_sessions);

	/*
	 * The next two loops remove all references to the context that is
	 * about to be destroyed, except from the current session. The
	 * current session is dealt with at the end of this function, once
	 * the context has been properly destroyed.
	 */

	/*
	 * Scan the entire list of sessions opened by clients from the
	 * non-secure world.
	 */
	TAILQ_FOREACH(sess, open_sessions, link) {
		if (sess->ctx == s->ctx && sess != s) {
			sess->ctx = NULL;
			count++;
		}
	}

	/*
	 * Scan all sessions opened from the secure side by walking all
	 * available TA instances and, for each context, scanning all of
	 * its open sessions.
	 */
	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (is_user_ta_ctx(ctx)) {
			utc = to_user_ta_ctx(ctx);

			TAILQ_FOREACH(sess, &utc->open_sessions, link) {
				if (sess->ctx == s->ctx && sess != s) {
					sess->ctx = NULL;
					count++;
				}
			}
		}
	}

	assert(count == s->ctx->ref_count);

	TAILQ_REMOVE(&tee_ctxes, s->ctx, link);
	mutex_unlock(&tee_ta_mutex);

	destroy_context(s->ctx);

	s->ctx = NULL;
}

/*
 * tee_ta_context_find - Find a loaded TA context based on a UUID (input)
 * Returns a pointer to the context, or NULL if not found
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx;

	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (memcmp(&ctx->uuid, uuid, sizeof(TEE_UUID)) == 0)
			return ctx;
	}

	return NULL;
}

/* Check if the requester (client ID) matches the session's initial client */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
	if (id == KERN_IDENTITY)
		return TEE_SUCCESS;

	if (id == NSAPP_IDENTITY) {
		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
			DMSG("nsec tries to hijack TA session");
			return TEE_ERROR_ACCESS_DENIED;
		}
		return TEE_SUCCESS;
	}

	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
		DMSG("client id mismatch");
		return TEE_ERROR_ACCESS_DENIED;
	}
	return TEE_SUCCESS;
}

/*
 * Check if invocation parameters match the TA properties
 *
 * @sess - current session handle
 * @param - invocation parameters; already identified memory references
 *          hold a valid 'mobj'.
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy)
 * - Only SDP flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
			 struct tee_ta_param *param __unused)
{
	/*
	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
	 * are rejected at the OP-TEE core entry. Hence here all TAs have
	 * the same permissions regarding memory reference parameters.
	 */
	return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
			 struct tee_ta_param *param)
{
	int n;

	/*
	 * When CFG_SECURE_DATA_PATH is enabled, the OP-TEE entry allows
	 * SHM and SDP memory references. Only TAs flagged SDP can access
	 * SDP memory.
	 */
	if (sess->ctx && sess->ctx->flags & TA_FLAG_SECURE_DATA_PATH)
		return true;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		struct param_mem *mem = &param->u[n].mem;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!mem->size)
			continue;
		if (mobj_is_sdp_mem(mem->mobj))
			return false;
	}
	return true;
}
#endif

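/*
 * Convert a relative cancellation timeout (in milliseconds) into an
 * absolute deadline in system time. For example, a current time of
 * 10 s / 600 ms and cancel_req_to = 1500 yield a deadline of
 * 12 s / 100 ms. TEE_TIMEOUT_INFINITE, a failed time read and
 * arithmetic overflow all map to the UINT32_MAX sentinel tested in
 * tee_ta_session_is_cancelled().
 */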
static void set_invoke_timeout(struct tee_ta_session *sess,
				      uint32_t cancel_req_to)
{
	TEE_Time current_time;
	TEE_Time cancel_time;

	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
		goto infinite;

	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		goto infinite;

	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
			 &cancel_time.seconds))
		goto infinite;

	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
	if (cancel_time.millis > 1000) {
		if (ADD_OVERFLOW(cancel_time.seconds, 1,
				 &cancel_time.seconds))
			goto infinite;

		cancel_time.millis -= 1000;
	}

	sess->cancel_time = cancel_time;
	return;

infinite:
	sess->cancel_time.seconds = UINT32_MAX;
	sess->cancel_time.millis = UINT32_MAX;
}

/*-----------------------------------------------------------------------------
 * Close a Trusted Application and free its resources
 *---------------------------------------------------------------------------*/
TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
				struct tee_ta_session_head *open_sessions,
				const TEE_Identity *clnt_id)
{
	struct tee_ta_session *sess;
	struct tee_ta_ctx *ctx;
	bool keep_alive;

	DMSG("csess 0x%" PRIxVA " id %u",
	     (vaddr_t)csess, csess ? csess->id : UINT_MAX);

	if (!csess)
		return TEE_ERROR_ITEM_NOT_FOUND;

	sess = tee_ta_get_session(csess->id, true, open_sessions);

	if (!sess) {
		EMSG("session 0x%" PRIxVA " to be removed is not found",
		     (vaddr_t)csess);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
		tee_ta_put_session(sess);
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
	}

	ctx = sess->ctx;
	DMSG("Destroy session");

	if (!ctx) {
		destroy_session(sess, open_sessions);
		return TEE_SUCCESS;
	}

	if (ctx->panicked) {
		destroy_session(sess, open_sessions);
	} else {
		tee_ta_set_busy(ctx);
		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
		ctx->ops->enter_close_session(sess);
		destroy_session(sess, open_sessions);
		tee_ta_clear_busy(ctx);
	}

	mutex_lock(&tee_ta_mutex);

	if (ctx->ref_count <= 0)
		panic();

	ctx->ref_count--;
	keep_alive = (ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE) &&
			(ctx->flags & TA_FLAG_SINGLE_INSTANCE);
	if (!ctx->ref_count && !keep_alive) {
		TAILQ_REMOVE(&tee_ctxes, ctx, link);
		mutex_unlock(&tee_ta_mutex);

		destroy_context(ctx);
	} else
		mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}

static TEE_Result tee_ta_init_session_with_context(struct tee_ta_ctx *ctx,
			struct tee_ta_session *s)
{
	/*
	 * If the TA isn't single-instance it should be loaded as a new
	 * instance instead of doing anything with this instance. So tell
	 * the caller that we didn't find the TA, and the caller will
	 * load a new instance.
	 */
	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * The TA is single-instance. If it isn't multi-session we can't
	 * create another session unless its reference count is zero.
	 */
	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
		return TEE_ERROR_BUSY;

	DMSG("Re-open TA %pUl", (void *)&ctx->uuid);

	ctx->ref_count++;
	s->ctx = ctx;
	return TEE_SUCCESS;
}

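/*
 * Allocate a session ID: start just above the ID of the newest open
 * session, then scan upwards (wrapping around and skipping 0, which
 * is invalid) until a free ID is found. Returns 0 only if every
 * other ID is already in use.
 */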
static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *last = NULL;
	uint32_t saved = 0;
	uint32_t id = 1;

	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
	if (last) {
		/* This value is unlikely to already be in use */
		id = last->id + 1;
		if (!id)
			id++; /* 0 is not valid */
	}

	saved = id;
	do {
		if (!tee_ta_find_session_nolock(id, open_sessions))
			return id;
		id++;
		if (!id)
			id++;
	} while (id != saved);

	return 0;
}

static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
				struct tee_ta_session_head *open_sessions,
				const TEE_UUID *uuid,
				struct tee_ta_session **sess)
{
	TEE_Result res;
	struct tee_ta_ctx *ctx;
	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

	*err = TEE_ORIGIN_TEE;
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->cancel_mask = true;
	condvar_init(&s->refc_cv);
	condvar_init(&s->lock_cv);
	s->lock_thread = THREAD_ID_INVALID;
	s->ref_count = 1;

	/*
	 * We take the global TA mutex here and hold it while doing
	 * RPC to load the TA. This big critical section should be broken
	 * down into smaller pieces.
	 */
	mutex_lock(&tee_ta_mutex);
	s->id = new_session_id(open_sessions);
	if (!s->id) {
		res = TEE_ERROR_OVERFLOW;
		goto out;
	}
	TAILQ_INSERT_TAIL(open_sessions, s, link);

	/* Look for already loaded TA */
	ctx = tee_ta_context_find(uuid);
	if (ctx) {
		res = tee_ta_init_session_with_context(ctx, s);
		if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
			goto out;
	}

	/* Look for secure partition */
	res = sec_part_init_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for pseudo TA */
	res = tee_ta_init_pseudo_ta_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto out;

	/* Look for user TA */
	res = tee_ta_init_user_ta_session(uuid, s);

out:
	if (res == TEE_SUCCESS) {
		*sess = s;
	} else {
		TAILQ_REMOVE(open_sessions, s, link);
		free(s);
	}
	mutex_unlock(&tee_ta_mutex);
	return res;
}

TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
			       struct tee_ta_session **sess,
			       struct tee_ta_session_head *open_sessions,
			       const TEE_UUID *uuid,
			       const TEE_Identity *clnt_id,
			       uint32_t cancel_req_to,
			       struct tee_ta_param *param)
{
	TEE_Result res;
	struct tee_ta_session *s = NULL;
	struct tee_ta_ctx *ctx;
	bool panicked;
	bool was_busy = false;

	res = tee_ta_init_session(err, open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("init session failed 0x%x", res);
		return res;
	}

	if (!check_params(s, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ctx = s->ctx;

	if (!ctx || ctx->panicked) {
		DMSG("panicked, call tee_ta_close_session()");
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	*sess = s;
	/* Save identity of the owner of the session */
	s->clnt_id = *clnt_id;

	if (tee_ta_try_set_busy(ctx)) {
		set_invoke_timeout(s, cancel_req_to);
		res = ctx->ops->enter_open_session(s, param, err);
		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
		was_busy = true;
	}

	panicked = ctx->panicked;

	tee_ta_put_session(s);
	if (panicked || (res != TEE_SUCCESS))
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors;
	 * panics and deadlock-avoidance (busy) failures originate from the
	 * TEE itself.
	 */
	if (panicked || was_busy)
		*err = TEE_ORIGIN_TEE;

	if (res != TEE_SUCCESS)
		EMSG("Failed. Return error 0x%x", res);

	return res;
}

TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id,
				 uint32_t cancel_req_to, uint32_t cmd,
				 struct tee_ta_param *param)
{
	TEE_Result res;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	if (!check_params(sess, param))
		return TEE_ERROR_BAD_PARAMETERS;

	if (!sess->ctx) {
		/* The context has already been destroyed */
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	} else if (sess->ctx->panicked) {
		DMSG("Panicked !");
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	tee_ta_set_busy(sess->ctx);

	set_invoke_timeout(sess, cancel_req_to);
	res = sess->ctx->ops->enter_invoke_cmd(sess, cmd, param, err);

	tee_ta_clear_busy(sess->ctx);

	if (sess->ctx->panicked) {
		destroy_ta_ctx_from_session(sess);
		*err = TEE_ORIGIN_TEE;
		return TEE_ERROR_TARGET_DEAD;
	}

	/* A short buffer is not an effective error case */
	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
		DMSG("Error: %x of %d", res, *err);

	return res;
}

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id)
{
	*err = TEE_ORIGIN_TEE;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	sess->cancel = true;
	return TEE_SUCCESS;
}

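/*
 * Cancellation checks in priority order: the cancel mask overrides
 * everything, then an explicit cancel request, then the deadline set
 * by set_invoke_timeout(), where UINT32_MAX seconds means "no
 * deadline".
 */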
bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}

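/*
 * Pseudo TAs execute in kernel mode and have no user mappings of
 * their own, so when one is on top of the session stack the context
 * of the session below it (if any) stays mapped.
 */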
static void update_current_ctx(struct thread_specific_data *tsd)
{
	struct tee_ta_ctx *ctx = NULL;
	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);

	if (s) {
		if (is_pseudo_ta_ctx(s->ctx))
			s = TAILQ_NEXT(s, link_tsd);

		if (s)
			ctx = s->ctx;
	}

	if (tsd->ctx != ctx)
		tee_mmu_set_ctx(ctx);
	/*
	 * If the current context is a user mode context it must also be
	 * the active mapping.
	 */
	if (is_user_mode_ctx(ctx) != core_mmu_user_mapping_is_active())
		panic("unexpected active mapping");
}

void tee_ta_push_current_session(struct tee_ta_session *sess)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	TAILQ_INSERT_HEAD(&tsd->sess_stack, sess, link_tsd);
	update_current_ctx(tsd);
}

struct tee_ta_session *tee_ta_pop_current_session(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();
	struct tee_ta_session *s = TAILQ_FIRST(&tsd->sess_stack);

	if (s) {
		TAILQ_REMOVE(&tsd->sess_stack, s, link_tsd);
		update_current_ctx(tsd);
	}
	return s;
}

TEE_Result tee_ta_get_current_session(struct tee_ta_session **sess)
{
	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (!s)
		return TEE_ERROR_BAD_STATE;
	*sess = s;
	return TEE_SUCCESS;
}

struct tee_ta_session *tee_ta_get_calling_session(void)
{
	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s)
		s = TAILQ_NEXT(s, link_tsd);
	return s;
}

#if defined(CFG_TA_GPROF_SUPPORT)
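/*
 * Record one PC sample in the gprof histogram. The bin index follows
 * the usual gprof convention: half the PC offset from the start of
 * the sampled region, scaled by the 16.16 fixed-point factor
 * sbuf->scale.
 */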
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
	struct tee_ta_session *s = NULL;
	struct user_ta_ctx *utc = NULL;
	struct sample_buf *sbuf = NULL;
	TEE_Result res = 0;
	size_t idx = 0;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		return;
	sbuf = s->sbuf;
	if (!sbuf || !sbuf->enabled)
		return; /* PC sampling is not enabled */

	idx = (((uint64_t)pc - sbuf->offset)/2 * sbuf->scale)/65536;
	if (idx < sbuf->nsamples) {
		utc = to_user_ta_ctx(s->ctx);
		res = tee_mmu_check_access_rights(&utc->uctx,
						  TEE_MEMORY_ACCESS_READ |
						  TEE_MEMORY_ACCESS_WRITE |
						  TEE_MEMORY_ACCESS_ANY_OWNER,
						  (uaddr_t)&sbuf->samples[idx],
						  sizeof(*sbuf->samples));
		if (res != TEE_SUCCESS)
			return;
		sbuf->samples[idx]++;
	}
	sbuf->count++;
}

static void gprof_update_session_utime(bool suspend, struct tee_ta_session *s,
				       uint64_t now)
{
	struct sample_buf *sbuf = NULL;

	sbuf = s->sbuf;
	if (!sbuf)
		return;

	if (suspend) {
		assert(sbuf->usr_entered);
		sbuf->usr += now - sbuf->usr_entered;
		sbuf->usr_entered = 0;
	} else {
		assert(!sbuf->usr_entered);
		if (!now)
			now++; /* 0 is reserved */
		sbuf->usr_entered = now;
	}
}

/*
 * Update user-mode CPU time for the current session
 * @suspend: true if session is being suspended (leaving user mode), false if
 * it is resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
	struct tee_ta_session *s = NULL;
	uint64_t now = 0;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		return;

	now = read_cntpct();

	gprof_update_session_utime(suspend, s, now);
}

void tee_ta_update_session_utime_suspend(void)
{
	tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
	tee_ta_update_session_utime(false);
}
#endif

#if defined(CFG_FTRACE_SUPPORT)
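/*
 * Keep ftrace timestamps consistent across suspend/resume: suspend
 * records the current counter value, and resume advances every
 * pending function-entry timestamp by the time spent suspended so
 * that the suspended interval isn't charged to the traced functions.
 */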
static void ftrace_update_times(bool suspend)
{
	struct tee_ta_session *s = NULL;
	struct ftrace_buf *fbuf = NULL;
	uint64_t now = 0;
	uint32_t i = 0;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		return;

	now = read_cntpct();

	fbuf = s->fbuf;
	if (!fbuf)
		return;

	if (suspend) {
		fbuf->suspend_time = now;
	} else {
		for (i = 0; i <= fbuf->ret_idx; i++)
			fbuf->begin_time[i] += now - fbuf->suspend_time;
	}
}

void tee_ta_ftrace_update_times_suspend(void)
{
	ftrace_update_times(true);
}

void tee_ta_ftrace_update_times_resume(void)
{
	ftrace_update_times(false);
}
#endif
1001