// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020, Arm Limited
 * Copyright (c) 2025, NVIDIA Corporation & AFFILIATES.
 */

#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/stmm_sp.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <malloc.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <pta_stats.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee/entry_std.h>
#include <tee/tee_obj.h>
#include <trace.h>
#include <types_ext.h>
#include <user_ta_header.h>
#include <utee_types.h>
#include <util.h>

#if defined(CFG_TA_STATS)
#define MAX_DUMP_SESS_NUM	(16)

struct tee_ta_dump_ctx {
	TEE_UUID uuid;
	uint32_t panicked;
	bool is_user_ta;
	uint32_t sess_num;
	uint32_t sess_id[MAX_DUMP_SESS_NUM];
};
#endif

/* This mutex protects the critical section in tee_ta_init_session */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
/* This condvar is used when waiting for a TA context to become initialized */
struct condvar tee_ta_init_cv = CONDVAR_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static short int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

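/*
 * The single-instance lock serializes calls into single-instance TAs
 * across threads. With CFG_CONCURRENT_SINGLE_INSTANCE_TA enabled these
 * helpers are no-ops and has_single_instance_lock() always reports
 * false, so such TAs are serialized only by the per-context busy flag
 * handled below.
 */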
#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available. */
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}

static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}
#endif

struct tee_ta_session *__noprof to_ta_session(struct ts_session *sess)
{
	assert(is_ta_ctx(sess->ctx) || is_stmm_ctx(sess->ctx));
	return container_of(sess, struct tee_ta_session, ts_sess);
}

static struct tee_ta_ctx *ts_to_ta_ctx(struct ts_ctx *ctx)
{
	if (is_ta_ctx(ctx))
		return to_ta_ctx(ctx);

	if (is_stmm_ctx(ctx))
		return &(to_stmm_ctx(ctx)->ta_ctx);

	panic("bad context");
}

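/*
 * Mark a TA context busy before entering it. Concurrent TAs are never
 * marked busy. If the calling thread already holds the single-instance
 * lock and the context is busy, waiting would deadlock, so the lock is
 * dropped and false is returned. Otherwise the call blocks until the
 * context is available and returns true.
 */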
static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	if (ctx->flags & TA_FLAG_CONCURRENT)
		return true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We're holding the single-instance lock and the
			 * TA is busy. Waiting now would only cause a
			 * deadlock, so release the lock and return false.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, we're free to
		 * wait for the TA to become available.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either it's already true or we should set it to true */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	if (ctx->flags & TA_FLAG_CONCURRENT)
		return;

	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		unlock_single_instance();

	mutex_unlock(&tee_ta_mutex);
}

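/*
 * Decrease the session reference counter. The thread unlinking a
 * session in tee_ta_unlink_session() owns the last reference and waits
 * for the count to drop to 1, which is why the condvar is signaled at 1
 * rather than at 0.
 */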
static void dec_session_ref_count(struct tee_ta_session *s)
{
	assert(s->ref_count > 0);
	s->ref_count--;
	if (s->ref_count == 1)
		condvar_signal(&s->refc_cv);
}

void tee_ta_put_session(struct tee_ta_session *s)
{
	mutex_lock(&tee_ta_mutex);

	if (s->lock_thread == thread_get_id()) {
		s->lock_thread = THREAD_ID_INVALID;
		condvar_signal(&s->lock_cv);
	}
	dec_session_ref_count(s);

	mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *found = NULL;

	TAILQ_FOREACH(s, open_sessions, link) {
		if (s->id == id) {
			found = s;
			break;
		}
	}

	return found;
}

struct tee_ta_session *tee_ta_find_session(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;

	mutex_lock(&tee_ta_mutex);

	s = tee_ta_find_session_nolock(id, open_sessions);

	mutex_unlock(&tee_ta_mutex);

	return s;
}

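/*
 * Look up a session by ID and take a reference on it. With @exclusive
 * set, also take the per-session lock, waiting until no other thread
 * holds it. Returns NULL if the session doesn't exist or is being
 * unlinked. The caller releases the session with tee_ta_put_session().
 */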
struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	mutex_lock(&tee_ta_mutex);

	while (true) {
		s = tee_ta_find_session_nolock(id, open_sessions);
		if (!s)
			break;
		if (s->unlink) {
			s = NULL;
			break;
		}
		s->ref_count++;
		if (!exclusive)
			break;

		assert(s->lock_thread != thread_get_id());

		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
			condvar_wait(&s->lock_cv, &tee_ta_mutex);

		if (s->unlink) {
			dec_session_ref_count(s);
			s = NULL;
			break;
		}

		s->lock_thread = thread_get_id();
		break;
	}

	mutex_unlock(&tee_ta_mutex);
	return s;
}

static void tee_ta_unlink_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	mutex_lock(&tee_ta_mutex);

	assert(s->ref_count >= 1);
	assert(s->lock_thread == thread_get_id());
	assert(!s->unlink);

	s->unlink = true;
	condvar_broadcast(&s->lock_cv);

	while (s->ref_count != 1)
		condvar_wait(&s->refc_cv, &tee_ta_mutex);

	TAILQ_REMOVE(open_sessions, s, link);

	mutex_unlock(&tee_ta_mutex);
}

static void destroy_session(struct tee_ta_session *s,
			    struct tee_ta_session_head *open_sessions)
{
#if defined(CFG_FTRACE_SUPPORT)
	if (s->ts_sess.ctx && s->ts_sess.ctx->ops->dump_ftrace) {
		ts_push_current_session(&s->ts_sess);
		s->ts_sess.fbuf = NULL;
		s->ts_sess.ctx->ops->dump_ftrace(s->ts_sess.ctx);
		ts_pop_current_session();
	}
#endif

	tee_ta_unlink_session(s, open_sessions);
#if defined(CFG_TA_GPROF_SUPPORT)
	free(s->ts_sess.sbuf);
#endif
	free(s);
}

static void destroy_context(struct tee_ta_ctx *ctx)
{
	DMSG("Destroy TA ctx (0x%" PRIxVA ")", (vaddr_t)ctx);

	condvar_destroy(&ctx->busy_cv);
	ctx->ts_ctx.ops->destroy(&ctx->ts_ctx);
}

/*
 * tee_ta_context_find - Find a TA context in the context list based on a
 * UUID (input)
 * Returns a pointer to the context, or NULL if none is found
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx;

	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (memcmp(&ctx->ts_ctx.uuid, uuid, sizeof(TEE_UUID)) == 0)
			return ctx;
	}

	return NULL;
}

/* Check that the requester (client ID) matches the session's initial client */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
	if (id == KERN_IDENTITY)
		return TEE_SUCCESS;

	if (id == NSAPP_IDENTITY) {
		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
			DMSG("nsec tries to hijack TA session");
			return TEE_ERROR_ACCESS_DENIED;
		}
		return TEE_SUCCESS;
	}

	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
		DMSG("client id mismatch");
		return TEE_ERROR_ACCESS_DENIED;
	}
	return TEE_SUCCESS;
}

/*
 * Check if the invocation parameters match the TA properties
 *
 * @sess - current session handle
 * @param - already identified memory references hold a valid 'mobj'.
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy).
 * - Only SDP-flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
			 struct tee_ta_param *param __unused)
{
	/*
	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
	 * are rejected at OP-TEE core entry. Hence all TAs have the same
	 * permissions regarding memory reference parameters here.
	 */
	return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
			 struct tee_ta_param *param)
{
	int n;

	/*
	 * When CFG_SECURE_DATA_PATH is enabled, OP-TEE entry allows SHM and
	 * SDP memory references. Only TAs flagged SDP can access SDP memory.
	 */
	if (sess->ts_sess.ctx &&
	    ts_to_ta_ctx(sess->ts_sess.ctx)->flags & TA_FLAG_SECURE_DATA_PATH)
		return true;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		struct param_mem *mem = &param->u[n].mem;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!mem->size)
			continue;
		if (mobj_is_sdp_mem(mem->mobj))
			return false;
	}
	return true;
}
#endif

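/*
 * Convert a relative cancellation timeout in milliseconds into an
 * absolute system time stored in the session. TEE_TIMEOUT_INFINITE, a
 * failed system time query and arithmetic overflow all map to the
 * sentinel { UINT32_MAX, UINT32_MAX }, meaning the request never times
 * out.
 */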
static void set_invoke_timeout(struct tee_ta_session *sess,
			       uint32_t cancel_req_to)
{
	TEE_Time current_time;
	TEE_Time cancel_time;

	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
		goto infinite;

	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		goto infinite;

	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
			 &cancel_time.seconds))
		goto infinite;

	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
	if (cancel_time.millis > 1000) {
		/* Carry the extra second without losing the sum above */
		if (ADD_OVERFLOW(cancel_time.seconds, 1,
				 &cancel_time.seconds))
			goto infinite;

		cancel_time.millis -= 1000;
	}

	sess->cancel_time = cancel_time;
	return;

infinite:
	sess->cancel_time.seconds = UINT32_MAX;
	sess->cancel_time.millis = UINT32_MAX;
}

/*-----------------------------------------------------------------------------
 * Close a Trusted Application session and free available resources
 *---------------------------------------------------------------------------*/
TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
				struct tee_ta_session_head *open_sessions,
				const TEE_Identity *clnt_id)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool keep_crashed = false;
	bool keep_alive = false;

	DMSG("csess 0x%" PRIxVA " id %u",
	     (vaddr_t)csess, csess ? csess->id : UINT_MAX);

	if (!csess)
		return TEE_ERROR_ITEM_NOT_FOUND;

	sess = tee_ta_get_session(csess->id, true, open_sessions);

	if (!sess) {
		EMSG("session 0x%" PRIxVA " to be removed is not found",
		     (vaddr_t)csess);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
		tee_ta_put_session(sess);
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
	}

	DMSG("Destroy session");

	ts_ctx = sess->ts_sess.ctx;
	if (!ts_ctx) {
		destroy_session(sess, open_sessions);
		return TEE_SUCCESS;
	}

	ctx = ts_to_ta_ctx(ts_ctx);
	if (ctx->panicked) {
		destroy_session(sess, open_sessions);
	} else {
		tee_ta_set_busy(ctx);
		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
		ts_ctx->ops->enter_close_session(&sess->ts_sess);
		destroy_session(sess, open_sessions);
		tee_ta_clear_busy(ctx);
	}

	mutex_lock(&tee_ta_mutex);

	if (ctx->ref_count <= 0)
		panic();

	ctx->ref_count--;
	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		keep_alive = ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE;
	if (keep_alive)
		keep_crashed = ctx->flags & TA_FLAG_INSTANCE_KEEP_CRASHED;
	if (!ctx->ref_count &&
	    ((ctx->panicked && !keep_crashed) || !keep_alive)) {
		if (!ctx->is_releasing) {
			TAILQ_REMOVE(&tee_ctxes, ctx, link);
			ctx->is_releasing = true;
		}
		mutex_unlock(&tee_ta_mutex);

		destroy_context(ctx);
	} else
		mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}

static TEE_Result tee_ta_init_session_with_context(struct tee_ta_session *s,
						   const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx = NULL;

	while (true) {
		ctx = tee_ta_context_find(uuid);
		if (!ctx)
			return TEE_ERROR_ITEM_NOT_FOUND;

		if (!ctx->is_initializing)
			break;
		/*
		 * Context is still initializing, wait here until it's
		 * fully initialized. Note that we're searching for the
		 * context again since it may have been removed while we
		 * were sleeping.
		 */
		condvar_wait(&tee_ta_init_cv, &tee_ta_mutex);
	}

	/*
	 * If the trusted service is not a single-instance service (e.g. is
	 * a multi-instance TA) it should be loaded as a new instance instead
	 * of doing anything with this instance. So tell the caller that we
	 * didn't find the TA, and the caller will load a new instance.
	 */
	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * The trusted service is single-instance; if it isn't multi-session
	 * we can't create another session unless its reference count is zero.
	 */
	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
		return TEE_ERROR_BUSY;

	DMSG("Re-open trusted service %pUl", (void *)&ctx->ts_ctx.uuid);

	ctx->ref_count++;
	s->ts_sess.ctx = &ctx->ts_ctx;
	s->ts_sess.handle_scall = s->ts_sess.ctx->ops->handle_scall;
	return TEE_SUCCESS;
}

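/*
 * Allocate a session ID that isn't already in use. Start just above the
 * ID of the newest session and search with wrap-around, skipping 0
 * which is reserved as invalid. Returns 0 if all IDs are in use.
 */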
static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *last = NULL;
	uint32_t saved = 0;
	uint32_t id = 1;

	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
	if (last) {
		/* This value is less likely to be already used */
		id = last->id + 1;
		if (!id)
			id++; /* 0 is not valid */
	}

	saved = id;
	do {
		if (!tee_ta_find_session_nolock(id, open_sessions))
			return id;
		id++;
		if (!id)
			id++;
	} while (id != saved);

	return 0;
}

static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
				      struct tee_ta_session_head *open_sessions,
				      const TEE_UUID *uuid,
				      struct tee_ta_session **sess)
{
	TEE_Result res;
	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

	*err = TEE_ORIGIN_TEE;
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->cancel_mask = true;
	condvar_init(&s->refc_cv);
	condvar_init(&s->lock_cv);
	s->lock_thread = THREAD_ID_INVALID;
	s->ref_count = 1;

	mutex_lock(&tee_ta_mutex);
	s->id = new_session_id(open_sessions);
	if (!s->id) {
		res = TEE_ERROR_OVERFLOW;
		goto err_mutex_unlock;
	}

	TAILQ_INSERT_TAIL(open_sessions, s, link);

	/* Look for already loaded TA */
	res = tee_ta_init_session_with_context(s, uuid);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND) {
		mutex_unlock(&tee_ta_mutex);
		goto out;
	}

	/* Look for secure partition */
	res = stmm_init_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND) {
		mutex_unlock(&tee_ta_mutex);
		if (res == TEE_SUCCESS)
			res = stmm_complete_session(s);

		goto out;
	}

	/* Look for pseudo TA */
	res = tee_ta_init_pseudo_ta_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND) {
		mutex_unlock(&tee_ta_mutex);
		goto out;
	}

	/* Look for user TA */
	res = tee_ta_init_user_ta_session(uuid, s);
	mutex_unlock(&tee_ta_mutex);
	if (res == TEE_SUCCESS)
		res = tee_ta_complete_user_ta_session(s);

out:
	if (!res) {
		*sess = s;
		return TEE_SUCCESS;
	}

	mutex_lock(&tee_ta_mutex);
	TAILQ_REMOVE(open_sessions, s, link);
err_mutex_unlock:
	mutex_unlock(&tee_ta_mutex);
	free(s);
	return res;
}

static void maybe_release_ta_ctx(struct tee_ta_ctx *ctx)
{
	bool was_releasing = false;
	bool keep_crashed = false;
	bool keep_alive = false;

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		keep_alive = ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE;
	if (keep_alive)
		keep_crashed = ctx->flags & TA_FLAG_INSTANCE_KEEP_CRASHED;

	/*
	 * Keep panicked TAs with SINGLE_INSTANCE, KEEP_ALIVE, and KEEP_CRASHED
	 * flags in the context list to maintain their panicked status and
	 * prevent respawning.
	 */
	if (!keep_crashed) {
		mutex_lock(&tee_ta_mutex);
		was_releasing = ctx->is_releasing;
		ctx->is_releasing = true;
		if (!was_releasing) {
			DMSG("Releasing panicked TA ctx");
			TAILQ_REMOVE(&tee_ctxes, ctx, link);
		}
		mutex_unlock(&tee_ta_mutex);

		if (!was_releasing)
			ctx->ts_ctx.ops->release_state(&ctx->ts_ctx);
	}
}

TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
			       struct tee_ta_session **sess,
			       struct tee_ta_session_head *open_sessions,
			       const TEE_UUID *uuid,
			       const TEE_Identity *clnt_id,
			       uint32_t cancel_req_to,
			       struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_session *s = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool panicked = false;
	bool was_busy = false;

	res = tee_ta_init_session(err, open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("init session failed 0x%x", res);
		return res;
	}

	if (!check_params(s, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ts_ctx = s->ts_sess.ctx;
	ctx = ts_to_ta_ctx(ts_ctx);

	if (tee_ta_try_set_busy(ctx)) {
		if (!ctx->panicked) {
			/* Save identity of the owner of the session */
			s->clnt_id = *clnt_id;
			s->param = param;
			set_invoke_timeout(s, cancel_req_to);
			res = ts_ctx->ops->enter_open_session(&s->ts_sess);
			s->param = NULL;
		}

		panicked = ctx->panicked;
		if (panicked) {
			maybe_release_ta_ctx(ctx);
			res = TEE_ERROR_TARGET_DEAD;
		}

		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
		was_busy = true;
	}

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors;
	 * panicked or busy TAs are reported with TEE_ORIGIN_TEE.
	 */
	if (panicked || was_busy)
		*err = TEE_ORIGIN_TEE;
	else
		*err = s->err_origin;

	tee_ta_put_session(s);
	if (panicked || res != TEE_SUCCESS)
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

	if (!res)
		*sess = s;
	else
		EMSG("Failed for TA %pUl. Return error %#"PRIx32, uuid, res);

	return res;
}

TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id,
				 uint32_t cancel_req_to, uint32_t cmd,
				 struct tee_ta_param *param)
{
	struct tee_ta_ctx *ta_ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	TEE_Result res = TEE_SUCCESS;
	bool panicked = false;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	if (!check_params(sess, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ts_ctx = sess->ts_sess.ctx;
	ta_ctx = ts_to_ta_ctx(ts_ctx);

	tee_ta_set_busy(ta_ctx);

	if (!ta_ctx->panicked) {
		sess->param = param;
		set_invoke_timeout(sess, cancel_req_to);
		res = ts_ctx->ops->enter_invoke_cmd(&sess->ts_sess, cmd);
		sess->param = NULL;
	}

	panicked = ta_ctx->panicked;
	if (panicked) {
		maybe_release_ta_ctx(ta_ctx);
		res = TEE_ERROR_TARGET_DEAD;
	}

	tee_ta_clear_busy(ta_ctx);

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors;
	 * a panicked TA is reported with TEE_ORIGIN_TEE.
	 */
	if (panicked)
		*err = TEE_ORIGIN_TEE;
	else
		*err = sess->err_origin;

	/* A short buffer is expected behavior, not an effective error */
	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
		DMSG("Error: %x of %d", res, *err);

	return res;
}

#if defined(CFG_TA_STATS)
static TEE_Result dump_ta_memstats(struct tee_ta_session *s,
				   struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool panicked = false;

	ts_ctx = s->ts_sess.ctx;
	if (!ts_ctx)
		return TEE_ERROR_ITEM_NOT_FOUND;

	ctx = ts_to_ta_ctx(ts_ctx);

	if (ctx->is_initializing)
		return TEE_ERROR_BAD_STATE;

	if (tee_ta_try_set_busy(ctx)) {
		if (!ctx->panicked) {
			s->param = param;
			set_invoke_timeout(s, TEE_TIMEOUT_INFINITE);
			res = ts_ctx->ops->dump_mem_stats(&s->ts_sess);
			s->param = NULL;
		}

		panicked = ctx->panicked;
		if (panicked) {
			maybe_release_ta_ctx(ctx);
			res = TEE_ERROR_TARGET_DEAD;
		}

		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
	}

	return res;
}

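/*
 * Snapshot the user TA contexts and the IDs of their sessions opened
 * from the non-secure world into @dump_ctx. Only session IDs are
 * recorded, not pointers: sessions are looked up again when the
 * statistics are collected. Called with tee_ta_mutex held.
 */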
static void init_dump_ctx(struct tee_ta_dump_ctx *dump_ctx)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_ctx *ctx = NULL;
	unsigned int n = 0;

	nsec_sessions_list_head(&open_sessions);
	/*
	 * Scan all sessions opened from the non-secure side by searching
	 * through all available TA instances and, for each context, all of
	 * its opened sessions.
	 */
	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		unsigned int cnt = 0;

		if (!is_user_ta_ctx(&ctx->ts_ctx))
			continue;

		memcpy(&dump_ctx[n].uuid, &ctx->ts_ctx.uuid,
		       sizeof(ctx->ts_ctx.uuid));
		dump_ctx[n].panicked = ctx->panicked;
		dump_ctx[n].is_user_ta = is_user_ta_ctx(&ctx->ts_ctx);
		TAILQ_FOREACH(sess, open_sessions, link) {
			if (sess->ts_sess.ctx == &ctx->ts_ctx) {
				if (cnt == MAX_DUMP_SESS_NUM)
					break;

				dump_ctx[n].sess_id[cnt] = sess->id;
				cnt++;
			}
		}

		dump_ctx[n].sess_num = cnt;
		n++;
	}
}

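/*
 * Fill @dump_stats with per-TA heap statistics. For each dumped TA one
 * live session is borrowed to invoke the context's dump_mem_stats
 * handler; TAs for which no session can be acquired are skipped.
 */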
static TEE_Result dump_ta_stats(struct tee_ta_dump_ctx *dump_ctx,
				struct pta_stats_ta *dump_stats,
				size_t ta_count)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_param param = { };
	unsigned int i = 0;
	unsigned int j = 0;

	nsec_sessions_list_head(&open_sessions);

	for (i = 0; i < ta_count; i++) {
		struct pta_stats_ta *stats = &dump_stats[i];

		memcpy(&stats->uuid, &dump_ctx[i].uuid,
		       sizeof(dump_ctx[i].uuid));
		stats->panicked = dump_ctx[i].panicked;
		stats->sess_num = dump_ctx[i].sess_num;

		/* Find a session from dump context */
		for (j = 0, sess = NULL; j < dump_ctx[i].sess_num && !sess; j++)
			sess = tee_ta_get_session(dump_ctx[i].sess_id[j], true,
						  open_sessions);

		if (!sess)
			continue;
		/* If the session exists, get its heap stats */
		memset(&param, 0, sizeof(struct tee_ta_param));
		param.types = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_NONE);
		res = dump_ta_memstats(sess, &param);
		if (res == TEE_SUCCESS) {
			stats->heap.allocated = param.u[0].val.a;
			stats->heap.max_allocated = param.u[0].val.b;
			stats->heap.size = param.u[1].val.a;
			stats->heap.num_alloc_fail = param.u[1].val.b;
			stats->heap.biggest_alloc_fail = param.u[2].val.a;
			stats->heap.biggest_alloc_fail_used = param.u[2].val.b;
		} else {
			memset(&stats->heap, 0, sizeof(stats->heap));
		}
		tee_ta_put_session(sess);
	}

	return TEE_SUCCESS;
}


TEE_Result tee_ta_instance_stats(void *buf, size_t *buf_size)
{
	TEE_Result res = TEE_SUCCESS;
	struct pta_stats_ta *dump_stats = NULL;
	struct tee_ta_dump_ctx *dump_ctx = NULL;
	struct tee_ta_ctx *ctx = NULL;
	size_t sz = 0;
	size_t ta_count = 0;

	if (!buf_size)
		return TEE_ERROR_BAD_PARAMETERS;

	mutex_lock(&tee_ta_mutex);

	/* Go through all available TAs and compute the required buffer size. */
	TAILQ_FOREACH(ctx, &tee_ctxes, link)
		if (is_user_ta_ctx(&ctx->ts_ctx))
			ta_count++;

	sz = sizeof(struct pta_stats_ta) * ta_count;
	if (!sz) {
		/* sz == 0 means there are no user TAs; report not found. */
		res = TEE_ERROR_ITEM_NOT_FOUND;
	} else if (!buf || *buf_size < sz) {
		/*
		 * A NULL buf or a size smaller than required means the
		 * caller is querying the needed buffer size, so update
		 * *buf_size.
		 */
		*buf_size = sz;
		res = TEE_ERROR_SHORT_BUFFER;
	} else if (!IS_ALIGNED_WITH_TYPE(buf, uint32_t)) {
		DMSG("Data alignment");
		res = TEE_ERROR_BAD_PARAMETERS;
	} else {
		dump_stats = (struct pta_stats_ta *)buf;
		dump_ctx = malloc(sizeof(struct tee_ta_dump_ctx) * ta_count);
		if (!dump_ctx)
			res = TEE_ERROR_OUT_OF_MEMORY;
		else
			init_dump_ctx(dump_ctx);
	}
	mutex_unlock(&tee_ta_mutex);

	if (res != TEE_SUCCESS)
		return res;

	/* Dump user TA stats by iterating over dump_ctx[] */
	res = dump_ta_stats(dump_ctx, dump_stats, ta_count);
	if (res == TEE_SUCCESS)
		*buf_size = sz;

	free(dump_ctx);
	return res;
}
#endif

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id)
{
	*err = TEE_ORIGIN_TEE;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	sess->cancel = true;
	return TEE_SUCCESS;
}

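/*
 * A session counts as cancelled only when cancellations are unmasked
 * and either an explicit cancel request is pending or the invocation
 * timeout armed by set_invoke_timeout() has expired. A cancel_time
 * seconds value of UINT32_MAX means no timeout is armed.
 */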
bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}

#if defined(CFG_TA_GPROF_SUPPORT)
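/*
 * Record one PC sample in the gprof histogram of the current session.
 * The bucket index follows gprof's convention: the PC offset into the
 * profiled range is halved and scaled by the 16.16 fixed-point factor
 * in sbuf->scale. The histogram lives in TA memory, so access rights
 * are checked before writing; samples falling outside the buffer still
 * bump the total count.
 */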
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
	struct ts_session *s = ts_get_current_session();
	struct user_ta_ctx *utc = NULL;
	struct sample_buf *sbuf = NULL;
	TEE_Result res = 0;
	size_t idx = 0;

	sbuf = s->sbuf;
	if (!sbuf || !sbuf->enabled)
		return; /* PC sampling is not enabled */

	idx = (((uint64_t)pc - sbuf->offset) / 2 * sbuf->scale) / 65536;
	if (idx < sbuf->nsamples) {
		utc = to_user_ta_ctx(s->ctx);
		res = vm_check_access_rights(&utc->uctx,
					     TEE_MEMORY_ACCESS_READ |
					     TEE_MEMORY_ACCESS_WRITE |
					     TEE_MEMORY_ACCESS_ANY_OWNER,
					     (uaddr_t)&sbuf->samples[idx],
					     sizeof(*sbuf->samples));
		if (res != TEE_SUCCESS)
			return;
		sbuf->samples[idx]++;
	}
	sbuf->count++;
}

static void gprof_update_session_utime(bool suspend, struct ts_session *s,
				       uint64_t now)
{
	struct sample_buf *sbuf = s->sbuf;

	if (!sbuf)
		return;

	if (suspend) {
		assert(sbuf->usr_entered);
		sbuf->usr += now - sbuf->usr_entered;
		sbuf->usr_entered = 0;
	} else {
		assert(!sbuf->usr_entered);
		if (!now)
			now++; /* 0 is reserved */
		sbuf->usr_entered = now;
	}
}

/*
 * Update user-mode CPU time for the current session
 * @suspend: true if session is being suspended (leaving user mode), false if
 * it is resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
	struct ts_session *s = ts_get_current_session();
	uint64_t now = barrier_read_counter_timer();

	gprof_update_session_utime(suspend, s, now);
}

void tee_ta_update_session_utime_suspend(void)
{
	tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
	tee_ta_update_session_utime(false);
}
#endif

#if defined(CFG_FTRACE_SUPPORT)
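/*
 * Compensate ftrace timestamps for the time a session spends suspended:
 * the counter value is recorded on suspend, and on resume the pending
 * function entry times are shifted forward by the time spent away, so
 * reported durations only cover time actually spent in the TA.
 */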
static void ftrace_update_times(bool suspend)
{
	struct ts_session *s = ts_get_current_session_may_fail();
	struct ftrace_buf *fbuf = NULL;
	uint64_t now = 0;
	uint32_t i = 0;

	if (!s)
		return;

	now = barrier_read_counter_timer();

	fbuf = s->fbuf;
	if (!fbuf)
		return;

	if (suspend) {
		fbuf->suspend_time = now;
	} else {
		for (i = 0; i <= fbuf->ret_idx; i++)
			fbuf->begin_time[i] += now - fbuf->suspend_time;
	}
}

void tee_ta_ftrace_update_times_suspend(void)
{
	ftrace_update_times(true);
}

void tee_ta_ftrace_update_times_resume(void)
{
	ftrace_update_times(false);
}
#endif

bool __noprof is_ta_ctx(struct ts_ctx *ctx)
{
	return is_user_ta_ctx(ctx) || is_pseudo_ta_ctx(ctx);
}