// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020, Arm Limited
 * Copyright (c) 2025, NVIDIA Corporation & AFFILIATES.
 */

#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/stmm_sp.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <malloc.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <pta_stats.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee/entry_std.h>
#include <tee/tee_obj.h>
#include <trace.h>
#include <types_ext.h>
#include <user_ta_header.h>
#include <utee_types.h>
#include <util.h>

#if defined(CFG_TA_STATS)
#define MAX_DUMP_SESS_NUM	(16)

struct tee_ta_dump_ctx {
	TEE_UUID uuid;
	uint32_t panicked;
	bool is_user_ta;
	uint32_t sess_num;
	uint32_t sess_id[MAX_DUMP_SESS_NUM];
};
#endif

/* This mutex protects the critical section in tee_ta_init_session */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
/* This condvar is used when waiting for a TA context to become initialized */
struct condvar tee_ta_init_cv = CONDVAR_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static short int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

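/*
 * When CFG_CONCURRENT_SINGLE_INSTANCE_TA is enabled, the global
 * single-instance lock below is compiled out: the lock/unlock helpers
 * become no-ops and has_single_instance_lock() always reports false, so
 * single-instance TAs are serialized only by their per-context busy flag
 * in tee_ta_try_set_busy().
 */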
#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available. */
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}

static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}
#endif

struct tee_ta_session *__noprof to_ta_session(struct ts_session *sess)
{
	assert(is_ta_ctx(sess->ctx) || is_stmm_ctx(sess->ctx));
	return container_of(sess, struct tee_ta_session, ts_sess);
}

static struct tee_ta_ctx *ts_to_ta_ctx(struct ts_ctx *ctx)
{
	if (is_ta_ctx(ctx))
		return to_ta_ctx(ctx);

	if (is_stmm_ctx(ctx))
		return &(to_stmm_ctx(ctx)->ta_ctx);

	panic("bad context");
}

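/*
 * Mark the TA context busy before entering it. Concurrent TAs are never
 * marked busy. For non-concurrent TAs, return false instead of blocking
 * when waiting would deadlock, i.e. when this thread already holds the
 * single-instance lock while the context is busy in another thread.
 */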
static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	if (ctx->flags & TA_FLAG_CONCURRENT)
		return true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We're holding the single-instance lock and the
			 * TA is busy. Waiting here would deadlock, so
			 * release the lock and return false instead.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, we're free to
		 * wait for the TA to become available.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either it's already true or we should set it to true */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	if (ctx->flags & TA_FLAG_CONCURRENT)
		return;

	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		unlock_single_instance();

	mutex_unlock(&tee_ta_mutex);
}

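/*
 * Drop one reference on the session. Signal refc_cv when the count reaches
 * 1: at that point the only remaining reference belongs to the thread
 * blocked in tee_ta_unlink_session() waiting to tear the session down.
 */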
static void dec_session_ref_count(struct tee_ta_session *s)
{
	assert(s->ref_count > 0);
	s->ref_count--;
	if (s->ref_count == 1)
		condvar_signal(&s->refc_cv);
}

void tee_ta_put_session(struct tee_ta_session *s)
{
	mutex_lock(&tee_ta_mutex);

	if (s->lock_thread == thread_get_id()) {
		s->lock_thread = THREAD_ID_INVALID;
		condvar_signal(&s->lock_cv);
	}
	dec_session_ref_count(s);

	mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *found = NULL;

	TAILQ_FOREACH(s, open_sessions, link) {
		if (s->id == id) {
			found = s;
			break;
		}
	}

	return found;
}

struct tee_ta_session *tee_ta_find_session(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;

	mutex_lock(&tee_ta_mutex);

	s = tee_ta_find_session_nolock(id, open_sessions);

	mutex_unlock(&tee_ta_mutex);

	return s;
}

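/*
 * Look up a session by ID and take a reference on it. With @exclusive set,
 * also take the per-session lock, sleeping until any other locking thread
 * has released it. Returns NULL if the session is gone or being unlinked.
 */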
struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	mutex_lock(&tee_ta_mutex);

	while (true) {
		s = tee_ta_find_session_nolock(id, open_sessions);
		if (!s)
			break;
		if (s->unlink) {
			s = NULL;
			break;
		}
		s->ref_count++;
		if (!exclusive)
			break;

		assert(s->lock_thread != thread_get_id());

		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
			condvar_wait(&s->lock_cv, &tee_ta_mutex);

		if (s->unlink) {
			dec_session_ref_count(s);
			s = NULL;
			break;
		}

		s->lock_thread = thread_get_id();
		break;
	}

	mutex_unlock(&tee_ta_mutex);
	return s;
}

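/*
 * Mark the session for removal and wait until this thread holds the last
 * reference (ref_count == 1) before taking it off the open-sessions list.
 * The caller must hold the exclusive session lock.
 */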
static void tee_ta_unlink_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	mutex_lock(&tee_ta_mutex);

	assert(s->ref_count >= 1);
	assert(s->lock_thread == thread_get_id());
	assert(!s->unlink);

	s->unlink = true;
	condvar_broadcast(&s->lock_cv);

	while (s->ref_count != 1)
		condvar_wait(&s->refc_cv, &tee_ta_mutex);

	TAILQ_REMOVE(open_sessions, s, link);

	mutex_unlock(&tee_ta_mutex);
}

static void dump_ftrace(struct tee_ta_session *s __maybe_unused)
{
#if defined(CFG_FTRACE_SUPPORT)
	struct ts_ctx *ts_ctx = s->ts_sess.ctx;

	if (ts_ctx && ts_ctx->ops->dump_ftrace) {
		ts_push_current_session(&s->ts_sess);
		ts_ctx->ops->dump_ftrace(ts_ctx);
		ts_pop_current_session();
	}
#endif
}

static void destroy_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	dump_ftrace(s);

	tee_ta_unlink_session(s, open_sessions);
#if defined(CFG_TA_GPROF_SUPPORT)
	free(s->ts_sess.sbuf);
#endif
	free(s);
}

static void destroy_context(struct tee_ta_ctx *ctx)
{
	DMSG("Destroy TA ctx (0x%" PRIxVA ")", (vaddr_t)ctx);

	condvar_destroy(&ctx->busy_cv);
	ctx->ts_ctx.ops->destroy(&ctx->ts_ctx);
}

/*
 * tee_ta_context_find - Find a TA context in the context list based on a
 * UUID (input). Returns a pointer to the context, or NULL if not found.
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx;

	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (memcmp(&ctx->ts_ctx.uuid, uuid, sizeof(TEE_UUID)) == 0)
			return ctx;
	}

	return NULL;
}

/* Check if the requester (client ID) matches the session's initial client */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
	if (id == KERN_IDENTITY)
		return TEE_SUCCESS;

	if (id == NSAPP_IDENTITY) {
		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
			DMSG("nsec tries to hijack TA session");
			return TEE_ERROR_ACCESS_DENIED;
		}
		return TEE_SUCCESS;
	}

	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
		DMSG("client id mismatch");
		return TEE_ERROR_ACCESS_DENIED;
	}
	return TEE_SUCCESS;
}

/*
 * Check if invocation parameters match the TA properties
 *
 * @sess - current session handle
 * @param - already identified memory references hold a valid 'mobj'.
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy).
 * - Only SDP flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
			 struct tee_ta_param *param __unused)
{
	/*
	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
	 * are rejected at OP-TEE core entry. Hence here all TAs have the
	 * same permissions regarding memory reference parameters.
	 */
	return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
			 struct tee_ta_param *param)
{
	int n;

	/*
	 * When CFG_SECURE_DATA_PATH is enabled, OP-TEE entry allows SHM and
	 * SDP memory references. Only TAs flagged SDP can access SDP memory.
	 */
	if (sess->ts_sess.ctx &&
	    ts_to_ta_ctx(sess->ts_sess.ctx)->flags & TA_FLAG_SECURE_DATA_PATH)
		return true;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		struct param_mem *mem = &param->u[n].mem;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!mem->size)
			continue;
		if (mobj_is_sdp_mem(mem->mobj))
			return false;
	}
	return true;
}
#endif

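/*
 * Convert a relative cancellation timeout in milliseconds into an absolute
 * deadline in sess->cancel_time. For example, at current time 10 s / 800 ms
 * a 500 ms timeout yields a deadline of 11 s / 300 ms. TEE_TIMEOUT_INFINITE,
 * a failed time read, or an overflow all disable the deadline by setting it
 * to {UINT32_MAX, UINT32_MAX}.
 */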
static void set_invoke_timeout(struct tee_ta_session *sess,
			       uint32_t cancel_req_to)
{
	TEE_Time current_time;
	TEE_Time cancel_time;

	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
		goto infinite;

	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		goto infinite;

	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
			 &cancel_time.seconds))
		goto infinite;

	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
	if (cancel_time.millis > 1000) {
		/* Carry the millisecond overflow into the seconds field */
		if (ADD_OVERFLOW(cancel_time.seconds, 1,
				 &cancel_time.seconds))
			goto infinite;

		cancel_time.millis -= 1000;
	}

	sess->cancel_time = cancel_time;
	return;

infinite:
	sess->cancel_time.seconds = UINT32_MAX;
	sess->cancel_time.millis = UINT32_MAX;
}

/*-----------------------------------------------------------------------------
 * Close a Trusted Application and free available resources
 *---------------------------------------------------------------------------*/
TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
				struct tee_ta_session_head *open_sessions,
				const TEE_Identity *clnt_id)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool keep_crashed = false;
	bool keep_alive = false;

	DMSG("csess 0x%" PRIxVA " id %u",
	     (vaddr_t)csess, csess ? csess->id : UINT_MAX);

	if (!csess)
		return TEE_ERROR_ITEM_NOT_FOUND;

	sess = tee_ta_get_session(csess->id, true, open_sessions);

	if (!sess) {
		EMSG("session 0x%" PRIxVA " to be removed is not found",
		     (vaddr_t)csess);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
		tee_ta_put_session(sess);
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
	}

	DMSG("Destroy session");

	ts_ctx = sess->ts_sess.ctx;
	if (!ts_ctx) {
		destroy_session(sess, open_sessions);
		return TEE_SUCCESS;
	}

	ctx = ts_to_ta_ctx(ts_ctx);
	if (ctx->panicked) {
		destroy_session(sess, open_sessions);
	} else {
		tee_ta_set_busy(ctx);
		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
		ts_ctx->ops->enter_close_session(&sess->ts_sess);
		destroy_session(sess, open_sessions);
		tee_ta_clear_busy(ctx);
	}

	mutex_lock(&tee_ta_mutex);

	if (ctx->ref_count <= 0)
		panic();

	ctx->ref_count--;
	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		keep_alive = ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE;
	if (keep_alive)
		keep_crashed = ctx->flags & TA_FLAG_INSTANCE_KEEP_CRASHED;
	if (!ctx->ref_count &&
	    ((ctx->panicked && !keep_crashed) || !keep_alive)) {
		if (!ctx->is_releasing) {
			TAILQ_REMOVE(&tee_ctxes, ctx, link);
			ctx->is_releasing = true;
		}
		mutex_unlock(&tee_ta_mutex);

		destroy_context(ctx);
	} else
		mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}

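/*
 * Try to bind the new session to an already loaded TA instance matching
 * @uuid, waiting for an instance that is still initializing. Returns
 * TEE_ERROR_ITEM_NOT_FOUND when the caller should load a fresh instance
 * instead. Called with tee_ta_mutex held.
 */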
static TEE_Result tee_ta_init_session_with_context(struct tee_ta_session *s,
						   const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx = NULL;

	while (true) {
		ctx = tee_ta_context_find(uuid);
		if (!ctx)
			return TEE_ERROR_ITEM_NOT_FOUND;

		if (!ctx->is_initializing)
			break;
		/*
		 * Context is still initializing, wait here until it's
		 * fully initialized. Note that we're searching for the
		 * context again since it may have been removed while we
		 * were sleeping.
		 */
		condvar_wait(&tee_ta_init_cv, &tee_ta_mutex);
	}

	/*
	 * If the trusted service is not a single-instance service (e.g. is
	 * a multi-instance TA) it should be loaded as a new instance instead
	 * of doing anything with this instance. So tell the caller that we
	 * didn't find the TA; the caller will then load a new instance.
	 */
	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * The trusted service is single-instance. If it isn't multi-session
	 * we can't create another session unless its reference count is zero.
	 */
	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
		return TEE_ERROR_BUSY;

	DMSG("Re-open trusted service %pUl", (void *)&ctx->ts_ctx.uuid);

	ctx->ref_count++;
	s->ts_sess.ctx = &ctx->ts_ctx;
	s->ts_sess.handle_scall = s->ts_sess.ctx->ops->handle_scall;
	return TEE_SUCCESS;
}

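/*
 * Allocate a non-zero session ID, starting just above the ID of the last
 * session in the list and wrapping around (skipping 0) until an unused ID
 * is found. Returns 0 if the whole ID space is exhausted. Called with
 * tee_ta_mutex held.
 */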
static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *last = NULL;
	uint32_t saved = 0;
	uint32_t id = 1;

	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
	if (last) {
		/* This value is less likely to be already used */
		id = last->id + 1;
		if (!id)
			id++; /* 0 is not valid */
	}

	saved = id;
	do {
		if (!tee_ta_find_session_nolock(id, open_sessions))
			return id;
		id++;
		if (!id)
			id++;
	} while (id != saved);

	return 0;
}

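/*
 * Create a new session and bind it to a trusted service, trying each kind
 * in turn: an already loaded TA instance, a secure partition (StMM), a
 * pseudo TA and finally a user TA loaded from storage. On success the
 * session is left on @open_sessions; on failure it is removed and freed.
 */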
static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
				      struct tee_ta_session_head *open_sessions,
				      const TEE_UUID *uuid,
				      struct tee_ta_session **sess)
{
	TEE_Result res;
	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

	*err = TEE_ORIGIN_TEE;
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->cancel_mask = true;
	condvar_init(&s->refc_cv);
	condvar_init(&s->lock_cv);
	s->lock_thread = THREAD_ID_INVALID;
	s->ref_count = 1;

	mutex_lock(&tee_ta_mutex);
	s->id = new_session_id(open_sessions);
	if (!s->id) {
		res = TEE_ERROR_OVERFLOW;
		goto err_mutex_unlock;
	}

	TAILQ_INSERT_TAIL(open_sessions, s, link);

	/* Look for already loaded TA */
	res = tee_ta_init_session_with_context(s, uuid);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND) {
		mutex_unlock(&tee_ta_mutex);
		goto out;
	}

	/* Look for secure partition */
	res = stmm_init_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND) {
		mutex_unlock(&tee_ta_mutex);
		if (res == TEE_SUCCESS)
			res = stmm_complete_session(s);

		goto out;
	}

	/* Look for pseudo TA */
	res = tee_ta_init_pseudo_ta_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND) {
		mutex_unlock(&tee_ta_mutex);
		goto out;
	}

	/* Look for user TA */
	res = tee_ta_init_user_ta_session(uuid, s);
	mutex_unlock(&tee_ta_mutex);
	if (res == TEE_SUCCESS)
		res = tee_ta_complete_user_ta_session(s);

out:
	if (!res) {
		*sess = s;
		return TEE_SUCCESS;
	}

	mutex_lock(&tee_ta_mutex);
	TAILQ_REMOVE(open_sessions, s, link);
err_mutex_unlock:
	mutex_unlock(&tee_ta_mutex);
	free(s);
	return res;
}

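/*
 * Release the state of a panicked TA context unless it is flagged
 * SINGLE_INSTANCE + KEEP_ALIVE + KEEP_CRASHED, in which case it stays in
 * the context list so its panicked status persists and the TA is not
 * respawned.
 */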
static void maybe_release_ta_ctx(struct tee_ta_ctx *ctx)
{
	bool was_releasing = false;
	bool keep_crashed = false;
	bool keep_alive = false;

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		keep_alive = ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE;
	if (keep_alive)
		keep_crashed = ctx->flags & TA_FLAG_INSTANCE_KEEP_CRASHED;

	/*
	 * Keep panicked TAs with SINGLE_INSTANCE, KEEP_ALIVE, and KEEP_CRASHED
	 * flags in the context list to maintain their panicked status and
	 * prevent respawning.
	 */
	if (!keep_crashed) {
		mutex_lock(&tee_ta_mutex);
		was_releasing = ctx->is_releasing;
		ctx->is_releasing = true;
		if (!was_releasing) {
			DMSG("Releasing panicked TA ctx");
			TAILQ_REMOVE(&tee_ctxes, ctx, link);
		}
		mutex_unlock(&tee_ta_mutex);

		if (!was_releasing)
			ctx->ts_ctx.ops->release_state(&ctx->ts_ctx);
	}
}

TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
			       struct tee_ta_session **sess,
			       struct tee_ta_session_head *open_sessions,
			       const TEE_UUID *uuid,
			       const TEE_Identity *clnt_id,
			       uint32_t cancel_req_to,
			       struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_session *s = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool panicked = false;
	bool was_busy = false;

	res = tee_ta_init_session(err, open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("init session failed 0x%x", res);
		return res;
	}

	if (!check_params(s, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ts_ctx = s->ts_sess.ctx;
	ctx = ts_to_ta_ctx(ts_ctx);

	if (tee_ta_try_set_busy(ctx)) {
		if (!ctx->panicked) {
			/* Save identity of the owner of the session */
			s->clnt_id = *clnt_id;
			s->param = param;
			set_invoke_timeout(s, cancel_req_to);
			res = ts_ctx->ops->enter_open_session(&s->ts_sess);
			s->param = NULL;
		}

		panicked = ctx->panicked;
		if (panicked) {
			maybe_release_ta_ctx(ctx);
			res = TEE_ERROR_TARGET_DEAD;
		} else {
			if (IS_ENABLED(CFG_FTRACE_DUMP_EVERY_ENTRY))
				dump_ftrace(s);
		}

		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
		was_busy = true;
	}

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors,
	 * but TEE_ORIGIN_TEE when the TA panicked or was busy.
	 */
	if (panicked || was_busy)
		*err = TEE_ORIGIN_TEE;
	else
		*err = s->err_origin;

	tee_ta_put_session(s);
	if (panicked || res != TEE_SUCCESS)
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

	if (!res)
		*sess = s;
	else
		EMSG("Failed for TA %pUl. Return error %#"PRIx32, uuid, res);

	return res;
}

TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id,
				 uint32_t cancel_req_to, uint32_t cmd,
				 struct tee_ta_param *param)
{
	struct tee_ta_ctx *ta_ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	TEE_Result res = TEE_SUCCESS;
	bool panicked = false;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	if (!check_params(sess, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ts_ctx = sess->ts_sess.ctx;
	ta_ctx = ts_to_ta_ctx(ts_ctx);

	tee_ta_set_busy(ta_ctx);

	if (!ta_ctx->panicked) {
		sess->param = param;
		set_invoke_timeout(sess, cancel_req_to);
		res = ts_ctx->ops->enter_invoke_cmd(&sess->ts_sess, cmd);
		sess->param = NULL;
	}

	panicked = ta_ctx->panicked;
	if (panicked) {
		maybe_release_ta_ctx(ta_ctx);
		res = TEE_ERROR_TARGET_DEAD;
	} else {
		if (IS_ENABLED(CFG_FTRACE_DUMP_EVERY_ENTRY))
			dump_ftrace(sess);
	}

	tee_ta_clear_busy(ta_ctx);

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors,
	 * but TEE_ORIGIN_TEE when the TA panicked.
	 */
	if (panicked)
		*err = TEE_ORIGIN_TEE;
	else
		*err = sess->err_origin;

	/* A short buffer is not a genuine error, so don't log it */
	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
		DMSG("Error: %x of %d", res, *err);

	return res;
}

#if defined(CFG_TA_STATS)
static TEE_Result dump_ta_memstats(struct tee_ta_session *s,
				   struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool panicked = false;

	ts_ctx = s->ts_sess.ctx;
	if (!ts_ctx)
		return TEE_ERROR_ITEM_NOT_FOUND;

	ctx = ts_to_ta_ctx(ts_ctx);

	if (ctx->is_initializing)
		return TEE_ERROR_BAD_STATE;

	if (tee_ta_try_set_busy(ctx)) {
		if (!ctx->panicked) {
			s->param = param;
			set_invoke_timeout(s, TEE_TIMEOUT_INFINITE);
			res = ts_ctx->ops->dump_mem_stats(&s->ts_sess);
			s->param = NULL;
		}

		panicked = ctx->panicked;
		if (panicked) {
			maybe_release_ta_ctx(ctx);
			res = TEE_ERROR_TARGET_DEAD;
		}

		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
	}

	return res;
}

static void init_dump_ctx(struct tee_ta_dump_ctx *dump_ctx)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_ctx *ctx = NULL;
	unsigned int n = 0;

	nsec_sessions_list_head(&open_sessions);
	/*
	 * Scan all sessions opened from the non-secure side by searching
	 * through all available TA instances and, for each context, scanning
	 * its open sessions.
	 */
	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		unsigned int cnt = 0;

		if (!is_user_ta_ctx(&ctx->ts_ctx))
			continue;

		memcpy(&dump_ctx[n].uuid, &ctx->ts_ctx.uuid,
		       sizeof(ctx->ts_ctx.uuid));
		dump_ctx[n].panicked = ctx->panicked;
		dump_ctx[n].is_user_ta = is_user_ta_ctx(&ctx->ts_ctx);
		TAILQ_FOREACH(sess, open_sessions, link) {
			if (sess->ts_sess.ctx == &ctx->ts_ctx) {
				if (cnt == MAX_DUMP_SESS_NUM)
					break;

				dump_ctx[n].sess_id[cnt] = sess->id;
				cnt++;
			}
		}

		dump_ctx[n].sess_num = cnt;
		n++;
	}
}

static TEE_Result dump_ta_stats(struct tee_ta_dump_ctx *dump_ctx,
				struct pta_stats_ta *dump_stats,
				size_t ta_count)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_param param = { };
	unsigned int i = 0;
	unsigned int j = 0;

	nsec_sessions_list_head(&open_sessions);

	for (i = 0; i < ta_count; i++) {
		struct pta_stats_ta *stats = &dump_stats[i];

		memcpy(&stats->uuid, &dump_ctx[i].uuid,
		       sizeof(dump_ctx[i].uuid));
		stats->panicked = dump_ctx[i].panicked;
		stats->sess_num = dump_ctx[i].sess_num;

		/* Find a session from dump context */
		for (j = 0, sess = NULL; j < dump_ctx[i].sess_num && !sess; j++)
			sess = tee_ta_get_session(dump_ctx[i].sess_id[j], true,
						  open_sessions);

		if (!sess)
			continue;
		/* If the session exists, get its heap stats */
		memset(&param, 0, sizeof(struct tee_ta_param));
		param.types = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_NONE);
		res = dump_ta_memstats(sess, &param);
		if (res == TEE_SUCCESS) {
			stats->heap.allocated = param.u[0].val.a;
			stats->heap.max_allocated = param.u[0].val.b;
			stats->heap.size = param.u[1].val.a;
			stats->heap.num_alloc_fail = param.u[1].val.b;
			stats->heap.biggest_alloc_fail = param.u[2].val.a;
			stats->heap.biggest_alloc_fail_used = param.u[2].val.b;
		} else {
			memset(&stats->heap, 0, sizeof(stats->heap));
		}
		tee_ta_put_session(sess);
	}

	return TEE_SUCCESS;
}

TEE_Result tee_ta_instance_stats(void *buf, size_t *buf_size)
{
	TEE_Result res = TEE_SUCCESS;
	struct pta_stats_ta *dump_stats = NULL;
	struct tee_ta_dump_ctx *dump_ctx = NULL;
	struct tee_ta_ctx *ctx = NULL;
	size_t sz = 0;
	size_t ta_count = 0;

	if (!buf_size)
		return TEE_ERROR_BAD_PARAMETERS;

	mutex_lock(&tee_ta_mutex);

	/* Go through all available TAs and compute the required buffer size */
	TAILQ_FOREACH(ctx, &tee_ctxes, link)
		if (is_user_ta_ctx(&ctx->ts_ctx))
			ta_count++;

	sz = sizeof(struct pta_stats_ta) * ta_count;
	if (!sz) {
		/* sz == 0 means there are no user TAs, return no item found */
		res = TEE_ERROR_ITEM_NOT_FOUND;
	} else if (!buf || *buf_size < sz) {
		/*
		 * A NULL buf or a size smaller than the actual size means
		 * the caller is querying the required buffer size, so
		 * update *buf_size.
		 */
		*buf_size = sz;
		res = TEE_ERROR_SHORT_BUFFER;
	} else if (!IS_ALIGNED_WITH_TYPE(buf, uint32_t)) {
		DMSG("Data alignment");
		res = TEE_ERROR_BAD_PARAMETERS;
	} else {
		dump_stats = (struct pta_stats_ta *)buf;
		dump_ctx = malloc(sizeof(struct tee_ta_dump_ctx) * ta_count);
		if (!dump_ctx)
			res = TEE_ERROR_OUT_OF_MEMORY;
		else
			init_dump_ctx(dump_ctx);
	}
	mutex_unlock(&tee_ta_mutex);

	if (res != TEE_SUCCESS)
		return res;

	/* Dump user TA stats by iterating over dump_ctx[] */
	res = dump_ta_stats(dump_ctx, dump_stats, ta_count);
	if (res == TEE_SUCCESS)
		*buf_size = sz;

	free(dump_ctx);
	return res;
}
#endif

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id)
{
	*err = TEE_ORIGIN_TEE;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	sess->cancel = true;
	return TEE_SUCCESS;
}

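/*
 * Return true if the session has a pending cancellation: either an explicit
 * cancel request or an expired cancellation deadline. Masked sessions are
 * never reported as cancelled. @curr_time may provide the current time to
 * avoid re-reading the system timer.
 */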
bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}

#if defined(CFG_TA_GPROF_SUPPORT)
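/*
 * Record one PC sample in the session's gprof sample buffer. The index
 * computation follows the classic gprof/profil()-style histogram
 * convention: each bucket covers two bytes of text and 'scale' is a 16.16
 * fixed-point factor, hence the division by 65536. Out-of-range PCs only
 * bump the total sample count.
 */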
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
	struct ts_session *s = ts_get_current_session();
	struct user_ta_ctx *utc = NULL;
	struct sample_buf *sbuf = NULL;
	TEE_Result res = 0;
	size_t idx = 0;

	sbuf = s->sbuf;
	if (!sbuf || !sbuf->enabled)
		return; /* PC sampling is not enabled */

	idx = (((uint64_t)pc - sbuf->offset) / 2 * sbuf->scale) / 65536;
	if (idx < sbuf->nsamples) {
		utc = to_user_ta_ctx(s->ctx);
		res = vm_check_access_rights(&utc->uctx,
					     TEE_MEMORY_ACCESS_READ |
					     TEE_MEMORY_ACCESS_WRITE |
					     TEE_MEMORY_ACCESS_ANY_OWNER,
					     (uaddr_t)&sbuf->samples[idx],
					     sizeof(*sbuf->samples));
		if (res != TEE_SUCCESS)
			return;
		sbuf->samples[idx]++;
	}
	sbuf->count++;
}

static void gprof_update_session_utime(bool suspend, struct ts_session *s,
				       uint64_t now)
{
	struct sample_buf *sbuf = s->sbuf;

	if (!sbuf)
		return;

	if (suspend) {
		assert(sbuf->usr_entered);
		sbuf->usr += now - sbuf->usr_entered;
		sbuf->usr_entered = 0;
	} else {
		assert(!sbuf->usr_entered);
		if (!now)
			now++; /* 0 is reserved */
		sbuf->usr_entered = now;
	}
}

/*
 * Update user-mode CPU time for the current session
 * @suspend: true if session is being suspended (leaving user mode), false if
 * it is resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
	struct ts_session *s = ts_get_current_session();
	uint64_t now = barrier_read_counter_timer();

	gprof_update_session_utime(suspend, s, now);
}

void tee_ta_update_session_utime_suspend(void)
{
	tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
	tee_ta_update_session_utime(false);
}
#endif

#if defined(CFG_FTRACE_SUPPORT)
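/*
 * Compensate ftrace timestamps for the time a session spends suspended:
 * record the timestamp on suspend and, on resume, shift every pending
 * begin_time forward by the time spent away so durations stay accurate.
 */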
static void ftrace_update_times(bool suspend)
{
	struct ts_session *s = ts_get_current_session_may_fail();
	struct ftrace_buf *fbuf = NULL;
	uint64_t now = 0;
	uint32_t i = 0;

	if (!s)
		return;

	now = barrier_read_counter_timer();

	fbuf = s->fbuf;
	if (!fbuf)
		return;

	if (suspend) {
		fbuf->suspend_time = now;
	} else {
		for (i = 0; i <= fbuf->ret_idx; i++)
			fbuf->begin_time[i] += now - fbuf->suspend_time;
	}
}

void tee_ta_ftrace_update_times_suspend(void)
{
	ftrace_update_times(true);
}

void tee_ta_ftrace_update_times_resume(void)
{
	ftrace_update_times(false);
}
#endif

bool __noprof is_ta_ctx(struct ts_ctx *ctx)
{
	return is_user_ta_ctx(ctx) || is_pseudo_ta_ctx(ctx);
}