// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020, Arm Limited
 * Copyright (c) 2025, NVIDIA Corporation & AFFILIATES.
 */

#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/pseudo_ta.h>
#include <kernel/stmm_sp.h>
#include <kernel/tee_common.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/tee_time.h>
#include <kernel/thread.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <malloc.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <pta_stats.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee/entry_std.h>
#include <tee/tee_obj.h>
#include <trace.h>
#include <types_ext.h>
#include <user_ta_header.h>
#include <utee_types.h>
#include <util.h>

#if defined(CFG_TA_STATS)
#define MAX_DUMP_SESS_NUM (16)

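/*
 * Snapshot of one TA instance taken while tee_ta_mutex is held: the TA
 * UUID, its panic state, and the IDs of up to MAX_DUMP_SESS_NUM open
 * sessions, used later to collect statistics without holding the mutex.
 */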
struct tee_ta_dump_ctx {
	TEE_UUID uuid;
	uint32_t panicked;
	bool is_user_ta;
	uint32_t sess_num;
	uint32_t sess_id[MAX_DUMP_SESS_NUM];
};
#endif

/* This mutex protects the critical section in tee_ta_init_session */
struct mutex tee_ta_mutex = MUTEX_INITIALIZER;
/* This condvar is used when waiting for a TA context to become initialized */
struct condvar tee_ta_init_cv = CONDVAR_INITIALIZER;
struct tee_ta_ctx_head tee_ctxes = TAILQ_HEAD_INITIALIZER(tee_ctxes);

#ifndef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static struct condvar tee_ta_cv = CONDVAR_INITIALIZER;
static short int tee_ta_single_instance_thread = THREAD_ID_INVALID;
static size_t tee_ta_single_instance_count;
#endif

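/*
 * With CFG_CONCURRENT_SINGLE_INSTANCE_TA enabled, single-instance TAs may
 * be entered concurrently so the single-instance lock degenerates to a
 * no-op. Otherwise the lock is recursive: tee_ta_single_instance_thread
 * records the owning thread and tee_ta_single_instance_count the nesting
 * depth, both protected by tee_ta_mutex.
 */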
#ifdef CFG_CONCURRENT_SINGLE_INSTANCE_TA
static void lock_single_instance(void)
{
}

static void unlock_single_instance(void)
{
}

static bool has_single_instance_lock(void)
{
	return false;
}
#else
static void lock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	if (tee_ta_single_instance_thread != thread_get_id()) {
		/* Wait until the single-instance lock is available. */
		while (tee_ta_single_instance_thread != THREAD_ID_INVALID)
			condvar_wait(&tee_ta_cv, &tee_ta_mutex);

		tee_ta_single_instance_thread = thread_get_id();
		assert(tee_ta_single_instance_count == 0);
	}

	tee_ta_single_instance_count++;
}

static void unlock_single_instance(void)
{
	/* Requires tee_ta_mutex to be held */
	assert(tee_ta_single_instance_thread == thread_get_id());
	assert(tee_ta_single_instance_count > 0);

	tee_ta_single_instance_count--;
	if (tee_ta_single_instance_count == 0) {
		tee_ta_single_instance_thread = THREAD_ID_INVALID;
		condvar_signal(&tee_ta_cv);
	}
}

static bool has_single_instance_lock(void)
{
	/* Requires tee_ta_mutex to be held */
	return tee_ta_single_instance_thread == thread_get_id();
}
#endif

struct tee_ta_session *__noprof to_ta_session(struct ts_session *sess)
{
	assert(is_ta_ctx(sess->ctx) || is_stmm_ctx(sess->ctx));
	return container_of(sess, struct tee_ta_session, ts_sess);
}

static struct tee_ta_ctx *ts_to_ta_ctx(struct ts_ctx *ctx)
{
	if (is_ta_ctx(ctx))
		return to_ta_ctx(ctx);

	if (is_stmm_ctx(ctx))
		return &(to_stmm_ctx(ctx)->ta_ctx);

	panic("bad context");
}

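/*
 * Mark the context busy before entering the TA. Concurrent TAs are never
 * marked busy. If this thread already holds the single-instance lock and
 * the TA is busy, waiting could deadlock, so false is returned instead of
 * blocking.
 */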
static bool tee_ta_try_set_busy(struct tee_ta_ctx *ctx)
{
	bool rc = true;

	if (ctx->flags & TA_FLAG_CONCURRENT)
		return true;

	mutex_lock(&tee_ta_mutex);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		lock_single_instance();

	if (has_single_instance_lock()) {
		if (ctx->busy) {
			/*
			 * We're holding the single-instance lock and the
			 * TA is busy. Waiting now would only cause a
			 * deadlock, so we release the lock and return
			 * false.
			 */
			rc = false;
			if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
				unlock_single_instance();
		}
	} else {
		/*
		 * We're not holding the single-instance lock, so we're
		 * free to wait for the TA to become available.
		 */
		while (ctx->busy)
			condvar_wait(&ctx->busy_cv, &tee_ta_mutex);
	}

	/* Either it's already true or we should set it to true */
	ctx->busy = true;

	mutex_unlock(&tee_ta_mutex);
	return rc;
}

static void tee_ta_set_busy(struct tee_ta_ctx *ctx)
{
	if (!tee_ta_try_set_busy(ctx))
		panic();
}

static void tee_ta_clear_busy(struct tee_ta_ctx *ctx)
{
	if (ctx->flags & TA_FLAG_CONCURRENT)
		return;

	mutex_lock(&tee_ta_mutex);

	assert(ctx->busy);
	ctx->busy = false;
	condvar_signal(&ctx->busy_cv);

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		unlock_single_instance();

	mutex_unlock(&tee_ta_mutex);
}

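/*
 * Drop one reference. Waiters in tee_ta_unlink_session() block until the
 * count reaches 1 (their own reference), so signal refc_cv at that point.
 */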
static void dec_session_ref_count(struct tee_ta_session *s)
{
	assert(s->ref_count > 0);
	s->ref_count--;
	if (s->ref_count == 1)
		condvar_signal(&s->refc_cv);
}

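/*
 * Release a session obtained with tee_ta_get_session(): drop the exclusive
 * lock if this thread holds it and decrement the reference count.
 */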
void tee_ta_put_session(struct tee_ta_session *s)
{
	mutex_lock(&tee_ta_mutex);

	if (s->lock_thread == thread_get_id()) {
		s->lock_thread = THREAD_ID_INVALID;
		condvar_signal(&s->lock_cv);
	}
	dec_session_ref_count(s);

	mutex_unlock(&tee_ta_mutex);
}

static struct tee_ta_session *tee_ta_find_session_nolock(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *found = NULL;

	TAILQ_FOREACH(s, open_sessions, link) {
		if (s->id == id) {
			found = s;
			break;
		}
	}

	return found;
}

struct tee_ta_session *tee_ta_find_session(uint32_t id,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s = NULL;

	mutex_lock(&tee_ta_mutex);

	s = tee_ta_find_session_nolock(id, open_sessions);

	mutex_unlock(&tee_ta_mutex);

	return s;
}

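/*
 * Look up a session by ID and take a reference on it. With @exclusive set,
 * also wait until no other thread holds the session lock. Returns NULL if
 * the session doesn't exist or is being unlinked.
 */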
struct tee_ta_session *tee_ta_get_session(uint32_t id, bool exclusive,
			struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *s;

	mutex_lock(&tee_ta_mutex);

	while (true) {
		s = tee_ta_find_session_nolock(id, open_sessions);
		if (!s)
			break;
		if (s->unlink) {
			s = NULL;
			break;
		}
		s->ref_count++;
		if (!exclusive)
			break;

		assert(s->lock_thread != thread_get_id());

		while (s->lock_thread != THREAD_ID_INVALID && !s->unlink)
			condvar_wait(&s->lock_cv, &tee_ta_mutex);

		if (s->unlink) {
			dec_session_ref_count(s);
			s = NULL;
			break;
		}

		s->lock_thread = thread_get_id();
		break;
	}

	mutex_unlock(&tee_ta_mutex);
	return s;
}

static void tee_ta_unlink_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	mutex_lock(&tee_ta_mutex);

	assert(s->ref_count >= 1);
	assert(s->lock_thread == thread_get_id());
	assert(!s->unlink);

	s->unlink = true;
	condvar_broadcast(&s->lock_cv);

	while (s->ref_count != 1)
		condvar_wait(&s->refc_cv, &tee_ta_mutex);

	TAILQ_REMOVE(open_sessions, s, link);

	mutex_unlock(&tee_ta_mutex);
}

static void dump_ftrace(struct tee_ta_session *s __maybe_unused)
{
#if defined(CFG_FTRACE_SUPPORT)
	struct ts_ctx *ts_ctx = s->ts_sess.ctx;

	if (ts_ctx && ts_ctx->ops->dump_ftrace &&
	    core_mmu_user_mapping_is_active()) {
		ts_push_current_session(&s->ts_sess);
		ts_ctx->ops->dump_ftrace(ts_ctx);
		ts_pop_current_session();
	}
#endif
}

static void destroy_session(struct tee_ta_session *s,
			struct tee_ta_session_head *open_sessions)
{
	dump_ftrace(s);

	tee_ta_unlink_session(s, open_sessions);
#if defined(CFG_TA_GPROF_SUPPORT)
	free(s->ts_sess.sbuf);
#endif
	free(s);
}

static void destroy_context(struct tee_ta_ctx *ctx)
{
	DMSG("Destroy TA ctx (0x%" PRIxVA ")", (vaddr_t)ctx);

	condvar_destroy(&ctx->busy_cv);
	ctx->ts_ctx.ops->destroy(&ctx->ts_ctx);
}

/*
 * tee_ta_context_find - Find a TA context based on a UUID (input)
 * Returns a pointer to the context, or NULL if it isn't found.
 * Caller must hold tee_ta_mutex.
 */
static struct tee_ta_ctx *tee_ta_context_find(const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx;

	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		if (memcmp(&ctx->ts_ctx.uuid, uuid, sizeof(TEE_UUID)) == 0)
			return ctx;
	}

	return NULL;
}

/* Check if the requester (client ID) matches the session's initial client */
static TEE_Result check_client(struct tee_ta_session *s, const TEE_Identity *id)
{
	if (id == KERN_IDENTITY)
		return TEE_SUCCESS;

	if (id == NSAPP_IDENTITY) {
		if (s->clnt_id.login == TEE_LOGIN_TRUSTED_APP) {
			DMSG("nsec tries to hijack TA session");
			return TEE_ERROR_ACCESS_DENIED;
		}
		return TEE_SUCCESS;
	}

	if (memcmp(&s->clnt_id, id, sizeof(TEE_Identity)) != 0) {
		DMSG("client id mismatch");
		return TEE_ERROR_ACCESS_DENIED;
	}
	return TEE_SUCCESS;
}

/*
 * Check if invocation parameters match the TA properties
 *
 * @sess - current session handle
 * @param - already identified memory references hold a valid 'mobj'.
 *
 * Policy:
 * - All TAs can access 'non-secure' shared memory.
 * - All TAs can access TEE private memory (seccpy)
 * - Only SDP flagged TAs can accept SDP memory references.
 */
#ifndef CFG_SECURE_DATA_PATH
static bool check_params(struct tee_ta_session *sess __unused,
			 struct tee_ta_param *param __unused)
{
	/*
	 * When CFG_SECURE_DATA_PATH is not enabled, SDP memory references
	 * are rejected at OP-TEE core entry. Hence here all TAs have the
	 * same permissions regarding memory reference parameters.
	 */
	return true;
}
#else
static bool check_params(struct tee_ta_session *sess,
			 struct tee_ta_param *param)
{
	int n;

	/*
	 * When CFG_SECURE_DATA_PATH is enabled, OP-TEE entry allows SHM and
	 * SDP memory references. Only TAs flagged SDP can access SDP memory.
	 */
	if (sess->ts_sess.ctx &&
	    ts_to_ta_ctx(sess->ts_sess.ctx)->flags & TA_FLAG_SECURE_DATA_PATH)
		return true;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		struct param_mem *mem = &param->u[n].mem;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (!mem->size)
			continue;
		if (mobj_is_sdp_mem(mem->mobj))
			return false;
	}
	return true;
}
#endif

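/*
 * Convert a cancellation timeout, given in milliseconds from now, into an
 * absolute system time stored in the session. For example, at a current
 * time of 10 s / 700 ms, a 1500 ms timeout gives 11 s / 1200 ms before the
 * millisecond carry and 12 s / 200 ms after it. Any overflow falls back to
 * the infinite timeout (UINT32_MAX).
 */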
static void set_invoke_timeout(struct tee_ta_session *sess,
			       uint32_t cancel_req_to)
{
	TEE_Time current_time;
	TEE_Time cancel_time;

	if (cancel_req_to == TEE_TIMEOUT_INFINITE)
		goto infinite;

	if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		goto infinite;

	if (ADD_OVERFLOW(current_time.seconds, cancel_req_to / 1000,
			 &cancel_time.seconds))
		goto infinite;

	cancel_time.millis = current_time.millis + cancel_req_to % 1000;
	if (cancel_time.millis >= 1000) {
		if (ADD_OVERFLOW(cancel_time.seconds, 1,
				 &cancel_time.seconds))
			goto infinite;

		cancel_time.millis -= 1000;
	}

	sess->cancel_time = cancel_time;
	return;

infinite:
	sess->cancel_time.seconds = UINT32_MAX;
	sess->cancel_time.millis = UINT32_MAX;
}

/*-----------------------------------------------------------------------------
 * Close a Trusted Application and free available resources
 *---------------------------------------------------------------------------*/
TEE_Result tee_ta_close_session(struct tee_ta_session *csess,
				struct tee_ta_session_head *open_sessions,
				const TEE_Identity *clnt_id)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool keep_crashed = false;
	bool keep_alive = false;

	DMSG("csess 0x%" PRIxVA " id %u",
	     (vaddr_t)csess, csess ? csess->id : UINT_MAX);

	if (!csess)
		return TEE_ERROR_ITEM_NOT_FOUND;

	sess = tee_ta_get_session(csess->id, true, open_sessions);

	if (!sess) {
		EMSG("session 0x%" PRIxVA " to be removed is not found",
		     (vaddr_t)csess);
		return TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (check_client(sess, clnt_id) != TEE_SUCCESS) {
		tee_ta_put_session(sess);
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */
	}

	DMSG("Destroy session");

	ts_ctx = sess->ts_sess.ctx;
	if (!ts_ctx) {
		destroy_session(sess, open_sessions);
		return TEE_SUCCESS;
	}

	ctx = ts_to_ta_ctx(ts_ctx);
	if (ctx->panicked) {
		destroy_session(sess, open_sessions);
	} else {
		tee_ta_set_busy(ctx);
		set_invoke_timeout(sess, TEE_TIMEOUT_INFINITE);
		ts_ctx->ops->enter_close_session(&sess->ts_sess);
		destroy_session(sess, open_sessions);
		tee_ta_clear_busy(ctx);
	}

	mutex_lock(&tee_ta_mutex);

	if (ctx->ref_count <= 0)
		panic();

	ctx->ref_count--;
	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		keep_alive = ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE;
	if (keep_alive)
		keep_crashed = ctx->flags & TA_FLAG_INSTANCE_KEEP_CRASHED;
	if (!ctx->ref_count &&
	    ((ctx->panicked && !keep_crashed) || !keep_alive)) {
		if (!ctx->is_releasing) {
			TAILQ_REMOVE(&tee_ctxes, ctx, link);
			ctx->is_releasing = true;
		}
		mutex_unlock(&tee_ta_mutex);

		destroy_context(ctx);
	} else {
		mutex_unlock(&tee_ta_mutex);
	}

	return TEE_SUCCESS;
}

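/*
 * Try to bind the new session to an already loaded context with a matching
 * UUID. Returns TEE_ERROR_ITEM_NOT_FOUND when the caller should load a new
 * instance instead, or TEE_ERROR_BUSY when a single-instance context can't
 * accept another session. Called with tee_ta_mutex held.
 */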
static TEE_Result tee_ta_init_session_with_context(struct tee_ta_session *s,
						   const TEE_UUID *uuid)
{
	struct tee_ta_ctx *ctx = NULL;

	while (true) {
		ctx = tee_ta_context_find(uuid);
		if (!ctx)
			return TEE_ERROR_ITEM_NOT_FOUND;

		if (!ctx->is_initializing)
			break;
		/*
		 * Context is still initializing, wait here until it's
		 * fully initialized. Note that we're searching for the
		 * context again since it may have been removed while we
		 * were sleeping.
		 */
		condvar_wait(&tee_ta_init_cv, &tee_ta_mutex);
	}

	/*
	 * If the trusted service is not a single-instance service (e.g. is
	 * a multi-instance TA) it should be loaded as a new instance instead
	 * of doing anything with this instance. So tell the caller that we
	 * didn't find the TA; the caller will then load a new instance.
	 */
	if ((ctx->flags & TA_FLAG_SINGLE_INSTANCE) == 0)
		return TEE_ERROR_ITEM_NOT_FOUND;

	/*
	 * The trusted service is single-instance; if it isn't multi-session
	 * we can't create another session unless its reference count is zero.
	 */
	if (!(ctx->flags & TA_FLAG_MULTI_SESSION) && ctx->ref_count)
		return TEE_ERROR_BUSY;

	DMSG("Re-open trusted service %pUl", (void *)&ctx->ts_ctx.uuid);

	ctx->ref_count++;
	s->ts_sess.ctx = &ctx->ts_ctx;
	s->ts_sess.handle_scall = s->ts_sess.ctx->ops->handle_scall;
	return TEE_SUCCESS;
}

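/*
 * Allocate a session ID: start one past the ID of the newest session as a
 * hint and probe linearly, wrapping around and skipping 0 (invalid), until
 * a free ID is found. Returns 0 if the whole ID space is exhausted. Called
 * with tee_ta_mutex held.
 */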
static uint32_t new_session_id(struct tee_ta_session_head *open_sessions)
{
	struct tee_ta_session *last = NULL;
	uint32_t saved = 0;
	uint32_t id = 1;

	last = TAILQ_LAST(open_sessions, tee_ta_session_head);
	if (last) {
		/* This value is less likely to be already used */
		id = last->id + 1;
		if (!id)
			id++; /* 0 is not valid */
	}

	saved = id;
	do {
		if (!tee_ta_find_session_nolock(id, open_sessions))
			return id;
		id++;
		if (!id)
			id++;
	} while (id != saved);

	return 0;
}

static TEE_Result tee_ta_init_session(TEE_ErrorOrigin *err,
				      struct tee_ta_session_head *open_sessions,
				      const TEE_UUID *uuid,
				      struct tee_ta_session **sess)
{
	TEE_Result res;
	struct tee_ta_session *s = calloc(1, sizeof(struct tee_ta_session));

	*err = TEE_ORIGIN_TEE;
	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->cancel_mask = true;
	condvar_init(&s->refc_cv);
	condvar_init(&s->lock_cv);
	s->lock_thread = THREAD_ID_INVALID;
	s->ref_count = 1;

	mutex_lock(&tee_ta_mutex);
	s->id = new_session_id(open_sessions);
	if (!s->id) {
		res = TEE_ERROR_OVERFLOW;
		goto err_mutex_unlock;
	}

	TAILQ_INSERT_TAIL(open_sessions, s, link);

	/* Look for an already loaded TA */
	res = tee_ta_init_session_with_context(s, uuid);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND) {
		mutex_unlock(&tee_ta_mutex);
		goto out;
	}

	/* Look for a secure partition */
	res = stmm_init_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND) {
		mutex_unlock(&tee_ta_mutex);
		if (res == TEE_SUCCESS)
			res = stmm_complete_session(s);

		goto out;
	}

	/* Look for a pseudo TA */
	res = tee_ta_init_pseudo_ta_session(uuid, s);
	if (res == TEE_SUCCESS || res != TEE_ERROR_ITEM_NOT_FOUND) {
		mutex_unlock(&tee_ta_mutex);
		goto out;
	}

	/* Look for a user TA */
	res = tee_ta_init_user_ta_session(uuid, s);
	mutex_unlock(&tee_ta_mutex);
	if (res == TEE_SUCCESS)
		res = tee_ta_complete_user_ta_session(s);

out:
	if (!res) {
		*sess = s;
		return TEE_SUCCESS;
	}

	mutex_lock(&tee_ta_mutex);
	TAILQ_REMOVE(open_sessions, s, link);
err_mutex_unlock:
	mutex_unlock(&tee_ta_mutex);
	free(s);
	return res;
}

static void maybe_release_ta_ctx(struct tee_ta_ctx *ctx)
{
	bool was_releasing = false;
	bool keep_crashed = false;
	bool keep_alive = false;

	if (ctx->flags & TA_FLAG_SINGLE_INSTANCE)
		keep_alive = ctx->flags & TA_FLAG_INSTANCE_KEEP_ALIVE;
	if (keep_alive)
		keep_crashed = ctx->flags & TA_FLAG_INSTANCE_KEEP_CRASHED;

	/*
	 * Keep panicked TAs with SINGLE_INSTANCE, KEEP_ALIVE, and
	 * KEEP_CRASHED flags in the context list to maintain their
	 * panicked status and prevent respawning.
	 */
	if (!keep_crashed) {
		mutex_lock(&tee_ta_mutex);
		was_releasing = ctx->is_releasing;
		ctx->is_releasing = true;
		if (!was_releasing) {
			DMSG("Releasing panicked TA ctx");
			TAILQ_REMOVE(&tee_ctxes, ctx, link);
		}
		mutex_unlock(&tee_ta_mutex);

		if (!was_releasing)
			ctx->ts_ctx.ops->release_state(&ctx->ts_ctx);
	}
}

TEE_Result tee_ta_open_session(TEE_ErrorOrigin *err,
			       struct tee_ta_session **sess,
			       struct tee_ta_session_head *open_sessions,
			       const TEE_UUID *uuid,
			       const TEE_Identity *clnt_id,
			       uint32_t cancel_req_to,
			       struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_session *s = NULL;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool panicked = false;
	bool was_busy = false;

	res = tee_ta_init_session(err, open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("init session failed 0x%x", res);
		return res;
	}

	if (!check_params(s, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ts_ctx = s->ts_sess.ctx;
	ctx = ts_to_ta_ctx(ts_ctx);

	if (tee_ta_try_set_busy(ctx)) {
		if (!ctx->panicked) {
			/* Save the identity of the owner of the session */
			s->clnt_id = *clnt_id;
			s->param = param;
			set_invoke_timeout(s, cancel_req_to);
			res = ts_ctx->ops->enter_open_session(&s->ts_sess);
			s->param = NULL;
		}

		panicked = ctx->panicked;
		if (panicked) {
			maybe_release_ta_ctx(ctx);
			res = TEE_ERROR_TARGET_DEAD;
		} else {
			if (IS_ENABLED(CFG_FTRACE_DUMP_EVERY_ENTRY))
				dump_ftrace(s);
		}

		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
		was_busy = true;
	}

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors;
	 * a panicked or busy TA is reported with origin TEE_ORIGIN_TEE.
	 */
	if (panicked || was_busy)
		*err = TEE_ORIGIN_TEE;
	else
		*err = s->err_origin;

	tee_ta_put_session(s);
	if (panicked || res != TEE_SUCCESS)
		tee_ta_close_session(s, open_sessions, KERN_IDENTITY);

	if (!res)
		*sess = s;
	else
		EMSG("Failed for TA %pUl. Return error %#"PRIx32, uuid, res);

	return res;
}

TEE_Result tee_ta_invoke_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id,
				 uint32_t cancel_req_to, uint32_t cmd,
				 struct tee_ta_param *param)
{
	struct tee_ta_ctx *ta_ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	TEE_Result res = TEE_SUCCESS;
	bool panicked = false;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	if (!check_params(sess, param))
		return TEE_ERROR_BAD_PARAMETERS;

	ts_ctx = sess->ts_sess.ctx;
	ta_ctx = ts_to_ta_ctx(ts_ctx);

	tee_ta_set_busy(ta_ctx);

	if (!ta_ctx->panicked) {
		sess->param = param;
		set_invoke_timeout(sess, cancel_req_to);
		res = ts_ctx->ops->enter_invoke_cmd(&sess->ts_sess, cmd);
		sess->param = NULL;
	}

	panicked = ta_ctx->panicked;
	if (panicked) {
		maybe_release_ta_ctx(ta_ctx);
		res = TEE_ERROR_TARGET_DEAD;
	} else {
		if (IS_ENABLED(CFG_FTRACE_DUMP_EVERY_ENTRY))
			dump_ftrace(sess);
	}

	tee_ta_clear_busy(ta_ctx);

	/*
	 * The error origin is TEE_ORIGIN_TRUSTED_APP for "regular" errors;
	 * a panicked TA is reported with origin TEE_ORIGIN_TEE.
	 */
	if (panicked)
		*err = TEE_ORIGIN_TEE;
	else
		*err = sess->err_origin;

	/* A short buffer is not an effective error case */
	if (res != TEE_SUCCESS && res != TEE_ERROR_SHORT_BUFFER)
		DMSG("Error: %x of %d", res, *err);

	return res;
}

#if defined(CFG_TA_STATS)
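/*
 * Enter the TA through its dump_mem_stats handler to collect heap usage
 * into @param. Mirrors the busy/panic handling of a regular invocation.
 */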
static TEE_Result dump_ta_memstats(struct tee_ta_session *s,
				   struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_ctx *ctx = NULL;
	struct ts_ctx *ts_ctx = NULL;
	bool panicked = false;

	ts_ctx = s->ts_sess.ctx;
	if (!ts_ctx)
		return TEE_ERROR_ITEM_NOT_FOUND;

	ctx = ts_to_ta_ctx(ts_ctx);

	if (ctx->is_initializing)
		return TEE_ERROR_BAD_STATE;

	if (tee_ta_try_set_busy(ctx)) {
		if (!ctx->panicked) {
			s->param = param;
			set_invoke_timeout(s, TEE_TIMEOUT_INFINITE);
			res = ts_ctx->ops->dump_mem_stats(&s->ts_sess);
			s->param = NULL;
		}

		panicked = ctx->panicked;
		if (panicked) {
			maybe_release_ta_ctx(ctx);
			res = TEE_ERROR_TARGET_DEAD;
		}

		tee_ta_clear_busy(ctx);
	} else {
		/* Deadlock avoided */
		res = TEE_ERROR_BUSY;
	}

	return res;
}

static void init_dump_ctx(struct tee_ta_dump_ctx *dump_ctx)
{
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_ctx *ctx = NULL;
	unsigned int n = 0;

	nsec_sessions_list_head(&open_sessions);
	/*
	 * Build a dump context for each user TA instance by searching
	 * through all available TA contexts and, for each context,
	 * recording its open sessions.
	 */
	TAILQ_FOREACH(ctx, &tee_ctxes, link) {
		unsigned int cnt = 0;

		if (!is_user_ta_ctx(&ctx->ts_ctx))
			continue;

		memcpy(&dump_ctx[n].uuid, &ctx->ts_ctx.uuid,
		       sizeof(ctx->ts_ctx.uuid));
		dump_ctx[n].panicked = ctx->panicked;
		dump_ctx[n].is_user_ta = is_user_ta_ctx(&ctx->ts_ctx);
		TAILQ_FOREACH(sess, open_sessions, link) {
			if (sess->ts_sess.ctx == &ctx->ts_ctx) {
				if (cnt == MAX_DUMP_SESS_NUM)
					break;

				dump_ctx[n].sess_id[cnt] = sess->id;
				cnt++;
			}
		}

		dump_ctx[n].sess_num = cnt;
		n++;
	}
}

static TEE_Result dump_ta_stats(struct tee_ta_dump_ctx *dump_ctx,
				struct pta_stats_ta *dump_stats,
				size_t ta_count)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_ta_session *sess = NULL;
	struct tee_ta_session_head *open_sessions = NULL;
	struct tee_ta_param param = { };
	unsigned int i = 0;
	unsigned int j = 0;

	nsec_sessions_list_head(&open_sessions);

	for (i = 0; i < ta_count; i++) {
		struct pta_stats_ta *stats = &dump_stats[i];

		memcpy(&stats->uuid, &dump_ctx[i].uuid,
		       sizeof(dump_ctx[i].uuid));
		stats->panicked = dump_ctx[i].panicked;
		stats->sess_num = dump_ctx[i].sess_num;

		/* Find a session from the dump context */
		for (j = 0, sess = NULL; j < dump_ctx[i].sess_num && !sess; j++)
			sess = tee_ta_get_session(dump_ctx[i].sess_id[j], true,
						  open_sessions);

		if (!sess)
			continue;
		/* If a session exists, get its heap stats */
		memset(&param, 0, sizeof(struct tee_ta_param));
		param.types = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_VALUE_OUTPUT,
					      TEE_PARAM_TYPE_NONE);
		res = dump_ta_memstats(sess, &param);
		if (res == TEE_SUCCESS) {
			stats->heap.allocated = param.u[0].val.a;
			stats->heap.max_allocated = param.u[0].val.b;
			stats->heap.size = param.u[1].val.a;
			stats->heap.num_alloc_fail = param.u[1].val.b;
			stats->heap.biggest_alloc_fail = param.u[2].val.a;
			stats->heap.biggest_alloc_fail_used = param.u[2].val.b;
		} else {
			memset(&stats->heap, 0, sizeof(stats->heap));
		}
		tee_ta_put_session(sess);
	}

	return TEE_SUCCESS;
}

TEE_Result tee_ta_instance_stats(void *buf, size_t *buf_size)
{
	TEE_Result res = TEE_SUCCESS;
	struct pta_stats_ta *dump_stats = NULL;
	struct tee_ta_dump_ctx *dump_ctx = NULL;
	struct tee_ta_ctx *ctx = NULL;
	size_t sz = 0;
	size_t ta_count = 0;

	if (!buf_size)
		return TEE_ERROR_BAD_PARAMETERS;

	mutex_lock(&tee_ta_mutex);

	/* Go through all available TAs and compute the required buffer size */
	TAILQ_FOREACH(ctx, &tee_ctxes, link)
		if (is_user_ta_ctx(&ctx->ts_ctx))
			ta_count++;

	sz = sizeof(struct pta_stats_ta) * ta_count;
	if (!sz) {
		/* sz == 0 means there is no user TA; report no item found. */
		res = TEE_ERROR_ITEM_NOT_FOUND;
	} else if (!buf || *buf_size < sz) {
		/*
		 * A NULL buf or a size smaller than the actual size means
		 * the caller is querying the required buffer size, so
		 * update *buf_size.
		 */
		*buf_size = sz;
		res = TEE_ERROR_SHORT_BUFFER;
	} else if (!IS_ALIGNED_WITH_TYPE(buf, uint32_t)) {
		DMSG("Data alignment");
		res = TEE_ERROR_BAD_PARAMETERS;
	} else {
		dump_stats = (struct pta_stats_ta *)buf;
		dump_ctx = malloc(sizeof(struct tee_ta_dump_ctx) * ta_count);
		if (!dump_ctx)
			res = TEE_ERROR_OUT_OF_MEMORY;
		else
			init_dump_ctx(dump_ctx);
	}
	mutex_unlock(&tee_ta_mutex);

	if (res != TEE_SUCCESS)
		return res;

	/* Dump user TA stats by iterating over dump_ctx[] */
	res = dump_ta_stats(dump_ctx, dump_stats, ta_count);
	if (res == TEE_SUCCESS)
		*buf_size = sz;

	free(dump_ctx);
	return res;
}
#endif

TEE_Result tee_ta_cancel_command(TEE_ErrorOrigin *err,
				 struct tee_ta_session *sess,
				 const TEE_Identity *clnt_id)
{
	*err = TEE_ORIGIN_TEE;

	if (check_client(sess, clnt_id) != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS; /* intentional generic error */

	sess->cancel = true;
	return TEE_SUCCESS;
}

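/*
 * A session is cancelled when the cancellation mask is cleared and either
 * a cancel request is pending or the cancellation timeout has expired.
 * @curr_time, if non-NULL, avoids an extra system time lookup.
 */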
bool tee_ta_session_is_cancelled(struct tee_ta_session *s, TEE_Time *curr_time)
{
	TEE_Time current_time;

	if (s->cancel_mask)
		return false;

	if (s->cancel)
		return true;

	if (s->cancel_time.seconds == UINT32_MAX)
		return false;

	if (curr_time != NULL)
		current_time = *curr_time;
	else if (tee_time_get_sys_time(&current_time) != TEE_SUCCESS)
		return false;

	if (current_time.seconds > s->cancel_time.seconds ||
	    (current_time.seconds == s->cancel_time.seconds &&
	     current_time.millis >= s->cancel_time.millis)) {
		return true;
	}

	return false;
}

#if defined(CFG_TA_GPROF_SUPPORT)
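/*
 * Record one PC sample in the gprof histogram. The bucket index follows
 * profil(2)-style scaling: the offset from the sampled region start is
 * halved and multiplied by the 16.16 fixed-point factor sbuf->scale, so
 * a scale of 65536 maps one bucket per two bytes of text.
 */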
void tee_ta_gprof_sample_pc(vaddr_t pc)
{
	struct ts_session *s = ts_get_current_session();
	struct user_ta_ctx *utc = NULL;
	struct sample_buf *sbuf = NULL;
	TEE_Result res = 0;
	size_t idx = 0;

	sbuf = s->sbuf;
	if (!sbuf || !sbuf->enabled)
		return; /* PC sampling is not enabled */

	idx = (((uint64_t)pc - sbuf->offset) / 2 * sbuf->scale) / 65536;
	if (idx < sbuf->nsamples) {
		utc = to_user_ta_ctx(s->ctx);
		res = vm_check_access_rights(&utc->uctx,
					     TEE_MEMORY_ACCESS_READ |
					     TEE_MEMORY_ACCESS_WRITE |
					     TEE_MEMORY_ACCESS_ANY_OWNER,
					     (uaddr_t)&sbuf->samples[idx],
					     sizeof(*sbuf->samples));
		if (res != TEE_SUCCESS)
			return;
		sbuf->samples[idx]++;
	}
	sbuf->count++;
}

static void gprof_update_session_utime(bool suspend, struct ts_session *s,
				       uint64_t now)
{
	struct sample_buf *sbuf = s->sbuf;

	if (!sbuf)
		return;

	if (suspend) {
		assert(sbuf->usr_entered);
		sbuf->usr += now - sbuf->usr_entered;
		sbuf->usr_entered = 0;
	} else {
		assert(!sbuf->usr_entered);
		if (!now)
			now++; /* 0 is reserved */
		sbuf->usr_entered = now;
	}
}

/*
 * Update user-mode CPU time for the current session
 * @suspend: true if session is being suspended (leaving user mode), false if
 * it is resumed (entering user mode)
 */
static void tee_ta_update_session_utime(bool suspend)
{
	struct ts_session *s = ts_get_current_session();
	uint64_t now = barrier_read_counter_timer();

	gprof_update_session_utime(suspend, s, now);
}

void tee_ta_update_session_utime_suspend(void)
{
	tee_ta_update_session_utime(true);
}

void tee_ta_update_session_utime_resume(void)
{
	tee_ta_update_session_utime(false);
}
#endif

#if defined(CFG_FTRACE_SUPPORT)
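/*
 * Compensate ftrace timestamps for time spent outside the TA: on suspend,
 * record the current counter; on resume, shift every pending function-entry
 * timestamp forward by the time spent suspended.
 */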
static void ftrace_update_times(bool suspend)
{
	struct ts_session *s = ts_get_current_session_may_fail();
	struct ftrace_buf *fbuf = NULL;
	TEE_Result res = TEE_SUCCESS;
	uint64_t now = 0;
	uint32_t i = 0;

	if (!s)
		return;

	now = barrier_read_counter_timer();

	fbuf = s->fbuf;
	if (!fbuf)
		return;

	res = vm_check_access_rights(to_user_mode_ctx(s->ctx),
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)fbuf, sizeof(*fbuf));
	if (res)
		return;

	if (suspend) {
		fbuf->suspend_time = now;
	} else {
		for (i = 0; i <= fbuf->ret_idx; i++)
			fbuf->begin_time[i] += now - fbuf->suspend_time;
	}
}

void tee_ta_ftrace_update_times_suspend(void)
{
	ftrace_update_times(true);
}

void tee_ta_ftrace_update_times_resume(void)
{
	ftrace_update_times(false);
}
#endif

bool __noprof is_ta_ctx(struct ts_ctx *ctx)
{
	return is_user_ta_ctx(ctx) || is_pseudo_ta_ctx(ctx);
}