// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2015-2022 Linaro Limited
 * Copyright (c) 2020, Arm Limited.
 */

#include <assert.h>
#include <compiler.h>
#include <crypto/crypto.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/ldelf_loader.h>
#include <kernel/linker.h>
#include <kernel/panic.h>
#include <kernel/scall.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/ts_store.h>
#include <kernel/user_access.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/file.h>
#include <mm/fobj.h>
#include <mm/mobj.h>
#include <mm/pgt_cache.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <optee_rpc_cmd.h>
#include <printk.h>
#include <signed_hdr.h>
#include <stdlib.h>
#include <sys/queue.h>
#include <tee/tee_cryp_utl.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc_storage.h>
#include <trace.h>
#include <types_ext.h>
#include <utee_defines.h>
#include <util.h>

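/*
 * Copy the session parameters into the struct utee_params at @up in user
 * space. The structure is staged in a kernel bounce buffer and then
 * written to user space with copy_to_user(). Memref parameters use the
 * user space addresses in @va[] set up by vm_map_param().
 */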
static TEE_Result init_utee_param(struct utee_params *up,
				  const struct tee_ta_param *p,
				  void *va[TEE_NUM_PARAMS])
{
	TEE_Result res = TEE_SUCCESS;
	size_t n = 0;
	struct utee_params *up_bbuf = NULL;

	up_bbuf = bb_alloc(sizeof(struct utee_params));
	if (!up_bbuf)
		return TEE_ERROR_OUT_OF_MEMORY;

	up_bbuf->types = p->types;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uintptr_t a;
		uintptr_t b;

		switch (TEE_PARAM_TYPE_GET(p->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_INPUT:
		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			a = (uintptr_t)va[n];
			b = p->u[n].mem.size;
			break;
		case TEE_PARAM_TYPE_VALUE_INPUT:
		case TEE_PARAM_TYPE_VALUE_INOUT:
			a = p->u[n].val.a;
			b = p->u[n].val.b;
			break;
		default:
			a = 0;
			b = 0;
			break;
		}
		/* See comment for struct utee_params in utee_types.h */
		up_bbuf->vals[n * 2] = a;
		up_bbuf->vals[n * 2 + 1] = b;
	}

	res = copy_to_user(up, up_bbuf, sizeof(struct utee_params));

	bb_free(up_bbuf, sizeof(struct utee_params));

	return res;
}

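/*
 * Copy output values and updated memref sizes from the user space
 * struct utee_params at @up back into the kernel struct tee_ta_param @p
 * after the TA has returned. The user buffer is duplicated into a bounce
 * buffer first; if that copy fails @p is left untouched.
 */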
static void update_from_utee_param(struct tee_ta_param *p,
				   const struct utee_params *up)
{
	TEE_Result res = TEE_SUCCESS;
	size_t n = 0;
	struct utee_params *up_bbuf = NULL;

	res = BB_MEMDUP_USER(up, sizeof(*up), &up_bbuf);
	if (res)
		return;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		switch (TEE_PARAM_TYPE_GET(p->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			/* See comment for struct utee_params in utee_types.h */
			p->u[n].mem.size = up_bbuf->vals[n * 2 + 1];
			break;
		case TEE_PARAM_TYPE_VALUE_OUTPUT:
		case TEE_PARAM_TYPE_VALUE_INOUT:
			/* See comment for struct utee_params in utee_types.h */
			p->u[n].val.a = up_bbuf->vals[n * 2];
			p->u[n].val.b = up_bbuf->vals[n * 2 + 1];
			break;
		default:
			break;
		}
	}

	bb_free(up_bbuf, sizeof(*up));
}

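/*
 * user_ta_enter() may be re-entered on the same thread, for instance when
 * a TA opens a session towards or invokes another TA. These helpers bound
 * that nesting to CFG_CORE_MAX_SYSCALL_RECURSION levels per thread.
 */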
static bool inc_recursion(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	if (tsd->syscall_recursion >= CFG_CORE_MAX_SYSCALL_RECURSION) {
		DMSG("Maximum allowed recursion depth reached (%u)",
		     CFG_CORE_MAX_SYSCALL_RECURSION);
		return false;
	}

	tsd->syscall_recursion++;
	return true;
}

static void dec_recursion(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	assert(tsd->syscall_recursion);
	tsd->syscall_recursion--;
}

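/*
 * Enter the user TA at entry function @func (open session, invoke command
 * or close session): map the session parameters into the TA address
 * space, reserve room for a struct utee_params at the top of the user
 * stack, switch to the user mode context and run the TA entry point. On
 * return, panic state is handled, output parameters are copied back and
 * the parameter mappings are removed again.
 */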
static TEE_Result user_ta_enter(struct ts_session *session,
				enum utee_entry_func func, uint32_t cmd)
{
	TEE_Result res = TEE_SUCCESS;
	struct utee_params *usr_params = NULL;
	uaddr_t usr_stack = 0;
	struct user_ta_ctx *utc = to_user_ta_ctx(session->ctx);
	struct tee_ta_session *ta_sess = to_ta_session(session);
	struct ts_session *ts_sess __maybe_unused = NULL;
	void *param_va[TEE_NUM_PARAMS] = { NULL };

	if (!inc_recursion()) {
		/* Using this error code since we've run out of resources. */
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out_clr_cancel;
	}
	if (ta_sess->param) {
		/* Map user space memory */
		res = vm_map_param(&utc->uctx, ta_sess->param, param_va);
		if (res != TEE_SUCCESS)
			goto out;
	}

	/* Switch to user ctx */
	ts_push_current_session(session);

	/* Make room for usr_params at top of stack */
	usr_stack = utc->uctx.stack_ptr;
	usr_stack -= ROUNDUP(sizeof(struct utee_params), STACK_ALIGNMENT);
	usr_params = (struct utee_params *)usr_stack;
	if (ta_sess->param)
		res = init_utee_param(usr_params, ta_sess->param, param_va);
	else
		res = clear_user(usr_params, sizeof(*usr_params));

	if (res)
		goto out_pop_session;

	res = thread_enter_user_mode(func, kaddr_to_uref(session),
				     (vaddr_t)usr_params, cmd, usr_stack,
				     utc->uctx.entry_func, utc->uctx.is_32bit,
				     &utc->ta_ctx.panicked,
				     &utc->ta_ctx.panic_code);

	thread_user_clear_vfp(&utc->uctx);

	if (utc->ta_ctx.panicked) {
		abort_print_current_ts();
		DMSG("tee_user_ta_enter: TA panicked with code 0x%x",
		     utc->ta_ctx.panic_code);
		res = TEE_ERROR_TARGET_DEAD;
	} else {
		/*
		 * According to the GP spec the origin should always be set
		 * to the TA after TA execution.
		 */
		ta_sess->err_origin = TEE_ORIGIN_TRUSTED_APP;
	}

	if (ta_sess->param) {
		/* Copy out value results */
		update_from_utee_param(ta_sess->param, usr_params);
	}

out_pop_session:
	if (ta_sess->param) {
		/*
		 * Clean up the parameter mappings added with
		 * vm_map_param() above.
		 */
		vm_clean_param(&utc->uctx);
	}
	ts_sess = ts_pop_current_session();
	assert(ts_sess == session);

out:
	dec_recursion();
out_clr_cancel:
	/*
	 * Reset the cancel state now that the user TA has returned. The
	 * next time the TA is invoked it will be with a new operation that
	 * should not have an old cancellation request pending.
	 */
	ta_sess->cancel = false;
	ta_sess->cancel_mask = true;

	return res;
}

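/* Thin wrappers binding the ts_ops entry points below to user_ta_enter() */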
static TEE_Result user_ta_enter_open_session(struct ts_session *s)
{
	return user_ta_enter(s, UTEE_ENTRY_FUNC_OPEN_SESSION, 0);
}

static TEE_Result user_ta_enter_invoke_cmd(struct ts_session *s, uint32_t cmd)
{
	return user_ta_enter(s, UTEE_ENTRY_FUNC_INVOKE_COMMAND, cmd);
}

static void user_ta_enter_close_session(struct ts_session *s)
{
	/* Only if the TA was fully initialized by ldelf */
	if (!to_user_ta_ctx(s->ctx)->ta_ctx.is_initializing)
		user_ta_enter(s, UTEE_ENTRY_FUNC_CLOSE_SESSION, 0);
}

#if defined(CFG_TA_STATS)
static TEE_Result user_ta_enter_dump_memstats(struct ts_session *s)
{
	return user_ta_enter(s, UTEE_ENTRY_FUNC_DUMP_MEMSTATS, 0);
}
#endif

static void dump_state_no_ldelf_dbg(struct user_ta_ctx *utc)
{
	user_mode_ctx_print_mappings(&utc->uctx);
}

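/*
 * Dump the state of a user TA context, preferring the richer dump done by
 * ldelf when a dump entry point has been registered for the context.
 */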
static void user_ta_dump_state(struct ts_ctx *ctx)
{
	struct user_ta_ctx *utc = to_user_ta_ctx(ctx);

	if (utc->uctx.dump_entry_func) {
		TEE_Result res = ldelf_dump_state(&utc->uctx);

		if (!res || res == TEE_ERROR_TARGET_DEAD)
			return;
		/*
		 * Fall back to dump_state_no_ldelf_dbg() if
		 * ldelf_dump_state() fails for some reason.
		 *
		 * If ldelf_dump_state() failed with panic
		 * we are done since abort_print_current_ts() will be
		 * called which will dump the memory map.
		 */
	}

	dump_state_no_ldelf_dbg(utc);
}

#ifdef CFG_FTRACE_SUPPORT
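/*
 * Collect the TA's ftrace buffer via ldelf and hand it to normal world
 * with OPTEE_RPC_CMD_FTRACE. The RPC payload holds the TA UUID, a "TEE
 * load address" header for new dumps, and the ftrace data itself. The
 * payload is temporarily mapped into the TA address space so that ldelf
 * can write the dump into it.
 */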
static void user_ta_dump_ftrace(struct ts_ctx *ctx)
{
	uint32_t prot = TEE_MATTR_URW;
	struct ts_session *sess = ts_get_current_session();
	struct user_ta_ctx *utc = to_user_ta_ctx(ctx);
	struct thread_param params[3] = { };
	TEE_Result res = TEE_SUCCESS;
	struct mobj *mobj = NULL;
	uint8_t *ubuf = NULL;
	void *buf = NULL;
	size_t pl_sz = 0;
	size_t blen = 0, ld_addr_len = 0;
	uint32_t dump_id = 0;
	vaddr_t va = 0;

	res = ldelf_dump_ftrace(&utc->uctx, NULL, &blen);
	if (res != TEE_ERROR_SHORT_BUFFER)
		return;

#define LOAD_ADDR_DUMP_SIZE	64
	pl_sz = ROUNDUP(blen + sizeof(TEE_UUID) + LOAD_ADDR_DUMP_SIZE,
			SMALL_PAGE_SIZE);

	mobj = thread_rpc_alloc_payload(pl_sz);
	if (!mobj) {
		EMSG("Ftrace thread_rpc_alloc_payload failed");
		return;
	}

	buf = mobj_get_va(mobj, 0, pl_sz);
	if (!buf)
		goto out_free_pl;

	res = vm_map(&utc->uctx, &va, mobj->size, prot, VM_FLAG_EPHEMERAL,
		     mobj, 0);
	if (res)
		goto out_free_pl;

	ubuf = (uint8_t *)va + mobj_get_phys_offs(mobj, mobj->phys_granule);
	memcpy(ubuf, &ctx->uuid, sizeof(TEE_UUID));
	ubuf += sizeof(TEE_UUID);

	if (sess->fbuf)
		dump_id = sess->fbuf->dump_id;
	/* Only print the header when this is a new dump */
	if (!dump_id)
		ld_addr_len = snprintk((char *)ubuf, LOAD_ADDR_DUMP_SIZE,
				       "TEE load address @ %#"PRIxVA"\n",
				       VCORE_START_VA);
	ubuf += ld_addr_len;

	res = ldelf_dump_ftrace(&utc->uctx, ubuf, &blen);
	if (res) {
		EMSG("Ftrace dump failed: %#"PRIx32, res);
		goto out_unmap_pl;
	}

	params[0] = THREAD_PARAM_VALUE(INOUT, dump_id, 0, 0);
	params[1] = THREAD_PARAM_MEMREF(IN, mobj, 0, sizeof(TEE_UUID));
	params[2] = THREAD_PARAM_MEMREF(IN, mobj, sizeof(TEE_UUID),
					blen + ld_addr_len);

	res = thread_rpc_cmd(OPTEE_RPC_CMD_FTRACE, 3, params);
	if (res) {
		EMSG("Ftrace thread_rpc_cmd res: %#"PRIx32, res);
		goto out_unmap_pl;
	}
	if (sess->fbuf)
		sess->fbuf->dump_id = params[0].u.value.a;

out_unmap_pl:
	res = vm_unmap(&utc->uctx, va, mobj->size);
	assert(!res);
out_free_pl:
	thread_rpc_free_payload(mobj);
}
#endif /*CFG_FTRACE_SUPPORT*/

#ifdef CFG_TA_GPROF_SUPPORT
static void user_ta_gprof_set_status(enum ts_gprof_status status)
{
	if (status == TS_GPROF_SUSPEND)
		tee_ta_update_session_utime_suspend();
	else
		tee_ta_update_session_utime_resume();
}
#endif /*CFG_TA_GPROF_SUPPORT*/

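/*
 * Release everything held on behalf of a TA context: sessions this TA has
 * opened towards other TAs, the virtual memory state, and any crypto
 * states, objects and storage enumerators the TA created.
 */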
static void release_utc_state(struct user_ta_ctx *utc)
{
	/*
	 * Close sessions opened by this TA.
	 * Note that tee_ta_close_session() removes the item
	 * from the utc->open_sessions list.
	 */
	while (!TAILQ_EMPTY(&utc->open_sessions)) {
		tee_ta_close_session(TAILQ_FIRST(&utc->open_sessions),
				     &utc->open_sessions, KERN_IDENTITY);
	}

	vm_info_final(&utc->uctx);

	/* Free cryp states created by this TA */
	tee_svc_cryp_free_states(utc);
	/* Close cryp objects opened by this TA */
	tee_obj_close_all(utc);
	/* Free storage enumerators created by this TA */
	tee_svc_storage_close_all_enum(utc);
}

static void free_utc(struct user_ta_ctx *utc)
{
	release_utc_state(utc);
	free(utc);
}

static void user_ta_release_state(struct ts_ctx *ctx)
{
	release_utc_state(to_user_ta_ctx(ctx));
}

static void user_ta_ctx_destroy(struct ts_ctx *ctx)
{
	free_utc(to_user_ta_ctx(ctx));
}

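/* The ASID of the TA address space is used as the TA instance ID */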
static uint32_t user_ta_get_instance_id(struct ts_ctx *ctx)
{
	return to_user_ta_ctx(ctx)->uctx.vm_info.asid;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct ts_ops user_ta_ops __weak __relrodata_unpaged("user_ta_ops") = {
	.enter_open_session = user_ta_enter_open_session,
	.enter_invoke_cmd = user_ta_enter_invoke_cmd,
	.enter_close_session = user_ta_enter_close_session,
#if defined(CFG_TA_STATS)
	.dump_mem_stats = user_ta_enter_dump_memstats,
#endif
	.dump_state = user_ta_dump_state,
#ifdef CFG_FTRACE_SUPPORT
	.dump_ftrace = user_ta_dump_ftrace,
#endif
	.release_state = user_ta_release_state,
	.destroy = user_ta_ctx_destroy,
	.get_instance_id = user_ta_get_instance_id,
	.handle_scall = scall_handle_user_ta,
#ifdef CFG_TA_GPROF_SUPPORT
	.gprof_set_status = user_ta_gprof_set_status,
#endif
};

static void set_ta_ctx_ops(struct tee_ta_ctx *ctx)
{
	ctx->ts_ctx.ops = &user_ta_ops;
}

bool __noprof is_user_ta_ctx(struct ts_ctx *ctx)
{
	return ctx && ctx->ops == &user_ta_ops;
}

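/* Log the registered TA stores once at boot (service_init level) */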
static TEE_Result check_ta_store(void)
{
	const struct ts_store_ops *op = NULL;

	SCATTERED_ARRAY_FOREACH(op, ta_stores, struct ts_store_ops)
		DMSG("TA store: \"%s\"", op->description);

	return TEE_SUCCESS;
}
service_init(check_ta_store);

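/*
 * Allocate and minimally initialize a user TA context for session @s and
 * register it in the tee_ctxes list. Loading ldelf and the TA itself is
 * deferred to tee_ta_complete_user_ta_session(), which runs without
 * tee_ta_mutex held.
 */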
TEE_Result tee_ta_init_user_ta_session(const TEE_UUID *uuid,
				       struct tee_ta_session *s)
{
	TEE_Result res = TEE_SUCCESS;
	struct user_ta_ctx *utc = NULL;

	/*
	 * The caller is expected to hold tee_ta_mutex so that @s can be
	 * updated and the context registered in the tee_ctxes list safely.
	 */
	assert(mutex_is_locked(&tee_ta_mutex));

	utc = calloc(1, sizeof(struct user_ta_ctx));
	if (!utc)
		return TEE_ERROR_OUT_OF_MEMORY;

#ifdef CFG_TA_PAUTH
	res = crypto_rng_read(&utc->uctx.keys, sizeof(utc->uctx.keys));
	if (res) {
		free(utc);
		return res;
	}
#endif

	TAILQ_INIT(&utc->open_sessions);
	TAILQ_INIT(&utc->cryp_states);
	TAILQ_INIT(&utc->objects);
	TAILQ_INIT(&utc->storage_enums);
	condvar_init(&utc->ta_ctx.busy_cv);
	utc->ta_ctx.ref_count = 1;

	/*
	 * Set the context TA operation structure. It is required by the
	 * generic implementation to tell user TA contexts apart from
	 * pseudo TA contexts.
	 */
	set_ta_ctx_ops(&utc->ta_ctx);

	utc->ta_ctx.ts_ctx.uuid = *uuid;
	res = vm_info_init(&utc->uctx, &utc->ta_ctx.ts_ctx);
	if (res) {
		condvar_destroy(&utc->ta_ctx.busy_cv);
		free_utc(utc);
		return res;
	}

	utc->ta_ctx.is_initializing = true;

	assert(!mutex_trylock(&tee_ta_mutex));

	s->ts_sess.ctx = &utc->ta_ctx.ts_ctx;
	s->ts_sess.handle_scall = s->ts_sess.ctx->ops->handle_scall;
	/*
	 * Another thread trying to load this same TA may need to wait
	 * until this context is fully initialized. This is needed to
	 * handle single instance TAs.
	 */
	TAILQ_INSERT_TAIL(&tee_ctxes, &utc->ta_ctx, link);

	return TEE_SUCCESS;
}

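/*
 * Second stage of user TA session setup: load ldelf into the new context
 * and let it load the TA. On failure the half-initialized context is
 * unregistered and freed. Waiters blocked on tee_ta_init_cv are notified
 * in either case.
 */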
TEE_Result tee_ta_complete_user_ta_session(struct tee_ta_session *s)
{
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ts_sess.ctx);
	TEE_Result res = TEE_SUCCESS;

	/*
	 * We must not hold tee_ta_mutex while allocating page tables as
	 * that may otherwise lead to a deadlock.
	 */
	ts_push_current_session(&s->ts_sess);

	res = ldelf_load_ldelf(&utc->uctx);
	if (!res)
		res = ldelf_init_with_ldelf(&s->ts_sess, &utc->uctx);

	ts_pop_current_session();

	mutex_lock(&tee_ta_mutex);

	if (!res) {
		utc->ta_ctx.is_initializing = false;
	} else {
		s->ts_sess.ctx = NULL;
		TAILQ_REMOVE(&tee_ctxes, &utc->ta_ctx, link);
		condvar_destroy(&utc->ta_ctx.busy_cv);
		free_utc(utc);
	}

	/* The state of the context has changed, notify any waiters. */
	condvar_broadcast(&tee_ta_init_cv);

	mutex_unlock(&tee_ta_mutex);

	return res;
}