// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2015-2022 Linaro Limited
 * Copyright (c) 2020, Arm Limited.
 */

#include <assert.h>
#include <compiler.h>
#include <crypto/crypto.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/ldelf_loader.h>
#include <kernel/linker.h>
#include <kernel/panic.h>
#include <kernel/scall.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/ts_store.h>
#include <kernel/user_access.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/file.h>
#include <mm/fobj.h>
#include <mm/mobj.h>
#include <mm/pgt_cache.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <optee_rpc_cmd.h>
#include <printk.h>
#include <signed_hdr.h>
#include <stdlib.h>
#include <string_ext.h>
#include <sys/queue.h>
#include <tee/tee_cryp_utl.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc_storage.h>
#include <trace.h>
#include <types_ext.h>
#include <utee_defines.h>
#include <util.h>

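/*
 * Stage the TA invocation parameters in the user-space struct utee_params
 * pointed to by @up. Memory references are encoded as (user VA, size)
 * pairs and values as (a, b) pairs; unused slots are zeroed. A bounce
 * buffer is filled first so that only a single copy_to_user() is needed.
 */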
static TEE_Result init_utee_param(struct utee_params *up,
				  const struct tee_ta_param *p,
				  void *va[TEE_NUM_PARAMS])
{
	TEE_Result res = TEE_SUCCESS;
	size_t n = 0;
	struct utee_params *up_bbuf = NULL;

	up_bbuf = bb_alloc(sizeof(struct utee_params));
	if (!up_bbuf)
		return TEE_ERROR_OUT_OF_MEMORY;

	up_bbuf->types = p->types;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uintptr_t a;
		uintptr_t b;

		switch (TEE_PARAM_TYPE_GET(p->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_INPUT:
		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			a = (uintptr_t)va[n];
			b = p->u[n].mem.size;
			break;
		case TEE_PARAM_TYPE_VALUE_INPUT:
		case TEE_PARAM_TYPE_VALUE_INOUT:
			a = p->u[n].val.a;
			b = p->u[n].val.b;
			break;
		default:
			a = 0;
			b = 0;
			break;
		}
		/* See comment for struct utee_params in utee_types.h */
		up_bbuf->vals[n * 2] = a;
		up_bbuf->vals[n * 2 + 1] = b;
	}

	res = copy_to_user(up, up_bbuf, sizeof(struct utee_params));

	bb_free(up_bbuf, sizeof(struct utee_params));

	return res;
}

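/*
 * Copy back the parameters a TA may have updated: sizes of output memory
 * references and the a/b words of output values. Input-only parameters
 * are left untouched. If copying from user space fails, @p is simply
 * left unchanged.
 */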
static void update_from_utee_param(struct tee_ta_param *p,
				   const struct utee_params *up)
{
	TEE_Result res = TEE_SUCCESS;
	size_t n = 0;
	struct utee_params *up_bbuf = NULL;

	res = BB_MEMDUP_USER(up, sizeof(*up), &up_bbuf);
	if (res)
		return;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		switch (TEE_PARAM_TYPE_GET(p->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			/* See comment for struct utee_params in utee_types.h */
			p->u[n].mem.size = up_bbuf->vals[n * 2 + 1];
			break;
		case TEE_PARAM_TYPE_VALUE_OUTPUT:
		case TEE_PARAM_TYPE_VALUE_INOUT:
			/* See comment for struct utee_params in utee_types.h */
			p->u[n].val.a = up_bbuf->vals[n * 2];
			p->u[n].val.b = up_bbuf->vals[n * 2 + 1];
			break;
		default:
			break;
		}
	}

	bb_free(up_bbuf, sizeof(*up));
}

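/*
 * Bound the syscall recursion depth, for instance when a TA re-enters
 * the kernel to open a session towards or invoke another TA. The limit
 * is set by CFG_CORE_MAX_SYSCALL_RECURSION.
 */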
static bool inc_recursion(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	if (tsd->syscall_recursion >= CFG_CORE_MAX_SYSCALL_RECURSION) {
		DMSG("Maximum allowed recursion depth reached (%u)",
		     CFG_CORE_MAX_SYSCALL_RECURSION);
		return false;
	}

	tsd->syscall_recursion++;
	return true;
}

static void dec_recursion(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	assert(tsd->syscall_recursion);
	tsd->syscall_recursion--;
}

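/*
 * Enter a user TA at entry function @func: map the parameters (if any)
 * into the TA address space, stage a struct utee_params at the top of
 * the user stack, execute the TA in user mode, and copy updated values
 * and memref sizes back on return. A panicking TA results in
 * TEE_ERROR_TARGET_DEAD.
 */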
static TEE_Result user_ta_enter(struct ts_session *session,
				enum utee_entry_func func, uint32_t cmd)
{
	TEE_Result res = TEE_SUCCESS;
	struct utee_params *usr_params = NULL;
	uaddr_t usr_stack = 0;
	struct user_ta_ctx *utc = to_user_ta_ctx(session->ctx);
	struct tee_ta_session *ta_sess = to_ta_session(session);
	struct ts_session *ts_sess __maybe_unused = NULL;
	void *param_va[TEE_NUM_PARAMS] = { NULL };

	if (!inc_recursion()) {
		/* Using this error code since we've run out of resources. */
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out_clr_cancel;
	}
	if (ta_sess->param) {
		/* Map user space memory */
		res = vm_map_param(&utc->uctx, ta_sess->param, param_va);
		if (res != TEE_SUCCESS)
			goto out;
	}

	/* Switch to user ctx */
	ts_push_current_session(session);

	/* Make room for usr_params at top of stack */
	usr_stack = utc->uctx.stack_ptr;
	usr_stack -= ROUNDUP(sizeof(struct utee_params), STACK_ALIGNMENT);
	usr_params = (struct utee_params *)usr_stack;
	if (ta_sess->param)
		res = init_utee_param(usr_params, ta_sess->param, param_va);
	else
		res = clear_user(usr_params, sizeof(*usr_params));

	if (res)
		goto out_pop_session;

	res = thread_enter_user_mode(func, kaddr_to_uref(session),
				     (vaddr_t)usr_params, cmd, usr_stack,
				     utc->uctx.entry_func, utc->uctx.is_32bit,
				     &utc->ta_ctx.panicked,
				     &utc->ta_ctx.panic_code);

	thread_user_clear_vfp(&utc->uctx);

	if (utc->ta_ctx.panicked) {
		abort_print_current_ts();
		DMSG("tee_user_ta_enter: TA panicked with code 0x%x",
		     utc->ta_ctx.panic_code);
		res = TEE_ERROR_TARGET_DEAD;
	} else {
		/*
		 * According to the GP spec the origin should always be set
		 * to the TA after TA execution.
		 */
		ta_sess->err_origin = TEE_ORIGIN_TRUSTED_APP;
	}

	if (ta_sess->param) {
		/* Copy out value results */
		update_from_utee_param(ta_sess->param, usr_params);
	}

out_pop_session:
	if (ta_sess->param) {
		/*
		 * Clear out the parameter mappings added with
		 * vm_map_param() above.
		 */
		vm_clean_param(&utc->uctx);
	}
	ts_sess = ts_pop_current_session();
	assert(ts_sess == session);

out:
	dec_recursion();
out_clr_cancel:
	/*
	 * Reset the cancel state now that the user TA has returned. The next
	 * time the TA will be invoked will be with a new operation and should
	 * not have an old cancellation pending.
	 */
	ta_sess->cancel = false;
	ta_sess->cancel_mask = true;

	return res;
}

static TEE_Result user_ta_enter_open_session(struct ts_session *s)
{
	return user_ta_enter(s, UTEE_ENTRY_FUNC_OPEN_SESSION, 0);
}

static TEE_Result user_ta_enter_invoke_cmd(struct ts_session *s, uint32_t cmd)
{
	return user_ta_enter(s, UTEE_ENTRY_FUNC_INVOKE_COMMAND, cmd);
}

static void user_ta_enter_close_session(struct ts_session *s)
{
	/* Only if the TA was fully initialized by ldelf */
	if (!to_user_ta_ctx(s->ctx)->ta_ctx.is_initializing)
		user_ta_enter(s, UTEE_ENTRY_FUNC_CLOSE_SESSION, 0);
}

#if defined(CFG_TA_STATS)
static TEE_Result user_ta_enter_dump_memstats(struct ts_session *s)
{
	return user_ta_enter(s, UTEE_ENTRY_FUNC_DUMP_MEMSTATS, 0);
}
#endif

static void dump_state_no_ldelf_dbg(struct user_ta_ctx *utc)
{
	user_mode_ctx_print_mappings(&utc->uctx);
}

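/*
 * Dump the TA state, preferably via ldelf which can provide more detail;
 * fall back to printing the memory mappings directly if ldelf has no
 * dump entry or fails with an unexpected error.
 */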
static void user_ta_dump_state(struct ts_ctx *ctx)
{
	struct user_ta_ctx *utc = to_user_ta_ctx(ctx);

	if (utc->uctx.dump_entry_func) {
		TEE_Result res = ldelf_dump_state(&utc->uctx);

		if (!res || res == TEE_ERROR_TARGET_DEAD)
			return;
		/*
		 * Fall back to dump_state_no_ldelf_dbg() if
		 * ldelf_dump_state() fails for some reason.
		 *
		 * If ldelf_dump_state() failed with panic
		 * we are done since abort_print_current_ts() will be
		 * called which will dump the memory map.
		 */
	}

	dump_state_no_ldelf_dbg(utc);
}

#ifdef CFG_FTRACE_SUPPORT
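/*
 * Send the ftrace buffer of the TA to normal world: query the needed
 * size from ldelf, allocate an RPC shared memory payload, map it into
 * the TA address space, prepend the TA UUID (plus the TEE load address
 * for the first dump of a session) and pass it all to
 * OPTEE_RPC_CMD_FTRACE.
 */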
static void user_ta_dump_ftrace(struct ts_ctx *ctx)
{
	uint32_t prot = TEE_MATTR_URW;
	struct ts_session *sess = ts_get_current_session();
	struct user_ta_ctx *utc = to_user_ta_ctx(ctx);
	struct thread_param params[3] = { };
	TEE_Result res = TEE_SUCCESS;
	struct mobj *mobj = NULL;
	uint8_t *ubuf = NULL;
	void *buf = NULL;
	size_t pl_sz = 0;
	size_t blen = 0, ld_addr_len = 0;
	uint32_t dump_id = 0;
	vaddr_t va = 0;

	res = ldelf_dump_ftrace(&utc->uctx, NULL, &blen);
	if (res != TEE_ERROR_SHORT_BUFFER)
		return;

#define LOAD_ADDR_DUMP_SIZE	64
	pl_sz = ROUNDUP(blen + sizeof(TEE_UUID) + LOAD_ADDR_DUMP_SIZE,
			SMALL_PAGE_SIZE);

	mobj = thread_rpc_alloc_payload(pl_sz);
	if (!mobj) {
		EMSG("Ftrace thread_rpc_alloc_payload failed");
		return;
	}

	buf = mobj_get_va(mobj, 0, pl_sz);
	if (!buf)
		goto out_free_pl;

	res = vm_map(&utc->uctx, &va, mobj->size, prot, VM_FLAG_EPHEMERAL,
		     mobj, 0);
	if (res)
		goto out_free_pl;

	ubuf = (uint8_t *)va + mobj_get_phys_offs(mobj, mobj->phys_granule);
	memcpy(ubuf, &ctx->uuid, sizeof(TEE_UUID));
	ubuf += sizeof(TEE_UUID);

	if (sess->fbuf)
		dump_id = sess->fbuf->dump_id;
	/* Only print the header when this is a new dump */
	if (!dump_id)
		ld_addr_len = snprintk((char *)ubuf, LOAD_ADDR_DUMP_SIZE,
				       "TEE load address @ %#"PRIxVA"\n",
				       VCORE_START_VA);
	ubuf += ld_addr_len;

	res = ldelf_dump_ftrace(&utc->uctx, ubuf, &blen);
	if (res) {
		EMSG("Ftrace dump failed: %#"PRIx32, res);
		goto out_unmap_pl;
	}

	params[0] = THREAD_PARAM_VALUE(INOUT, dump_id, 0, 0);
	params[1] = THREAD_PARAM_MEMREF(IN, mobj, 0, sizeof(TEE_UUID));
	params[2] = THREAD_PARAM_MEMREF(IN, mobj, sizeof(TEE_UUID),
					blen + ld_addr_len);

	res = thread_rpc_cmd(OPTEE_RPC_CMD_FTRACE, 3, params);
	if (res) {
		EMSG("Ftrace thread_rpc_cmd res: %#"PRIx32, res);
		goto out_unmap_pl;
	}
	if (sess->fbuf)
		sess->fbuf->dump_id = params[0].u.value.a;

out_unmap_pl:
	res = vm_unmap(&utc->uctx, va, mobj->size);
	assert(!res);
out_free_pl:
	thread_rpc_free_payload(mobj);
}
#endif /*CFG_FTRACE_SUPPORT*/

#ifdef CFG_TA_GPROF_SUPPORT
static void user_ta_gprof_set_status(enum ts_gprof_status status)
{
	if (status == TS_GPROF_SUSPEND)
		tee_ta_update_session_utime_suspend();
	else
		tee_ta_update_session_utime_resume();
}
#endif /*CFG_TA_GPROF_SUPPORT*/

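/*
 * Release everything owned by the TA context: sessions opened towards
 * other TAs, the virtual memory map, crypto states and objects, and
 * storage enumerators.
 */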
static void release_utc_state(struct user_ta_ctx *utc)
{
	/*
	 * Close sessions opened by this TA.
	 * Note that tee_ta_close_session() removes the item
	 * from the utc->open_sessions list.
	 */
	while (!TAILQ_EMPTY(&utc->open_sessions)) {
		tee_ta_close_session(TAILQ_FIRST(&utc->open_sessions),
				     &utc->open_sessions, KERN_IDENTITY);
	}

	vm_info_final(&utc->uctx);

	/* Free cryp states created by this TA */
	tee_svc_cryp_free_states(utc);
	/* Close cryp objects opened by this TA */
	tee_obj_close_all(utc);
	/* Free enums created by this TA */
	tee_svc_storage_close_all_enum(utc);
}

static void free_utc(struct user_ta_ctx *utc)
{
	release_utc_state(utc);
	free(utc);
}

static void user_ta_release_state(struct ts_ctx *ctx)
{
	release_utc_state(to_user_ta_ctx(ctx));
}

static void user_ta_ctx_destroy(struct ts_ctx *ctx)
{
	free_utc(to_user_ta_ctx(ctx));
}

static uint32_t user_ta_get_instance_id(struct ts_ctx *ctx)
{
	return to_user_ta_ctx(ctx)->uctx.vm_info.asid;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct ts_ops user_ta_ops __weak __relrodata_unpaged("user_ta_ops") = {
	.enter_open_session = user_ta_enter_open_session,
	.enter_invoke_cmd = user_ta_enter_invoke_cmd,
	.enter_close_session = user_ta_enter_close_session,
#if defined(CFG_TA_STATS)
	.dump_mem_stats = user_ta_enter_dump_memstats,
#endif
	.dump_state = user_ta_dump_state,
#ifdef CFG_FTRACE_SUPPORT
	.dump_ftrace = user_ta_dump_ftrace,
#endif
	.release_state = user_ta_release_state,
	.destroy = user_ta_ctx_destroy,
	.get_instance_id = user_ta_get_instance_id,
	.handle_scall = scall_handle_user_ta,
#ifdef CFG_TA_GPROF_SUPPORT
	.gprof_set_status = user_ta_gprof_set_status,
#endif
};

static void set_ta_ctx_ops(struct tee_ta_ctx *ctx)
{
	ctx->ts_ctx.ops = &user_ta_ops;
}

bool __noprof is_user_ta_ctx(struct ts_ctx *ctx)
{
	return ctx && ctx->ops == &user_ta_ops;
}

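/* Log the TA stores registered in the "ta_stores" scattered array */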
static TEE_Result check_ta_store(void)
{
	const struct ts_store_ops *op = NULL;

	SCATTERED_ARRAY_FOREACH(op, ta_stores, struct ts_store_ops)
		DMSG("TA store: \"%s\"", op->description);

	return TEE_SUCCESS;
}
service_init(check_ta_store);

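/*
 * Allocate and register a user TA context for @uuid and attach it to
 * session @s. The context is inserted into tee_ctxes flagged as
 * initializing; loading of the TA binary is completed later with
 * tee_ta_complete_user_ta_session().
 */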
TEE_Result tee_ta_init_user_ta_session(const TEE_UUID *uuid,
				       struct tee_ta_session *s)
{
	TEE_Result res = TEE_SUCCESS;
	struct user_ta_ctx *utc = NULL;
#ifdef CFG_TA_PAUTH
	uint8_t pauth_keys[sizeof(utc->uctx.keys)] = { };
#endif

	/*
	 * Caller is expected to hold tee_ta_mutex for safe changes
	 * in @s and registering of the context in the tee_ctxes list.
	 */
	assert(mutex_is_locked(&tee_ta_mutex));

	utc = calloc(1, sizeof(struct user_ta_ctx));
	if (!utc)
		return TEE_ERROR_OUT_OF_MEMORY;

#ifdef CFG_TA_PAUTH
	res = crypto_rng_read(pauth_keys, sizeof(pauth_keys));
	if (res) {
		free(utc);
		return res;
	}
#endif

	TAILQ_INIT(&utc->open_sessions);
	TAILQ_INIT(&utc->cryp_states);
	TAILQ_INIT(&utc->objects);
	TAILQ_INIT(&utc->storage_enums);
	condvar_init(&utc->ta_ctx.busy_cv);
	utc->ta_ctx.ref_count = 1;

	/*
	 * Set the context TA operation structure. It is required by the
	 * generic implementation to identify userland TA versus pseudo TA
	 * contexts.
	 */
	set_ta_ctx_ops(&utc->ta_ctx);

	utc->ta_ctx.ts_ctx.uuid = *uuid;
	res = vm_info_init(&utc->uctx, &utc->ta_ctx.ts_ctx);
	if (res) {
		condvar_destroy(&utc->ta_ctx.busy_cv);
		free_utc(utc);
		return res;
	}

	utc->ta_ctx.is_initializing = true;

	assert(!mutex_trylock(&tee_ta_mutex));

#ifdef CFG_TA_PAUTH
	memcpy(&utc->uctx.keys, pauth_keys, sizeof(pauth_keys));
	memzero_explicit(pauth_keys, sizeof(pauth_keys));
#endif

	s->ts_sess.ctx = &utc->ta_ctx.ts_ctx;
	s->ts_sess.handle_scall = s->ts_sess.ctx->ops->handle_scall;
	/*
	 * Another thread trying to load this same TA may need to wait
	 * until this context is fully initialized. This is needed to
	 * handle single instance TAs.
	 */
	TAILQ_INSERT_TAIL(&tee_ctxes, &utc->ta_ctx, link);

	return TEE_SUCCESS;
}

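/*
 * Finish initializing a user TA context by loading ldelf and letting
 * ldelf load the TA binary. On failure the context is unregistered and
 * freed. Threads waiting on tee_ta_init_cv are notified either way.
 */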
TEE_Result tee_ta_complete_user_ta_session(struct tee_ta_session *s)
{
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ts_sess.ctx);
	TEE_Result res = TEE_SUCCESS;

	/*
	 * We must not hold tee_ta_mutex while allocating page tables as
	 * that may otherwise lead to a deadlock.
	 */
	ts_push_current_session(&s->ts_sess);

	res = ldelf_load_ldelf(&utc->uctx);
	if (!res)
		res = ldelf_init_with_ldelf(&s->ts_sess, &utc->uctx);

	ts_pop_current_session();

	mutex_lock(&tee_ta_mutex);

	if (!res) {
		utc->ta_ctx.is_initializing = false;
	} else {
		s->ts_sess.ctx = NULL;
		TAILQ_REMOVE(&tee_ctxes, &utc->ta_ctx, link);
		condvar_destroy(&utc->ta_ctx.busy_cv);
		free_utc(utc);
	}

	/* The state has changed for the context, notify any waiters. */
	condvar_broadcast(&tee_ta_init_cv);

	mutex_unlock(&tee_ta_mutex);

	return res;
}