xref: /optee_os/core/kernel/user_ta.c (revision 614b28146e96edda3e92c316eb78b592e00072c2)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2015-2022 Linaro Limited
 * Copyright (c) 2020, Arm Limited.
 */

#include <assert.h>
#include <compiler.h>
#include <crypto/crypto.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/ldelf_loader.h>
#include <kernel/linker.h>
#include <kernel/panic.h>
#include <kernel/scall.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/ts_store.h>
#include <kernel/user_access.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/file.h>
#include <mm/fobj.h>
#include <mm/mobj.h>
#include <mm/pgt_cache.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <optee_rpc_cmd.h>
#include <printk.h>
#include <signed_hdr.h>
#include <stdlib.h>
#include <sys/queue.h>
#include <tee/tee_cryp_utl.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc_storage.h>
#include <trace.h>
#include <types_ext.h>
#include <utee_defines.h>
#include <util.h>

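/*
 * Build a struct utee_params for the TA at the user space address @up.
 * The parameters in @p are marshalled into a kernel bounce buffer and
 * then copied to user space with copy_to_user(). Memref parameters are
 * passed as the already mapped user virtual address in @va together
 * with the buffer size.
 */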
static TEE_Result init_utee_param(struct utee_params *up,
				  const struct tee_ta_param *p,
				  void *va[TEE_NUM_PARAMS])
{
	TEE_Result res = TEE_SUCCESS;
	size_t n = 0;
	struct utee_params *up_bbuf = NULL;

	up_bbuf = bb_alloc(sizeof(struct utee_params));
	if (!up_bbuf)
		return TEE_ERROR_OUT_OF_MEMORY;

	up_bbuf->types = p->types;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uintptr_t a;
		uintptr_t b;

		switch (TEE_PARAM_TYPE_GET(p->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_INPUT:
		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			a = (uintptr_t)va[n];
			b = p->u[n].mem.size;
			break;
		case TEE_PARAM_TYPE_VALUE_INPUT:
		case TEE_PARAM_TYPE_VALUE_INOUT:
			a = p->u[n].val.a;
			b = p->u[n].val.b;
			break;
		default:
			a = 0;
			b = 0;
			break;
		}
		/* See comment for struct utee_params in utee_types.h */
		up_bbuf->vals[n * 2] = a;
		up_bbuf->vals[n * 2 + 1] = b;
	}

	res = copy_to_user(up, up_bbuf, sizeof(struct utee_params));

	bb_free(up_bbuf, sizeof(struct utee_params));

	return res;
}

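/*
 * Copy back the parameter values the TA may have updated: sizes of
 * output and inout memrefs and the a/b members of output and inout
 * values are read from the user space struct utee_params at @up via a
 * bounce buffer copy.
 */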
static void update_from_utee_param(struct tee_ta_param *p,
				   const struct utee_params *up)
{
	TEE_Result res = TEE_SUCCESS;
	size_t n = 0;
	struct utee_params *up_bbuf = NULL;

	res = BB_MEMDUP_USER(up, sizeof(*up), &up_bbuf);
	if (res)
		return;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		switch (TEE_PARAM_TYPE_GET(p->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			/* See comment for struct utee_params in utee_types.h */
			p->u[n].mem.size = up_bbuf->vals[n * 2 + 1];
			break;
		case TEE_PARAM_TYPE_VALUE_OUTPUT:
		case TEE_PARAM_TYPE_VALUE_INOUT:
			/* See comment for struct utee_params in utee_types.h */
			p->u[n].val.a = up_bbuf->vals[n * 2];
			p->u[n].val.b = up_bbuf->vals[n * 2 + 1];
			break;
		default:
			break;
		}
	}

	bb_free(up_bbuf, sizeof(*up));
}

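/*
 * Track how deeply TA invocations nest on this thread, for instance a
 * TA invoking another TA. The depth is capped at
 * CFG_CORE_MAX_SYSCALL_RECURSION so that nested invocations cannot
 * recurse without bound.
 */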
static bool inc_recursion(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	if (tsd->syscall_recursion >= CFG_CORE_MAX_SYSCALL_RECURSION) {
		DMSG("Maximum allowed recursion depth reached (%u)",
		     CFG_CORE_MAX_SYSCALL_RECURSION);
		return false;
	}

	tsd->syscall_recursion++;
	return true;
}

static void dec_recursion(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	assert(tsd->syscall_recursion);
	tsd->syscall_recursion--;
}

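/*
 * Enter the user TA at the entry function @func: map the session
 * parameters, place a struct utee_params at the top of the user stack
 * and switch to user mode. On return the output parameters are copied
 * back and the cancellation state of the session is reset.
 */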
static TEE_Result user_ta_enter(struct ts_session *session,
				enum utee_entry_func func, uint32_t cmd)
{
	TEE_Result res = TEE_SUCCESS;
	struct utee_params *usr_params = NULL;
	uaddr_t usr_stack = 0;
	struct user_ta_ctx *utc = to_user_ta_ctx(session->ctx);
	struct tee_ta_session *ta_sess = to_ta_session(session);
	struct ts_session *ts_sess __maybe_unused = NULL;
	void *param_va[TEE_NUM_PARAMS] = { NULL };

	if (!inc_recursion()) {
		/* Using this error code since we've run out of resources. */
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out_clr_cancel;
	}
	if (ta_sess->param) {
		/* Map user space memory */
		res = vm_map_param(&utc->uctx, ta_sess->param, param_va);
		if (res != TEE_SUCCESS)
			goto out;
	}

	/* Switch to user ctx */
	ts_push_current_session(session);

	/* Make room for usr_params at top of stack */
	usr_stack = utc->uctx.stack_ptr;
	usr_stack -= ROUNDUP(sizeof(struct utee_params), STACK_ALIGNMENT);
	usr_params = (struct utee_params *)usr_stack;
	if (ta_sess->param)
		res = init_utee_param(usr_params, ta_sess->param, param_va);
	else
		res = clear_user(usr_params, sizeof(*usr_params));

	if (res)
		goto out_pop_session;

	res = thread_enter_user_mode(func, kaddr_to_uref(session),
				     (vaddr_t)usr_params, cmd, usr_stack,
				     utc->uctx.entry_func, utc->uctx.is_32bit,
				     &utc->ta_ctx.panicked,
				     &utc->ta_ctx.panic_code);

	thread_user_clear_vfp(&utc->uctx);

	if (utc->ta_ctx.panicked) {
		abort_print_current_ts();
		DMSG("user_ta_enter: TA panicked with code 0x%x",
		     utc->ta_ctx.panic_code);
		res = TEE_ERROR_TARGET_DEAD;
	} else {
		/*
		 * According to the GP spec the origin should always be
		 * set to the TA after TA execution.
		 */
		ta_sess->err_origin = TEE_ORIGIN_TRUSTED_APP;
	}

	if (ta_sess->param) {
		/* Copy out value results */
		update_from_utee_param(ta_sess->param, usr_params);
	}

out_pop_session:
	if (ta_sess->param) {
		/*
		 * Clean out the parameter mappings added with
		 * vm_map_param() above.
		 */
		vm_clean_param(&utc->uctx);
	}
	ts_sess = ts_pop_current_session();
	assert(ts_sess == session);

out:
	dec_recursion();
out_clr_cancel:
	/*
	 * Reset the cancel state now that the user TA has returned. The
	 * next invocation of the TA will be a new operation and should
	 * not have a stale cancellation pending.
	 */
	ta_sess->cancel = false;
	ta_sess->cancel_mask = true;

	return res;
}

static TEE_Result user_ta_enter_open_session(struct ts_session *s)
{
	return user_ta_enter(s, UTEE_ENTRY_FUNC_OPEN_SESSION, 0);
}

static TEE_Result user_ta_enter_invoke_cmd(struct ts_session *s, uint32_t cmd)
{
	return user_ta_enter(s, UTEE_ENTRY_FUNC_INVOKE_COMMAND, cmd);
}

static void user_ta_enter_close_session(struct ts_session *s)
{
	/* Only if the TA was fully initialized by ldelf */
	if (!to_user_ta_ctx(s->ctx)->ta_ctx.is_initializing)
		user_ta_enter(s, UTEE_ENTRY_FUNC_CLOSE_SESSION, 0);
}

#if defined(CFG_TA_STATS)
static TEE_Result user_ta_enter_dump_memstats(struct ts_session *s)
{
	return user_ta_enter(s, UTEE_ENTRY_FUNC_DUMP_MEMSTATS, 0);
}
#endif

static void dump_state_no_ldelf_dbg(struct user_ta_ctx *utc)
{
	user_mode_ctx_print_mappings(&utc->uctx);
}

static void user_ta_dump_state(struct ts_ctx *ctx)
{
	struct user_ta_ctx *utc = to_user_ta_ctx(ctx);

	if (utc->uctx.dump_entry_func) {
		TEE_Result res = ldelf_dump_state(&utc->uctx);

		if (!res || res == TEE_ERROR_TARGET_DEAD)
			return;
		/*
		 * Fall back to dump_state_no_ldelf_dbg() if
		 * ldelf_dump_state() fails for some reason.
		 *
		 * If ldelf_dump_state() failed with panic
		 * we are done since abort_print_current_ts() will be
		 * called which will dump the memory map.
		 */
	}

	dump_state_no_ldelf_dbg(utc);
}

#ifdef CFG_FTRACE_SUPPORT
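/*
 * Fetch the ftrace buffer from the TA through ldelf and hand it to the
 * normal world with an OPTEE_RPC_CMD_FTRACE RPC. The shared payload
 * starts with the TA UUID, followed by a text line giving the TEE core
 * load address and then the raw ftrace data.
 */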
static void user_ta_dump_ftrace(struct ts_ctx *ctx)
{
	uint32_t prot = TEE_MATTR_URW;
	struct user_ta_ctx *utc = to_user_ta_ctx(ctx);
	struct thread_param params[3] = { };
	TEE_Result res = TEE_SUCCESS;
	struct mobj *mobj = NULL;
	uint8_t *ubuf = NULL;
	void *buf = NULL;
	size_t pl_sz = 0;
	size_t blen = 0, ld_addr_len = 0;
	vaddr_t va = 0;

	res = ldelf_dump_ftrace(&utc->uctx, NULL, &blen);
	if (res != TEE_ERROR_SHORT_BUFFER)
		return;

#define LOAD_ADDR_DUMP_SIZE	64
	pl_sz = ROUNDUP(blen + sizeof(TEE_UUID) + LOAD_ADDR_DUMP_SIZE,
			SMALL_PAGE_SIZE);

	mobj = thread_rpc_alloc_payload(pl_sz);
	if (!mobj) {
		EMSG("Ftrace thread_rpc_alloc_payload failed");
		return;
	}

	buf = mobj_get_va(mobj, 0, pl_sz);
	if (!buf)
		goto out_free_pl;

	res = vm_map(&utc->uctx, &va, mobj->size, prot, VM_FLAG_EPHEMERAL,
		     mobj, 0);
	if (res)
		goto out_free_pl;

	ubuf = (uint8_t *)va + mobj_get_phys_offs(mobj, mobj->phys_granule);
	memcpy(ubuf, &ctx->uuid, sizeof(TEE_UUID));
	ubuf += sizeof(TEE_UUID);

	ld_addr_len = snprintk((char *)ubuf, LOAD_ADDR_DUMP_SIZE,
			       "TEE load address @ %#"PRIxVA"\n",
			       VCORE_START_VA);
	ubuf += ld_addr_len;

	res = ldelf_dump_ftrace(&utc->uctx, ubuf, &blen);
	if (res) {
		EMSG("Ftrace dump failed: %#"PRIx32, res);
		goto out_unmap_pl;
	}

	params[0] = THREAD_PARAM_VALUE(INOUT, 0, 0, 0);
	params[1] = THREAD_PARAM_MEMREF(IN, mobj, 0, sizeof(TEE_UUID));
	params[2] = THREAD_PARAM_MEMREF(IN, mobj, sizeof(TEE_UUID),
					blen + ld_addr_len);

	res = thread_rpc_cmd(OPTEE_RPC_CMD_FTRACE, 3, params);
	if (res)
		EMSG("Ftrace thread_rpc_cmd res: %#"PRIx32, res);

out_unmap_pl:
	res = vm_unmap(&utc->uctx, va, mobj->size);
	assert(!res);
out_free_pl:
	thread_rpc_free_payload(mobj);
}
#endif /*CFG_FTRACE_SUPPORT*/

#ifdef CFG_TA_GPROF_SUPPORT
static void user_ta_gprof_set_status(enum ts_gprof_status status)
{
	if (status == TS_GPROF_SUSPEND)
		tee_ta_update_session_utime_suspend();
	else
		tee_ta_update_session_utime_resume();
}
#endif /*CFG_TA_GPROF_SUPPORT*/

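/*
 * Release everything owned by the TA context: sessions it has opened
 * towards other TAs, its virtual memory mappings and the crypto
 * states, objects and storage enumerators created through the TEE
 * Internal Core API. The context structure itself is not freed here.
 */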
static void release_utc_state(struct user_ta_ctx *utc)
{
	/*
	 * Close sessions opened by this TA
	 * Note that tee_ta_close_session() removes the item
	 * from the utc->open_sessions list.
	 */
	while (!TAILQ_EMPTY(&utc->open_sessions)) {
		tee_ta_close_session(TAILQ_FIRST(&utc->open_sessions),
				     &utc->open_sessions, KERN_IDENTITY);
	}

	vm_info_final(&utc->uctx);

	/* Free cryp states created by this TA */
	tee_svc_cryp_free_states(utc);
	/* Close cryp objects opened by this TA */
	tee_obj_close_all(utc);
	/* Free storage enumerators created by this TA */
	tee_svc_storage_close_all_enum(utc);
}

static void free_utc(struct user_ta_ctx *utc)
{
	release_utc_state(utc);
	free(utc);
}

static void user_ta_release_state(struct ts_ctx *ctx)
{
	release_utc_state(to_user_ta_ctx(ctx));
}

static void user_ta_ctx_destroy(struct ts_ctx *ctx)
{
	free_utc(to_user_ta_ctx(ctx));
}

static uint32_t user_ta_get_instance_id(struct ts_ctx *ctx)
{
	return to_user_ta_ctx(ctx)->uctx.vm_info.asid;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct ts_ops user_ta_ops __weak __relrodata_unpaged("user_ta_ops") = {
	.enter_open_session = user_ta_enter_open_session,
	.enter_invoke_cmd = user_ta_enter_invoke_cmd,
	.enter_close_session = user_ta_enter_close_session,
#if defined(CFG_TA_STATS)
	.dump_mem_stats = user_ta_enter_dump_memstats,
#endif
	.dump_state = user_ta_dump_state,
#ifdef CFG_FTRACE_SUPPORT
	.dump_ftrace = user_ta_dump_ftrace,
#endif
	.release_state = user_ta_release_state,
	.destroy = user_ta_ctx_destroy,
	.get_instance_id = user_ta_get_instance_id,
	.handle_scall = scall_handle_user_ta,
#ifdef CFG_TA_GPROF_SUPPORT
	.gprof_set_status = user_ta_gprof_set_status,
#endif
};

static void set_ta_ctx_ops(struct tee_ta_ctx *ctx)
{
	ctx->ts_ctx.ops = &user_ta_ops;
}

bool __noprof is_user_ta_ctx(struct ts_ctx *ctx)
{
	return ctx && ctx->ops == &user_ta_ops;
}

static TEE_Result check_ta_store(void)
{
	const struct ts_store_ops *op = NULL;

	SCATTERED_ARRAY_FOREACH(op, ta_stores, struct ts_store_ops)
		DMSG("TA store: \"%s\"", op->description);

	return TEE_SUCCESS;
}
service_init(check_ta_store);

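/*
 * Allocate and register a user TA context for @uuid and bind it to the
 * session @s. The context is inserted into tee_ctxes marked as still
 * initializing; the TA binary itself is loaded later by
 * tee_ta_complete_user_ta_session().
 */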
TEE_Result tee_ta_init_user_ta_session(const TEE_UUID *uuid,
				       struct tee_ta_session *s)
{
	TEE_Result res = TEE_SUCCESS;
	struct user_ta_ctx *utc = NULL;

	/*
	 * Caller is expected to hold tee_ta_mutex for safe changes
	 * in @s and registering of the context in tee_ctxes list.
	 */
	assert(mutex_is_locked(&tee_ta_mutex));

	utc = calloc(1, sizeof(struct user_ta_ctx));
	if (!utc)
		return TEE_ERROR_OUT_OF_MEMORY;

#ifdef CFG_TA_PAUTH
	res = crypto_rng_read(&utc->uctx.keys, sizeof(utc->uctx.keys));
	if (res) {
		free(utc);
		return res;
	}
#endif

	TAILQ_INIT(&utc->open_sessions);
	TAILQ_INIT(&utc->cryp_states);
	TAILQ_INIT(&utc->objects);
	TAILQ_INIT(&utc->storage_enums);
	condvar_init(&utc->ta_ctx.busy_cv);
	utc->ta_ctx.ref_count = 1;

	/*
	 * Set the context's TA operations structure. The generic
	 * implementation requires it to distinguish user-land TA
	 * contexts from pseudo TA contexts.
	 */
	set_ta_ctx_ops(&utc->ta_ctx);

	utc->ta_ctx.ts_ctx.uuid = *uuid;
	res = vm_info_init(&utc->uctx, &utc->ta_ctx.ts_ctx);
	if (res) {
		condvar_destroy(&utc->ta_ctx.busy_cv);
		free_utc(utc);
		return res;
	}

	utc->ta_ctx.is_initializing = true;

	assert(!mutex_trylock(&tee_ta_mutex));

	s->ts_sess.ctx = &utc->ta_ctx.ts_ctx;
	s->ts_sess.handle_scall = s->ts_sess.ctx->ops->handle_scall;
	/*
	 * Another thread trying to load this same TA may need to wait
	 * until this context is fully initialized. This is needed to
	 * handle single instance TAs.
	 */
	TAILQ_INSERT_TAIL(&tee_ctxes, &utc->ta_ctx, link);

	return TEE_SUCCESS;
}

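/*
 * Finish initialization of a user TA context created by
 * tee_ta_init_user_ta_session(): load ldelf into the context and let
 * it load the TA. On failure the half-initialized context is removed
 * from tee_ctxes and freed. Waiters on tee_ta_init_cv are notified in
 * both cases.
 */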
TEE_Result tee_ta_complete_user_ta_session(struct tee_ta_session *s)
{
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ts_sess.ctx);
	TEE_Result res = TEE_SUCCESS;

	/*
	 * We must not hold tee_ta_mutex while allocating page tables as
	 * that may otherwise lead to a deadlock.
	 */
	ts_push_current_session(&s->ts_sess);

	res = ldelf_load_ldelf(&utc->uctx);
	if (!res)
		res = ldelf_init_with_ldelf(&s->ts_sess, &utc->uctx);

	ts_pop_current_session();

	mutex_lock(&tee_ta_mutex);

	if (!res) {
		utc->ta_ctx.is_initializing = false;
	} else {
		s->ts_sess.ctx = NULL;
		TAILQ_REMOVE(&tee_ctxes, &utc->ta_ctx, link);
		condvar_destroy(&utc->ta_ctx.busy_cv);
		free_utc(utc);
	}

	/* The state of the context has changed, notify any waiters. */
	condvar_broadcast(&tee_ta_init_cv);

	mutex_unlock(&tee_ta_mutex);

	return res;
}
551