xref: /optee_os/core/kernel/user_ta.c (revision 32b3180828fa15a49ccc86ecb4be9d274c140c89)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2015-2022 Linaro Limited
 * Copyright (c) 2020, Arm Limited.
 */

#include <assert.h>
#include <compiler.h>
#include <crypto/crypto.h>
#include <ctype.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/ldelf_loader.h>
#include <kernel/linker.h>
#include <kernel/panic.h>
#include <kernel/scall.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/ts_store.h>
#include <kernel/user_access.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/file.h>
#include <mm/fobj.h>
#include <mm/mobj.h>
#include <mm/pgt_cache.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <optee_rpc_cmd.h>
#include <printk.h>
#include <signed_hdr.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>
#include <ta_pub_key.h>
#include <tee/tee_cryp_utl.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc.h>
#include <tee/tee_svc_storage.h>
#include <tee/uuid.h>
#include <trace.h>
#include <types_ext.h>
#include <utee_defines.h>
#include <util.h>

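/*
 * Copy the kernel-side TA parameters into the struct utee_params at @up in
 * TA (user space) memory. Memref parameters are passed as the user virtual
 * address from @va[] plus the buffer size, value parameters as their a/b
 * pair. The structure is staged in a bounce buffer and then written with
 * copy_to_user().
 */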
static TEE_Result init_utee_param(struct utee_params *up,
				  const struct tee_ta_param *p,
				  void *va[TEE_NUM_PARAMS])
{
	TEE_Result res = TEE_SUCCESS;
	size_t n = 0;
	struct utee_params *up_bbuf = NULL;

	up_bbuf = bb_alloc(sizeof(struct utee_params));
	if (!up_bbuf)
		return TEE_ERROR_OUT_OF_MEMORY;

	up_bbuf->types = p->types;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uintptr_t a;
		uintptr_t b;

		switch (TEE_PARAM_TYPE_GET(p->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_INPUT:
		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			a = (uintptr_t)va[n];
			b = p->u[n].mem.size;
			break;
		case TEE_PARAM_TYPE_VALUE_INPUT:
		case TEE_PARAM_TYPE_VALUE_INOUT:
			a = p->u[n].val.a;
			b = p->u[n].val.b;
			break;
		default:
			a = 0;
			b = 0;
			break;
		}
		/* See comment for struct utee_params in utee_types.h */
		up_bbuf->vals[n * 2] = a;
		up_bbuf->vals[n * 2 + 1] = b;
	}

	res = copy_to_user(up, up_bbuf, sizeof(struct utee_params));

	bb_free(up_bbuf, sizeof(struct utee_params));

	return res;
}

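/*
 * Propagate values the TA may have written into @up back into the
 * kernel-side parameters @p: sizes for output memrefs and a/b for output
 * values. The user structure is read once via BB_MEMDUP_USER(); if that
 * copy fails the parameters are left untouched.
 */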
static void update_from_utee_param(struct tee_ta_param *p,
				   const struct utee_params *up)
{
	TEE_Result res = TEE_SUCCESS;
	size_t n = 0;
	struct utee_params *up_bbuf = NULL;

	res = BB_MEMDUP_USER(up, sizeof(*up), &up_bbuf);
	if (res)
		return;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		switch (TEE_PARAM_TYPE_GET(p->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			/* See comment for struct utee_params in utee_types.h */
			p->u[n].mem.size = up_bbuf->vals[n * 2 + 1];
			break;
		case TEE_PARAM_TYPE_VALUE_OUTPUT:
		case TEE_PARAM_TYPE_VALUE_INOUT:
			/* See comment for struct utee_params in utee_types.h */
			p->u[n].val.a = up_bbuf->vals[n * 2];
			p->u[n].val.b = up_bbuf->vals[n * 2 + 1];
			break;
		default:
			break;
		}
	}

	bb_free(up_bbuf, sizeof(*up));
}

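/*
 * Per-thread guard against unbounded nesting of user TA invocations,
 * e.g. when a TA opens a session towards or invokes another TA. The
 * depth is capped at CFG_CORE_MAX_SYSCALL_RECURSION.
 */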
static bool inc_recursion(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	if (tsd->syscall_recursion >= CFG_CORE_MAX_SYSCALL_RECURSION) {
		DMSG("Maximum allowed recursion depth reached (%u)",
		     CFG_CORE_MAX_SYSCALL_RECURSION);
		return false;
	}

	tsd->syscall_recursion++;
	return true;
}

static void dec_recursion(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	assert(tsd->syscall_recursion);
	tsd->syscall_recursion--;
}

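/*
 * Common entry path for all user TA entry functions: map the parameters
 * into the TA address space, push the session, build a struct utee_params
 * just below the user stack pointer and enter user mode via
 * thread_enter_user_mode(). On return, output parameters are copied back,
 * the parameter mappings are removed and any pending cancellation for the
 * session is cleared.
 */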
static TEE_Result user_ta_enter(struct ts_session *session,
				enum utee_entry_func func, uint32_t cmd)
{
	TEE_Result res = TEE_SUCCESS;
	struct utee_params *usr_params = NULL;
	uaddr_t usr_stack = 0;
	struct user_ta_ctx *utc = to_user_ta_ctx(session->ctx);
	struct tee_ta_session *ta_sess = to_ta_session(session);
	struct ts_session *ts_sess __maybe_unused = NULL;
	void *param_va[TEE_NUM_PARAMS] = { NULL };

	if (!inc_recursion()) {
		/* Using this error code since we've run out of resources. */
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out_clr_cancel;
	}
	if (ta_sess->param) {
		/* Map user space memory */
		res = vm_map_param(&utc->uctx, ta_sess->param, param_va);
		if (res != TEE_SUCCESS)
			goto out;
	}

	/* Switch to user ctx */
	ts_push_current_session(session);

	/* Make room for usr_params at top of stack */
	usr_stack = utc->uctx.stack_ptr;
	usr_stack -= ROUNDUP(sizeof(struct utee_params), STACK_ALIGNMENT);
	usr_params = (struct utee_params *)usr_stack;
	if (ta_sess->param)
		res = init_utee_param(usr_params, ta_sess->param, param_va);
	else
		res = clear_user(usr_params, sizeof(*usr_params));

	if (res)
		goto out_pop_session;

	res = thread_enter_user_mode(func, kaddr_to_uref(session),
				     (vaddr_t)usr_params, cmd, usr_stack,
				     utc->uctx.entry_func, utc->uctx.is_32bit,
				     &utc->ta_ctx.panicked,
				     &utc->ta_ctx.panic_code);

	thread_user_clear_vfp(&utc->uctx);

	if (utc->ta_ctx.panicked) {
		abort_print_current_ts();
		DMSG("tee_user_ta_enter: TA panicked with code 0x%x",
		     utc->ta_ctx.panic_code);
		res = TEE_ERROR_TARGET_DEAD;
	} else {
		/*
		 * According to the GP spec the origin should always be set
		 * to the TA after TA execution.
		 */
		ta_sess->err_origin = TEE_ORIGIN_TRUSTED_APP;
	}

	if (ta_sess->param) {
		/* Copy out value results */
		update_from_utee_param(ta_sess->param, usr_params);
	}

out_pop_session:
	if (ta_sess->param) {
		/*
		 * Clean up the parameter mappings added with
		 * vm_map_param() above.
		 */
		vm_clean_param(&utc->uctx);
	}
	ts_sess = ts_pop_current_session();
	assert(ts_sess == session);

out:
	dec_recursion();
out_clr_cancel:
	/*
	 * Clear the cancel state now that the user TA has returned. The next
	 * time the TA is invoked it will be with a new operation and should
	 * not have an old cancellation pending.
	 */
	ta_sess->cancel = false;

	return res;
}

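/* Thin wrappers selecting the utee entry function for user_ta_enter(). */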
static TEE_Result user_ta_enter_open_session(struct ts_session *s)
{
	return user_ta_enter(s, UTEE_ENTRY_FUNC_OPEN_SESSION, 0);
}

static TEE_Result user_ta_enter_invoke_cmd(struct ts_session *s, uint32_t cmd)
{
	return user_ta_enter(s, UTEE_ENTRY_FUNC_INVOKE_COMMAND, cmd);
}

static void user_ta_enter_close_session(struct ts_session *s)
{
	/* Only if the TA was fully initialized by ldelf */
	if (!to_user_ta_ctx(s->ctx)->uctx.is_initializing)
		user_ta_enter(s, UTEE_ENTRY_FUNC_CLOSE_SESSION, 0);
}

#if defined(CFG_TA_STATS)
static TEE_Result user_ta_enter_dump_memstats(struct ts_session *s)
{
	return user_ta_enter(s, UTEE_ENTRY_FUNC_DUMP_MEMSTATS, 0);
}
#endif

static void dump_state_no_ldelf_dbg(struct user_ta_ctx *utc)
{
	user_mode_ctx_print_mappings(&utc->uctx);
}

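/*
 * Dump the state of a user TA: prefer the richer dump done through ldelf
 * when the TA has a dump entry point, otherwise (or if ldelf fails without
 * panicking) fall back to printing the memory mappings.
 */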
static void user_ta_dump_state(struct ts_ctx *ctx)
{
	struct user_ta_ctx *utc = to_user_ta_ctx(ctx);

	if (utc->uctx.dump_entry_func) {
		TEE_Result res = ldelf_dump_state(&utc->uctx);

		if (!res || res == TEE_ERROR_TARGET_DEAD)
			return;
		/*
		 * Fall back to dump_state_no_ldelf_dbg() if
		 * ldelf_dump_state() fails for some reason.
		 *
		 * If ldelf_dump_state() failed with panic
		 * we are done since abort_print_current_ts() will be
		 * called which will dump the memory map.
		 */
	}

	dump_state_no_ldelf_dbg(utc);
}

#ifdef CFG_FTRACE_SUPPORT
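/*
 * Push the TA's function tracing buffer to normal world: query the needed
 * size from ldelf (TEE_ERROR_SHORT_BUFFER expected), allocate and map an
 * RPC payload into the TA, prefix it with the TA UUID and the TEE core
 * load address, fill in the ftrace data and hand it over with an
 * OPTEE_RPC_CMD_FTRACE request.
 */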
static void user_ta_dump_ftrace(struct ts_ctx *ctx)
{
	uint32_t prot = TEE_MATTR_URW;
	struct user_ta_ctx *utc = to_user_ta_ctx(ctx);
	struct thread_param params[3] = { };
	TEE_Result res = TEE_SUCCESS;
	struct mobj *mobj = NULL;
	uint8_t *ubuf = NULL;
	void *buf = NULL;
	size_t pl_sz = 0;
	size_t blen = 0, ld_addr_len = 0;
	vaddr_t va = 0;

	res = ldelf_dump_ftrace(&utc->uctx, NULL, &blen);
	if (res != TEE_ERROR_SHORT_BUFFER)
		return;

#define LOAD_ADDR_DUMP_SIZE	64
	pl_sz = ROUNDUP(blen + sizeof(TEE_UUID) + LOAD_ADDR_DUMP_SIZE,
			SMALL_PAGE_SIZE);

	mobj = thread_rpc_alloc_payload(pl_sz);
	if (!mobj) {
		EMSG("Ftrace thread_rpc_alloc_payload failed");
		return;
	}

	buf = mobj_get_va(mobj, 0, pl_sz);
	if (!buf)
		goto out_free_pl;

	res = vm_map(&utc->uctx, &va, mobj->size, prot, VM_FLAG_EPHEMERAL,
		     mobj, 0);
	if (res)
		goto out_free_pl;

	ubuf = (uint8_t *)va + mobj_get_phys_offs(mobj, mobj->phys_granule);
	memcpy(ubuf, &ctx->uuid, sizeof(TEE_UUID));
	ubuf += sizeof(TEE_UUID);

	ld_addr_len = snprintk((char *)ubuf, LOAD_ADDR_DUMP_SIZE,
			       "TEE load address @ %#"PRIxVA"\n",
			       VCORE_START_VA);
	ubuf += ld_addr_len;

	res = ldelf_dump_ftrace(&utc->uctx, ubuf, &blen);
	if (res) {
		EMSG("Ftrace dump failed: %#"PRIx32, res);
		goto out_unmap_pl;
	}

	params[0] = THREAD_PARAM_VALUE(INOUT, 0, 0, 0);
	params[1] = THREAD_PARAM_MEMREF(IN, mobj, 0, sizeof(TEE_UUID));
	params[2] = THREAD_PARAM_MEMREF(IN, mobj, sizeof(TEE_UUID),
					blen + ld_addr_len);

	res = thread_rpc_cmd(OPTEE_RPC_CMD_FTRACE, 3, params);
	if (res)
		EMSG("Ftrace thread_rpc_cmd res: %#"PRIx32, res);

out_unmap_pl:
	res = vm_unmap(&utc->uctx, va, mobj->size);
	assert(!res);
out_free_pl:
	thread_rpc_free_payload(mobj);
}
#endif /*CFG_FTRACE_SUPPORT*/

#ifdef CFG_TA_GPROF_SUPPORT
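/*
 * Track the session's user mode execution time across gprof suspend and
 * resume events.
 */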
static void user_ta_gprof_set_status(enum ts_gprof_status status)
{
	if (status == TS_GPROF_SUSPEND)
		tee_ta_update_session_utime_suspend();
	else
		tee_ta_update_session_utime_resume();
}
#endif /*CFG_TA_GPROF_SUPPORT*/

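/*
 * Release everything still held on behalf of the TA: sessions it has
 * opened towards other TAs, its virtual memory info, and the crypto
 * states, objects and storage enumerators created through the syscall
 * interface.
 */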
static void release_utc_state(struct user_ta_ctx *utc)
{
	/*
	 * Close sessions opened by this TA.
	 * Note that tee_ta_close_session() removes the item
	 * from the utc->open_sessions list.
	 */
	while (!TAILQ_EMPTY(&utc->open_sessions)) {
		tee_ta_close_session(TAILQ_FIRST(&utc->open_sessions),
				     &utc->open_sessions, KERN_IDENTITY);
	}

	vm_info_final(&utc->uctx);

	/* Free cryp states created by this TA */
	tee_svc_cryp_free_states(utc);
	/* Close cryp objects opened by this TA */
	tee_obj_close_all(utc);
	/* Free storage enumerators created by this TA */
	tee_svc_storage_close_all_enum(utc);
}

static void free_utc(struct user_ta_ctx *utc)
{
	release_utc_state(utc);
	free(utc);
}

static void user_ta_release_state(struct ts_ctx *ctx)
{
	release_utc_state(to_user_ta_ctx(ctx));
}

static void user_ta_ctx_destroy(struct ts_ctx *ctx)
{
	free_utc(to_user_ta_ctx(ctx));
}

static uint32_t user_ta_get_instance_id(struct ts_ctx *ctx)
{
	return to_user_ta_ctx(ctx)->uctx.vm_info.asid;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct ts_ops user_ta_ops __weak __relrodata_unpaged("user_ta_ops") = {
	.enter_open_session = user_ta_enter_open_session,
	.enter_invoke_cmd = user_ta_enter_invoke_cmd,
	.enter_close_session = user_ta_enter_close_session,
#if defined(CFG_TA_STATS)
	.dump_mem_stats = user_ta_enter_dump_memstats,
#endif
	.dump_state = user_ta_dump_state,
#ifdef CFG_FTRACE_SUPPORT
	.dump_ftrace = user_ta_dump_ftrace,
#endif
	.release_state = user_ta_release_state,
	.destroy = user_ta_ctx_destroy,
	.get_instance_id = user_ta_get_instance_id,
	.handle_scall = scall_handle_user_ta,
#ifdef CFG_TA_GPROF_SUPPORT
	.gprof_set_status = user_ta_gprof_set_status,
#endif
};

static void set_ta_ctx_ops(struct tee_ta_ctx *ctx)
{
	ctx->ts_ctx.ops = &user_ta_ops;
}

bool is_user_ta_ctx(struct ts_ctx *ctx)
{
	return ctx && ctx->ops == &user_ta_ops;
}

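/* List the registered TA stores in the log during core initialization. */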
static TEE_Result check_ta_store(void)
{
	const struct ts_store_ops *op = NULL;

	SCATTERED_ARRAY_FOREACH(op, ta_stores, struct ts_store_ops)
		DMSG("TA store: \"%s\"", op->description);

	return TEE_SUCCESS;
}
service_init(check_ta_store);

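/*
 * Create the user TA context for @uuid and bind it to session @s. The
 * context is inserted into tee_ctxes with is_initializing set so that
 * other threads opening the same (single instance) TA can wait for it,
 * then ldelf is loaded and used to load the TA itself. On failure the
 * context is unlinked again and freed.
 */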
TEE_Result tee_ta_init_user_ta_session(const TEE_UUID *uuid,
				       struct tee_ta_session *s)
{
	TEE_Result res = TEE_SUCCESS;
	struct user_ta_ctx *utc = NULL;

	utc = calloc(1, sizeof(struct user_ta_ctx));
	if (!utc)
		return TEE_ERROR_OUT_OF_MEMORY;

	TAILQ_INIT(&utc->open_sessions);
	TAILQ_INIT(&utc->cryp_states);
	TAILQ_INIT(&utc->objects);
	TAILQ_INIT(&utc->storage_enums);
	condvar_init(&utc->ta_ctx.busy_cv);
	utc->ta_ctx.ref_count = 1;

	/*
	 * Set the context TA operation structure. The generic implementation
	 * relies on it to distinguish user mode TA contexts from pseudo TA
	 * contexts.
	 */
	set_ta_ctx_ops(&utc->ta_ctx);

	utc->ta_ctx.ts_ctx.uuid = *uuid;
	res = vm_info_init(&utc->uctx, &utc->ta_ctx.ts_ctx);
	if (res)
		goto out;
	utc->uctx.is_initializing = true;

#ifdef CFG_TA_PAUTH
	crypto_rng_read(&utc->uctx.keys, sizeof(utc->uctx.keys));
#endif

	mutex_lock(&tee_ta_mutex);
	s->ts_sess.ctx = &utc->ta_ctx.ts_ctx;
	s->ts_sess.handle_scall = s->ts_sess.ctx->ops->handle_scall;
	/*
	 * Another thread trying to load this same TA may need to wait
	 * until this context is fully initialized. This is needed to
	 * handle single instance TAs.
	 */
	TAILQ_INSERT_TAIL(&tee_ctxes, &utc->ta_ctx, link);
	mutex_unlock(&tee_ta_mutex);

	/*
	 * We must not hold tee_ta_mutex while allocating page tables as
	 * that may otherwise lead to a deadlock.
	 */
	ts_push_current_session(&s->ts_sess);

	res = ldelf_load_ldelf(&utc->uctx);
	if (!res)
		res = ldelf_init_with_ldelf(&s->ts_sess, &utc->uctx);

	ts_pop_current_session();

	mutex_lock(&tee_ta_mutex);

	if (!res) {
		utc->uctx.is_initializing = false;
	} else {
		s->ts_sess.ctx = NULL;
		TAILQ_REMOVE(&tee_ctxes, &utc->ta_ctx, link);
	}
	/* The state of the context has changed, notify any waiters. */
	condvar_broadcast(&tee_ta_init_cv);

	mutex_unlock(&tee_ta_mutex);

out:
	if (res) {
		condvar_destroy(&utc->ta_ctx.busy_cv);
		free_utc(utc);
	}

	return res;
}