// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2015-2022 Linaro Limited
 * Copyright (c) 2020, Arm Limited.
 */

#include <assert.h>
#include <compiler.h>
#include <crypto/crypto.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/ldelf_loader.h>
#include <kernel/linker.h>
#include <kernel/panic.h>
#include <kernel/scall.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/ts_store.h>
#include <kernel/user_access.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/file.h>
#include <mm/fobj.h>
#include <mm/mobj.h>
#include <mm/pgt_cache.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <optee_rpc_cmd.h>
#include <printk.h>
#include <signed_hdr.h>
#include <stdlib.h>
#include <sys/queue.h>
#include <tee/tee_cryp_utl.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc_storage.h>
#include <trace.h>
#include <types_ext.h>
#include <utee_defines.h>
#include <util.h>

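/*
 * Fill in the user-space struct utee_params *up from the kernel-side
 * parameters @p. Memrefs are passed as the pair (user VA from @va, size)
 * and values as (a, b); entry n lands in vals[n * 2] and vals[n * 2 + 1].
 * For example, a value parameter in slot 0 with a = 1 and b = 2 ends up
 * as vals[0] = 1, vals[1] = 2. A bounce buffer is filled first so the
 * user mapping is only written once, by copy_to_user().
 */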
static TEE_Result init_utee_param(struct utee_params *up,
				  const struct tee_ta_param *p,
				  void *va[TEE_NUM_PARAMS])
{
	TEE_Result res = TEE_SUCCESS;
	size_t n = 0;
	struct utee_params *up_bbuf = NULL;

	up_bbuf = bb_alloc(sizeof(struct utee_params));
	if (!up_bbuf)
		return TEE_ERROR_OUT_OF_MEMORY;

	up_bbuf->types = p->types;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uintptr_t a;
		uintptr_t b;

		switch (TEE_PARAM_TYPE_GET(p->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_INPUT:
		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			a = (uintptr_t)va[n];
			b = p->u[n].mem.size;
			break;
		case TEE_PARAM_TYPE_VALUE_INPUT:
		case TEE_PARAM_TYPE_VALUE_INOUT:
			a = p->u[n].val.a;
			b = p->u[n].val.b;
			break;
		default:
			a = 0;
			b = 0;
			break;
		}
		/* See comment for struct utee_params in utee_types.h */
		up_bbuf->vals[n * 2] = a;
		up_bbuf->vals[n * 2 + 1] = b;
	}

	res = copy_to_user(up, up_bbuf, sizeof(struct utee_params));

	bb_free(up_bbuf, sizeof(struct utee_params));

	return res;
}

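/*
 * Copy the output halves of the parameters back after the TA has
 * returned: updated sizes for [in/]out memrefs and a/b for [in/]out
 * values. The user-space struct is snapshotted with BB_MEMDUP_USER()
 * before it's read; if that copy fails the kernel-side parameters are
 * left untouched.
 */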
static void update_from_utee_param(struct tee_ta_param *p,
				   const struct utee_params *up)
{
	TEE_Result res = TEE_SUCCESS;
	size_t n = 0;
	struct utee_params *up_bbuf = NULL;

	res = BB_MEMDUP_USER(up, sizeof(*up), &up_bbuf);
	if (res)
		return;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		switch (TEE_PARAM_TYPE_GET(p->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			/* See comment for struct utee_params in utee_types.h */
			p->u[n].mem.size = up_bbuf->vals[n * 2 + 1];
			break;
		case TEE_PARAM_TYPE_VALUE_OUTPUT:
		case TEE_PARAM_TYPE_VALUE_INOUT:
			/* See comment for struct utee_params in utee_types.h */
			p->u[n].val.a = up_bbuf->vals[n * 2];
			p->u[n].val.b = up_bbuf->vals[n * 2 + 1];
			break;
		default:
			break;
		}
	}

	bb_free(up_bbuf, sizeof(*up));
}

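/*
 * Syscalls may nest, for instance when a TA invokes another TA which
 * ends up back here via user_ta_enter(). These two helpers bound that
 * nesting with a per-thread counter capped at
 * CFG_CORE_MAX_SYSCALL_RECURSION.
 */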
static bool inc_recursion(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	if (tsd->syscall_recursion >= CFG_CORE_MAX_SYSCALL_RECURSION) {
		DMSG("Maximum allowed recursion depth reached (%u)",
		     CFG_CORE_MAX_SYSCALL_RECURSION);
		return false;
	}

	tsd->syscall_recursion++;
	return true;
}

static void dec_recursion(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	assert(tsd->syscall_recursion);
	tsd->syscall_recursion--;
}

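/*
 * Common entry path for the session entry points below: map the
 * parameters into the TA address space, carve out room for a struct
 * utee_params just below the user stack pointer, run the TA entry
 * function in user mode via thread_enter_user_mode() and finally copy
 * results back, unmap the parameters and pop the session again.
 */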
static TEE_Result user_ta_enter(struct ts_session *session,
				enum utee_entry_func func, uint32_t cmd)
{
	TEE_Result res = TEE_SUCCESS;
	struct utee_params *usr_params = NULL;
	uaddr_t usr_stack = 0;
	struct user_ta_ctx *utc = to_user_ta_ctx(session->ctx);
	struct tee_ta_session *ta_sess = to_ta_session(session);
	struct ts_session *ts_sess __maybe_unused = NULL;
	void *param_va[TEE_NUM_PARAMS] = { NULL };

	if (!inc_recursion()) {
		/* Using this error code since we've run out of resources. */
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out_clr_cancel;
	}
	if (ta_sess->param) {
		/* Map user space memory */
		res = vm_map_param(&utc->uctx, ta_sess->param, param_va);
		if (res != TEE_SUCCESS)
			goto out;
	}

	/* Switch to user ctx */
	ts_push_current_session(session);

	/* Make room for usr_params at top of stack */
	usr_stack = utc->uctx.stack_ptr;
	usr_stack -= ROUNDUP(sizeof(struct utee_params), STACK_ALIGNMENT);
	usr_params = (struct utee_params *)usr_stack;
	if (ta_sess->param)
		res = init_utee_param(usr_params, ta_sess->param, param_va);
	else
		res = clear_user(usr_params, sizeof(*usr_params));

	if (res)
		goto out_pop_session;

	res = thread_enter_user_mode(func, kaddr_to_uref(session),
				     (vaddr_t)usr_params, cmd, usr_stack,
				     utc->uctx.entry_func, utc->uctx.is_32bit,
				     &utc->ta_ctx.panicked,
				     &utc->ta_ctx.panic_code);

	thread_user_clear_vfp(&utc->uctx);

	if (utc->ta_ctx.panicked) {
		abort_print_current_ts();
		DMSG("user_ta_enter: TA panicked with code 0x%x",
		     utc->ta_ctx.panic_code);
		res = TEE_ERROR_TARGET_DEAD;
	} else {
		/*
		 * According to the GP spec the origin should always be set
		 * to the TA after TA execution.
		 */
		ta_sess->err_origin = TEE_ORIGIN_TRUSTED_APP;
	}

	if (ta_sess->param) {
		/* Copy out value results */
		update_from_utee_param(ta_sess->param, usr_params);
	}

out_pop_session:
	if (ta_sess->param) {
		/*
		 * Clear out the parameter mappings added with
		 * vm_map_param() above.
		 */
		vm_clean_param(&utc->uctx);
	}
	ts_sess = ts_pop_current_session();
	assert(ts_sess == session);

out:
	dec_recursion();
out_clr_cancel:
	/*
	 * Clear the cancel state now that the user TA has returned. The
	 * next time the TA is invoked it will be with a new operation and
	 * should not have an old cancellation pending.
	 */
	ta_sess->cancel = false;

	return res;
}

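/*
 * Thin ts_ops wrappers mapping the session life cycle onto
 * user_ta_enter(). Close is skipped for a context that ldelf never
 * finished initializing, since such a TA never ran any of its entry
 * points.
 */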
static TEE_Result user_ta_enter_open_session(struct ts_session *s)
{
	return user_ta_enter(s, UTEE_ENTRY_FUNC_OPEN_SESSION, 0);
}

static TEE_Result user_ta_enter_invoke_cmd(struct ts_session *s, uint32_t cmd)
{
	return user_ta_enter(s, UTEE_ENTRY_FUNC_INVOKE_COMMAND, cmd);
}

static void user_ta_enter_close_session(struct ts_session *s)
{
	/* Only if the TA was fully initialized by ldelf */
	if (!to_user_ta_ctx(s->ctx)->uctx.is_initializing)
		user_ta_enter(s, UTEE_ENTRY_FUNC_CLOSE_SESSION, 0);
}

#if defined(CFG_TA_STATS)
static TEE_Result user_ta_enter_dump_memstats(struct ts_session *s)
{
	return user_ta_enter(s, UTEE_ENTRY_FUNC_DUMP_MEMSTATS, 0);
}
#endif

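/*
 * State dumping: user_ta_dump_state() prefers the dump entry point that
 * ldelf may have registered and only falls back to printing the kernel's
 * view of the mappings when that entry point is absent or fails.
 */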
static void dump_state_no_ldelf_dbg(struct user_ta_ctx *utc)
{
	user_mode_ctx_print_mappings(&utc->uctx);
}

static void user_ta_dump_state(struct ts_ctx *ctx)
{
	struct user_ta_ctx *utc = to_user_ta_ctx(ctx);

	if (utc->uctx.dump_entry_func) {
		TEE_Result res = ldelf_dump_state(&utc->uctx);

		if (!res || res == TEE_ERROR_TARGET_DEAD)
			return;
		/*
		 * Fall back to dump_state_no_ldelf_dbg() if
		 * ldelf_dump_state() fails for some reason.
		 *
		 * If ldelf_dump_state() failed with a panic we are done
		 * since abort_print_current_ts() will be called which will
		 * dump the memory map.
		 */
	}

	dump_state_no_ldelf_dbg(utc);
}

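/*
 * Dump the TA's ftrace buffer to normal world over RPC. The shared
 * payload is laid out as the TA UUID, followed by a short "TEE load
 * address" banner of at most LOAD_ADDR_DUMP_SIZE bytes, followed by the
 * ftrace data itself. The required size is probed first by passing a
 * NULL buffer, which is expected to fail with TEE_ERROR_SHORT_BUFFER.
 */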
#ifdef CFG_FTRACE_SUPPORT
static void user_ta_dump_ftrace(struct ts_ctx *ctx)
{
	uint32_t prot = TEE_MATTR_URW;
	struct user_ta_ctx *utc = to_user_ta_ctx(ctx);
	struct thread_param params[3] = { };
	TEE_Result res = TEE_SUCCESS;
	struct mobj *mobj = NULL;
	uint8_t *ubuf = NULL;
	void *buf = NULL;
	size_t pl_sz = 0;
	size_t blen = 0, ld_addr_len = 0;
	vaddr_t va = 0;

	res = ldelf_dump_ftrace(&utc->uctx, NULL, &blen);
	if (res != TEE_ERROR_SHORT_BUFFER)
		return;

#define LOAD_ADDR_DUMP_SIZE	64
	pl_sz = ROUNDUP(blen + sizeof(TEE_UUID) + LOAD_ADDR_DUMP_SIZE,
			SMALL_PAGE_SIZE);

	mobj = thread_rpc_alloc_payload(pl_sz);
	if (!mobj) {
		EMSG("Ftrace thread_rpc_alloc_payload failed");
		return;
	}

	buf = mobj_get_va(mobj, 0, pl_sz);
	if (!buf)
		goto out_free_pl;

	res = vm_map(&utc->uctx, &va, mobj->size, prot, VM_FLAG_EPHEMERAL,
		     mobj, 0);
	if (res)
		goto out_free_pl;

	ubuf = (uint8_t *)va + mobj_get_phys_offs(mobj, mobj->phys_granule);
	memcpy(ubuf, &ctx->uuid, sizeof(TEE_UUID));
	ubuf += sizeof(TEE_UUID);

	ld_addr_len = snprintk((char *)ubuf, LOAD_ADDR_DUMP_SIZE,
			       "TEE load address @ %#"PRIxVA"\n",
			       VCORE_START_VA);
	ubuf += ld_addr_len;

	res = ldelf_dump_ftrace(&utc->uctx, ubuf, &blen);
	if (res) {
		EMSG("Ftrace dump failed: %#"PRIx32, res);
		goto out_unmap_pl;
	}

	params[0] = THREAD_PARAM_VALUE(INOUT, 0, 0, 0);
	params[1] = THREAD_PARAM_MEMREF(IN, mobj, 0, sizeof(TEE_UUID));
	params[2] = THREAD_PARAM_MEMREF(IN, mobj, sizeof(TEE_UUID),
					blen + ld_addr_len);

	res = thread_rpc_cmd(OPTEE_RPC_CMD_FTRACE, 3, params);
	if (res)
		EMSG("Ftrace thread_rpc_cmd res: %#"PRIx32, res);

out_unmap_pl:
	res = vm_unmap(&utc->uctx, va, mobj->size);
	assert(!res);
out_free_pl:
	thread_rpc_free_payload(mobj);
}
#endif /*CFG_FTRACE_SUPPORT*/

#ifdef CFG_TA_GPROF_SUPPORT
static void user_ta_gprof_set_status(enum ts_gprof_status status)
{
	if (status == TS_GPROF_SUSPEND)
		tee_ta_update_session_utime_suspend();
	else
		tee_ta_update_session_utime_resume();
}
#endif /*CFG_TA_GPROF_SUPPORT*/

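/*
 * Release everything owned by the TA instance: sessions it has opened
 * towards other TAs, its address space, and all crypto states, objects
 * and storage enumerators. Shared between the release_state hook and the
 * destroy path, where free_utc() additionally frees the context itself.
 */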
static void release_utc_state(struct user_ta_ctx *utc)
{
	/*
	 * Close sessions opened by this TA.
	 * Note that tee_ta_close_session() removes the item
	 * from the utc->open_sessions list.
	 */
	while (!TAILQ_EMPTY(&utc->open_sessions)) {
		tee_ta_close_session(TAILQ_FIRST(&utc->open_sessions),
				     &utc->open_sessions, KERN_IDENTITY);
	}

	vm_info_final(&utc->uctx);

	/* Free cryp states created by this TA */
	tee_svc_cryp_free_states(utc);
	/* Close cryp objects opened by this TA */
	tee_obj_close_all(utc);
	/* Free enums created by this TA */
	tee_svc_storage_close_all_enum(utc);
}

static void free_utc(struct user_ta_ctx *utc)
{
	release_utc_state(utc);
	free(utc);
}

static void user_ta_release_state(struct ts_ctx *ctx)
{
	release_utc_state(to_user_ta_ctx(ctx));
}

static void user_ta_ctx_destroy(struct ts_ctx *ctx)
{
	free_utc(to_user_ta_ctx(ctx));
}

static uint32_t user_ta_get_instance_id(struct ts_ctx *ctx)
{
	return to_user_ta_ctx(ctx)->uctx.vm_info.asid;
}

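/*
 * Dispatch table used by the trusted-services framework for user TA
 * contexts. Note that is_user_ta_ctx() identifies a user TA context by
 * comparing its ops pointer against this very table.
 */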
410  * Note: this variable is weak just to ease breaking its dependency chain
411  * when added to the unpaged area.
412  */
413 const struct ts_ops user_ta_ops __weak __relrodata_unpaged("user_ta_ops") = {
414 	.enter_open_session = user_ta_enter_open_session,
415 	.enter_invoke_cmd = user_ta_enter_invoke_cmd,
416 	.enter_close_session = user_ta_enter_close_session,
417 #if defined(CFG_TA_STATS)
418 	.dump_mem_stats = user_ta_enter_dump_memstats,
419 #endif
420 	.dump_state = user_ta_dump_state,
421 #ifdef CFG_FTRACE_SUPPORT
422 	.dump_ftrace = user_ta_dump_ftrace,
423 #endif
424 	.release_state = user_ta_release_state,
425 	.destroy = user_ta_ctx_destroy,
426 	.get_instance_id = user_ta_get_instance_id,
427 	.handle_scall = scall_handle_user_ta,
428 #ifdef CFG_TA_GPROF_SUPPORT
429 	.gprof_set_status = user_ta_gprof_set_status,
430 #endif
431 };
432 
433 static void set_ta_ctx_ops(struct tee_ta_ctx *ctx)
434 {
435 	ctx->ts_ctx.ops = &user_ta_ops;
436 }
437 
438 bool __noprof is_user_ta_ctx(struct ts_ctx *ctx)
439 {
440 	return ctx && ctx->ops == &user_ta_ops;
441 }
442 
443 static TEE_Result check_ta_store(void)
444 {
445 	const struct ts_store_ops *op = NULL;
446 
447 	SCATTERED_ARRAY_FOREACH(op, ta_stores, struct ts_store_ops)
448 		DMSG("TA store: \"%s\"", op->description);
449 
450 	return TEE_SUCCESS;
451 }
452 service_init(check_ta_store);
453 
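/*
 * Create and register the context for a user TA session: allocate and
 * initialize the context, link it into tee_ctxes (so other threads
 * loading the same single instance TA can wait for it), then have ldelf
 * load the TA proper. On failure the context is unlinked and freed again.
 */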
TEE_Result tee_ta_init_user_ta_session(const TEE_UUID *uuid,
				       struct tee_ta_session *s)
{
	TEE_Result res = TEE_SUCCESS;
	struct user_ta_ctx *utc = NULL;

	utc = calloc(1, sizeof(struct user_ta_ctx));
	if (!utc)
		return TEE_ERROR_OUT_OF_MEMORY;

	TAILQ_INIT(&utc->open_sessions);
	TAILQ_INIT(&utc->cryp_states);
	TAILQ_INIT(&utc->objects);
	TAILQ_INIT(&utc->storage_enums);
	condvar_init(&utc->ta_ctx.busy_cv);
	utc->ta_ctx.ref_count = 1;

	/*
	 * Set the context's TA operation structure. The generic
	 * implementation needs it to tell userland TA contexts apart from
	 * pseudo TA contexts.
	 */
	set_ta_ctx_ops(&utc->ta_ctx);

	utc->ta_ctx.ts_ctx.uuid = *uuid;
	res = vm_info_init(&utc->uctx, &utc->ta_ctx.ts_ctx);
	if (res)
		goto out;
	utc->uctx.is_initializing = true;

#ifdef CFG_TA_PAUTH
	crypto_rng_read(&utc->uctx.keys, sizeof(utc->uctx.keys));
#endif

	mutex_lock(&tee_ta_mutex);
	s->ts_sess.ctx = &utc->ta_ctx.ts_ctx;
	s->ts_sess.handle_scall = s->ts_sess.ctx->ops->handle_scall;
	/*
	 * Another thread trying to load this same TA may need to wait
	 * until this context is fully initialized. This is needed to
	 * handle single instance TAs.
	 */
	TAILQ_INSERT_TAIL(&tee_ctxes, &utc->ta_ctx, link);
	mutex_unlock(&tee_ta_mutex);

	/*
	 * We must not hold tee_ta_mutex while allocating page tables as
	 * that may otherwise lead to a deadlock.
	 */
	ts_push_current_session(&s->ts_sess);

	res = ldelf_load_ldelf(&utc->uctx);
	if (!res)
		res = ldelf_init_with_ldelf(&s->ts_sess, &utc->uctx);

	ts_pop_current_session();

	mutex_lock(&tee_ta_mutex);

	if (!res) {
		utc->uctx.is_initializing = false;
	} else {
		s->ts_sess.ctx = NULL;
		TAILQ_REMOVE(&tee_ctxes, &utc->ta_ctx, link);
	}

	/* The state of the context has changed, notify any waiters. */
	condvar_broadcast(&tee_ta_init_cv);

	mutex_unlock(&tee_ta_mutex);

out:
	if (res) {
		condvar_destroy(&utc->ta_ctx.busy_cv);
		free_utc(utc);
	}

	return res;
}