xref: /optee_os/core/kernel/user_ta.c (revision 4edd96e6d7a7228e907cf498b23e5b5fbdaf39a0)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * Copyright (c) 2015-2022 Linaro Limited
5  * Copyright (c) 2020, Arm Limited.
6  */
7 
8 #include <assert.h>
9 #include <compiler.h>
10 #include <crypto/crypto.h>
11 #include <ctype.h>
12 #include <initcall.h>
13 #include <keep.h>
14 #include <kernel/ldelf_loader.h>
15 #include <kernel/linker.h>
16 #include <kernel/panic.h>
17 #include <kernel/scall.h>
18 #include <kernel/tee_misc.h>
19 #include <kernel/tee_ta_manager.h>
20 #include <kernel/thread.h>
21 #include <kernel/ts_store.h>
22 #include <kernel/user_access.h>
23 #include <kernel/user_mode_ctx.h>
24 #include <kernel/user_ta.h>
25 #include <mm/core_memprot.h>
26 #include <mm/core_mmu.h>
27 #include <mm/file.h>
28 #include <mm/fobj.h>
29 #include <mm/mobj.h>
30 #include <mm/pgt_cache.h>
31 #include <mm/tee_mm.h>
32 #include <mm/tee_pager.h>
33 #include <mm/vm.h>
34 #include <optee_rpc_cmd.h>
35 #include <printk.h>
36 #include <signed_hdr.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <sys/queue.h>
40 #include <ta_pub_key.h>
41 #include <tee/tee_cryp_utl.h>
42 #include <tee/tee_obj.h>
43 #include <tee/tee_svc_cryp.h>
44 #include <tee/tee_svc.h>
45 #include <tee/tee_svc_storage.h>
46 #include <tee/uuid.h>
47 #include <trace.h>
48 #include <types_ext.h>
49 #include <utee_defines.h>
50 #include <util.h>
51 
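/*
 * Build a struct utee_params in a bounce buffer from the session
 * parameters @p, using the user-space addresses in @va for memref
 * parameters, and copy the result to the user-space location @up.
 */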
52 static TEE_Result init_utee_param(struct utee_params *up,
53 				  const struct tee_ta_param *p,
54 				  void *va[TEE_NUM_PARAMS])
55 {
56 	TEE_Result res = TEE_SUCCESS;
57 	size_t n = 0;
58 	struct utee_params *up_bbuf = NULL;
59 
60 	up_bbuf = bb_alloc(sizeof(struct utee_params));
61 	if (!up_bbuf)
62 		return TEE_ERROR_OUT_OF_MEMORY;
63 
64 	up_bbuf->types = p->types;
65 
66 	for (n = 0; n < TEE_NUM_PARAMS; n++) {
67 		uintptr_t a;
68 		uintptr_t b;
69 
70 		switch (TEE_PARAM_TYPE_GET(p->types, n)) {
71 		case TEE_PARAM_TYPE_MEMREF_INPUT:
72 		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
73 		case TEE_PARAM_TYPE_MEMREF_INOUT:
74 			a = (uintptr_t)va[n];
75 			b = p->u[n].mem.size;
76 			break;
77 		case TEE_PARAM_TYPE_VALUE_INPUT:
78 		case TEE_PARAM_TYPE_VALUE_INOUT:
79 			a = p->u[n].val.a;
80 			b = p->u[n].val.b;
81 			break;
82 		default:
83 			a = 0;
84 			b = 0;
85 			break;
86 		}
87 		/* See comment for struct utee_params in utee_types.h */
88 		up_bbuf->vals[n * 2] = a;
89 		up_bbuf->vals[n * 2 + 1] = b;
90 	}
91 
92 	res = copy_to_user(up, up_bbuf, sizeof(struct utee_params));
93 
94 	bb_free(up_bbuf, sizeof(struct utee_params));
95 
96 	return res;
97 }
98 
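/*
 * Copy output results back from the user-space struct utee_params @up
 * into @p once the TA has returned: updated sizes for output/inout
 * memrefs and a/b values for output/inout value parameters.
 */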
99 static void update_from_utee_param(struct tee_ta_param *p,
100 				   const struct utee_params *up)
101 {
102 	TEE_Result res = TEE_SUCCESS;
103 	size_t n = 0;
104 	struct utee_params *up_bbuf = NULL;
105 	void *bbuf = NULL;
106 
107 	res = bb_memdup_user(up, sizeof(*up), &bbuf);
108 	if (res)
109 		return;
110 
111 	up_bbuf = bbuf;
112 
113 	for (n = 0; n < TEE_NUM_PARAMS; n++) {
114 		switch (TEE_PARAM_TYPE_GET(p->types, n)) {
115 		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
116 		case TEE_PARAM_TYPE_MEMREF_INOUT:
117 			/* See comment for struct utee_params in utee_types.h */
118 			p->u[n].mem.size = up_bbuf->vals[n * 2 + 1];
119 			break;
120 		case TEE_PARAM_TYPE_VALUE_OUTPUT:
121 		case TEE_PARAM_TYPE_VALUE_INOUT:
122 			/* See comment for struct utee_params in utee_types.h */
123 			p->u[n].val.a = up_bbuf->vals[n * 2];
124 			p->u[n].val.b = up_bbuf->vals[n * 2 + 1];
125 			break;
126 		default:
127 			break;
128 		}
129 	}
130 
131 	bb_free(bbuf, sizeof(*up));
132 }
133 
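/*
 * Track the depth of nested user TA invocations on the current thread
 * so it can be bounded by CFG_CORE_MAX_SYSCALL_RECURSION.
 */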
134 static bool inc_recursion(void)
135 {
136 	struct thread_specific_data *tsd = thread_get_tsd();
137 
138 	if (tsd->syscall_recursion >= CFG_CORE_MAX_SYSCALL_RECURSION) {
139 		DMSG("Maximum allowed recursion depth reached (%u)",
140 		     CFG_CORE_MAX_SYSCALL_RECURSION);
141 		return false;
142 	}
143 
144 	tsd->syscall_recursion++;
145 	return true;
146 }
147 
148 static void dec_recursion(void)
149 {
150 	struct thread_specific_data *tsd = thread_get_tsd();
151 
152 	assert(tsd->syscall_recursion);
153 	tsd->syscall_recursion--;
154 }
155 
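/*
 * Enter a user TA at entry point @func: map the session parameters into
 * the TA address space, place a struct utee_params at the top of the
 * user stack, run the TA in user mode and, on return, copy back output
 * parameters. A panicking TA is reported as TEE_ERROR_TARGET_DEAD.
 */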
156 static TEE_Result user_ta_enter(struct ts_session *session,
157 				enum utee_entry_func func, uint32_t cmd)
158 {
159 	TEE_Result res = TEE_SUCCESS;
160 	struct utee_params *usr_params = NULL;
161 	uaddr_t usr_stack = 0;
162 	struct user_ta_ctx *utc = to_user_ta_ctx(session->ctx);
163 	struct tee_ta_session *ta_sess = to_ta_session(session);
164 	struct ts_session *ts_sess __maybe_unused = NULL;
165 	void *param_va[TEE_NUM_PARAMS] = { NULL };
166 
167 	if (!inc_recursion()) {
168 		/* Using this error code since we've run out of resources. */
169 		res = TEE_ERROR_OUT_OF_MEMORY;
170 		goto out_clr_cancel;
171 	}
172 	if (ta_sess->param) {
173 		/* Map user space memory */
174 		res = vm_map_param(&utc->uctx, ta_sess->param, param_va);
175 		if (res != TEE_SUCCESS)
176 			goto out;
177 	}
178 
179 	/* Switch to user ctx */
180 	ts_push_current_session(session);
181 
182 	/* Make room for usr_params at top of stack */
183 	usr_stack = utc->uctx.stack_ptr;
184 	usr_stack -= ROUNDUP(sizeof(struct utee_params), STACK_ALIGNMENT);
185 	usr_params = (struct utee_params *)usr_stack;
186 	if (ta_sess->param)
187 		res = init_utee_param(usr_params, ta_sess->param, param_va);
188 	else
189 		res = clear_user(usr_params, sizeof(*usr_params));
190 
191 	if (res)
192 		goto out_pop_session;
193 
194 	res = thread_enter_user_mode(func, kaddr_to_uref(session),
195 				     (vaddr_t)usr_params, cmd, usr_stack,
196 				     utc->uctx.entry_func, utc->uctx.is_32bit,
197 				     &utc->ta_ctx.panicked,
198 				     &utc->ta_ctx.panic_code);
199 
200 	thread_user_clear_vfp(&utc->uctx);
201 
202 	if (utc->ta_ctx.panicked) {
203 		abort_print_current_ts();
204 		DMSG("user_ta_enter: TA panicked with code 0x%x",
205 		     utc->ta_ctx.panic_code);
206 		res = TEE_ERROR_TARGET_DEAD;
207 	} else {
208 		/*
209 		 * According to the GP spec the origin should always be set
210 		 * to the TA after TA execution
211 		 */
212 		ta_sess->err_origin = TEE_ORIGIN_TRUSTED_APP;
213 	}
214 
215 	if (ta_sess->param) {
216 		/* Copy out value results */
217 		update_from_utee_param(ta_sess->param, usr_params);
218 	}
219 
220 out_pop_session:
221 	if (ta_sess->param) {
222 		/*
223 		 * Clear out the parameter mappings added with
224 		 * vm_map_param() above.
225 		 */
226 		vm_clean_param(&utc->uctx);
227 	}
228 	ts_sess = ts_pop_current_session();
229 	assert(ts_sess == session);
230 
231 out:
232 	dec_recursion();
233 out_clr_cancel:
234 	/*
235 	 * Clear the cancel state now that the user TA has returned. The next
236 	 * time the TA is invoked it will be with a new operation and should
237 	 * not have an old cancellation pending.
238 	 */
239 	ta_sess->cancel = false;
240 
241 	return res;
242 }
243 
244 static TEE_Result user_ta_enter_open_session(struct ts_session *s)
245 {
246 	return user_ta_enter(s, UTEE_ENTRY_FUNC_OPEN_SESSION, 0);
247 }
248 
249 static TEE_Result user_ta_enter_invoke_cmd(struct ts_session *s, uint32_t cmd)
250 {
251 	return user_ta_enter(s, UTEE_ENTRY_FUNC_INVOKE_COMMAND, cmd);
252 }
253 
254 static void user_ta_enter_close_session(struct ts_session *s)
255 {
256 	/* Only if the TA was fully initialized by ldelf */
257 	if (!to_user_ta_ctx(s->ctx)->uctx.is_initializing)
258 		user_ta_enter(s, UTEE_ENTRY_FUNC_CLOSE_SESSION, 0);
259 }
260 
261 #if defined(CFG_TA_STATS)
262 static TEE_Result user_ta_enter_dump_memstats(struct ts_session *s)
263 {
264 	return user_ta_enter(s, UTEE_ENTRY_FUNC_DUMP_MEMSTATS, 0);
265 }
266 #endif
267 
268 static void dump_state_no_ldelf_dbg(struct user_ta_ctx *utc)
269 {
270 	user_mode_ctx_print_mappings(&utc->uctx);
271 }
272 
273 static void user_ta_dump_state(struct ts_ctx *ctx)
274 {
275 	struct user_ta_ctx *utc = to_user_ta_ctx(ctx);
276 
277 	if (utc->uctx.dump_entry_func) {
278 		TEE_Result res = ldelf_dump_state(&utc->uctx);
279 
280 		if (!res || res == TEE_ERROR_TARGET_DEAD)
281 			return;
282 		/*
283 		 * Fall back to dump_state_no_ldelf_dbg() if
284 		 * ldelf_dump_state() fails for some reason.
285 		 *
286 		 * If ldelf_dump_state() failed with a panic we are done,
287 		 * since abort_print_current_ts() will be called and it
288 		 * will dump the memory map.
289 		 */
290 	}
291 
292 	dump_state_no_ldelf_dbg(utc);
293 }
294 
295 #ifdef CFG_FTRACE_SUPPORT
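/*
 * Dump function tracing data for the TA: query the needed size from
 * ldelf, allocate an RPC shared memory payload, map it into the TA
 * address space, let ldelf fill it in after the TA UUID and the TEE
 * load address, and pass the buffer to normal world with
 * OPTEE_RPC_CMD_FTRACE.
 */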
296 static void user_ta_dump_ftrace(struct ts_ctx *ctx)
297 {
298 	uint32_t prot = TEE_MATTR_URW;
299 	struct user_ta_ctx *utc = to_user_ta_ctx(ctx);
300 	struct thread_param params[3] = { };
301 	TEE_Result res = TEE_SUCCESS;
302 	struct mobj *mobj = NULL;
303 	uint8_t *ubuf = NULL;
304 	void *buf = NULL;
305 	size_t pl_sz = 0;
306 	size_t blen = 0, ld_addr_len = 0;
307 	vaddr_t va = 0;
308 
309 	res = ldelf_dump_ftrace(&utc->uctx, NULL, &blen);
310 	if (res != TEE_ERROR_SHORT_BUFFER)
311 		return;
312 
313 #define LOAD_ADDR_DUMP_SIZE	64
314 	pl_sz = ROUNDUP(blen + sizeof(TEE_UUID) + LOAD_ADDR_DUMP_SIZE,
315 			SMALL_PAGE_SIZE);
316 
317 	mobj = thread_rpc_alloc_payload(pl_sz);
318 	if (!mobj) {
319 		EMSG("Ftrace thread_rpc_alloc_payload failed");
320 		return;
321 	}
322 
323 	buf = mobj_get_va(mobj, 0, pl_sz);
324 	if (!buf)
325 		goto out_free_pl;
326 
327 	res = vm_map(&utc->uctx, &va, mobj->size, prot, VM_FLAG_EPHEMERAL,
328 		     mobj, 0);
329 	if (res)
330 		goto out_free_pl;
331 
332 	ubuf = (uint8_t *)va + mobj_get_phys_offs(mobj, mobj->phys_granule);
333 	memcpy(ubuf, &ctx->uuid, sizeof(TEE_UUID));
334 	ubuf += sizeof(TEE_UUID);
335 
336 	ld_addr_len = snprintk((char *)ubuf, LOAD_ADDR_DUMP_SIZE,
337 			       "TEE load address @ %#"PRIxVA"\n",
338 			       VCORE_START_VA);
339 	ubuf += ld_addr_len;
340 
341 	res = ldelf_dump_ftrace(&utc->uctx, ubuf, &blen);
342 	if (res) {
343 		EMSG("Ftrace dump failed: %#"PRIx32, res);
344 		goto out_unmap_pl;
345 	}
346 
347 	params[0] = THREAD_PARAM_VALUE(INOUT, 0, 0, 0);
348 	params[1] = THREAD_PARAM_MEMREF(IN, mobj, 0, sizeof(TEE_UUID));
349 	params[2] = THREAD_PARAM_MEMREF(IN, mobj, sizeof(TEE_UUID),
350 					blen + ld_addr_len);
351 
352 	res = thread_rpc_cmd(OPTEE_RPC_CMD_FTRACE, 3, params);
353 	if (res)
354 		EMSG("Ftrace thread_rpc_cmd res: %#"PRIx32, res);
355 
356 out_unmap_pl:
357 	res = vm_unmap(&utc->uctx, va, mobj->size);
358 	assert(!res);
359 out_free_pl:
360 	thread_rpc_free_payload(mobj);
361 }
362 #endif /*CFG_FTRACE_SUPPORT*/
363 
364 #ifdef CFG_TA_GPROF_SUPPORT
365 static void user_ta_gprof_set_status(enum ts_gprof_status status)
366 {
367 	if (status == TS_GPROF_SUSPEND)
368 		tee_ta_update_session_utime_suspend();
369 	else
370 		tee_ta_update_session_utime_resume();
371 }
372 #endif /*CFG_TA_GPROF_SUPPORT*/
373 
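/*
 * Free a user TA context and everything it owns: sessions it has opened
 * towards other TAs, its virtual address space, and the crypto states,
 * objects and storage enumerators created on its behalf.
 */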
374 static void free_utc(struct user_ta_ctx *utc)
375 {
376 
377 	/*
378 	 * Close sessions opened by this TA.
379 	 * Note that tee_ta_close_session() removes the item
380 	 * from the utc->open_sessions list.
381 	 */
382 	while (!TAILQ_EMPTY(&utc->open_sessions)) {
383 		tee_ta_close_session(TAILQ_FIRST(&utc->open_sessions),
384 				     &utc->open_sessions, KERN_IDENTITY);
385 	}
386 
387 	vm_info_final(&utc->uctx);
388 
389 	/* Free cryp states created by this TA */
390 	tee_svc_cryp_free_states(utc);
391 	/* Close cryp objects opened by this TA */
392 	tee_obj_close_all(utc);
393 	/* Free storage enumerators created by this TA */
394 	tee_svc_storage_close_all_enum(utc);
395 	free(utc);
396 }
397 
398 static void user_ta_ctx_destroy(struct ts_ctx *ctx)
399 {
400 	free_utc(to_user_ta_ctx(ctx));
401 }
402 
403 static uint32_t user_ta_get_instance_id(struct ts_ctx *ctx)
404 {
405 	return to_user_ta_ctx(ctx)->uctx.vm_info.asid;
406 }
407 
408 /*
409  * Note: this variable is weak just to ease breaking its dependency chain
410  * when added to the unpaged area.
411  */
412 const struct ts_ops user_ta_ops __weak __relrodata_unpaged("user_ta_ops") = {
413 	.enter_open_session = user_ta_enter_open_session,
414 	.enter_invoke_cmd = user_ta_enter_invoke_cmd,
415 	.enter_close_session = user_ta_enter_close_session,
416 #if defined(CFG_TA_STATS)
417 	.dump_mem_stats = user_ta_enter_dump_memstats,
418 #endif
419 	.dump_state = user_ta_dump_state,
420 #ifdef CFG_FTRACE_SUPPORT
421 	.dump_ftrace = user_ta_dump_ftrace,
422 #endif
423 	.destroy = user_ta_ctx_destroy,
424 	.get_instance_id = user_ta_get_instance_id,
425 	.handle_scall = scall_handle_user_ta,
426 #ifdef CFG_TA_GPROF_SUPPORT
427 	.gprof_set_status = user_ta_gprof_set_status,
428 #endif
429 };
430 
431 static void set_ta_ctx_ops(struct tee_ta_ctx *ctx)
432 {
433 	ctx->ts_ctx.ops = &user_ta_ops;
434 }
435 
436 bool is_user_ta_ctx(struct ts_ctx *ctx)
437 {
438 	return ctx && ctx->ops == &user_ta_ops;
439 }
440 
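/* Boot-time service init that logs the registered TA stores */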
441 static TEE_Result check_ta_store(void)
442 {
443 	const struct ts_store_ops *op = NULL;
444 
445 	SCATTERED_ARRAY_FOREACH(op, ta_stores, struct ts_store_ops)
446 		DMSG("TA store: \"%s\"", op->description);
447 
448 	return TEE_SUCCESS;
449 }
450 service_init(check_ta_store);
451 
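/*
 * Create the user TA context for session @s: initialize its address
 * space, register the context while it is still marked as initializing,
 * then load ldelf and let it map the TA. On failure the partially
 * initialized context is unregistered and freed.
 */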
452 TEE_Result tee_ta_init_user_ta_session(const TEE_UUID *uuid,
453 				       struct tee_ta_session *s)
454 {
455 	TEE_Result res = TEE_SUCCESS;
456 	struct user_ta_ctx *utc = NULL;
457 
458 	utc = calloc(1, sizeof(struct user_ta_ctx));
459 	if (!utc)
460 		return TEE_ERROR_OUT_OF_MEMORY;
461 
462 	TAILQ_INIT(&utc->open_sessions);
463 	TAILQ_INIT(&utc->cryp_states);
464 	TAILQ_INIT(&utc->objects);
465 	TAILQ_INIT(&utc->storage_enums);
466 	condvar_init(&utc->ta_ctx.busy_cv);
467 	utc->ta_ctx.ref_count = 1;
468 
469 	/*
470 	 * Set the context TA operation structure. The generic implementation
471 	 * uses it to distinguish user TA contexts from pseudo TA contexts.
472 	 */
473 	set_ta_ctx_ops(&utc->ta_ctx);
474 
475 	utc->ta_ctx.ts_ctx.uuid = *uuid;
476 	res = vm_info_init(&utc->uctx, &utc->ta_ctx.ts_ctx);
477 	if (res)
478 		goto out;
479 	utc->uctx.is_initializing = true;
480 
481 #ifdef CFG_TA_PAUTH
482 	crypto_rng_read(&utc->uctx.keys, sizeof(utc->uctx.keys));
483 #endif
484 
485 	mutex_lock(&tee_ta_mutex);
486 	s->ts_sess.ctx = &utc->ta_ctx.ts_ctx;
487 	s->ts_sess.handle_scall = s->ts_sess.ctx->ops->handle_scall;
488 	/*
489 	 * Another thread trying to load this same TA may need to wait
490 	 * until this context is fully initialized. This is needed to
491 	 * handle single instance TAs.
492 	 */
493 	TAILQ_INSERT_TAIL(&tee_ctxes, &utc->ta_ctx, link);
494 	mutex_unlock(&tee_ta_mutex);
495 
496 	/*
497 	 * We must not hold tee_ta_mutex while allocating page tables as
498 	 * that may otherwise lead to a deadlock.
499 	 */
500 	ts_push_current_session(&s->ts_sess);
501 
502 	res = ldelf_load_ldelf(&utc->uctx);
503 	if (!res)
504 		res = ldelf_init_with_ldelf(&s->ts_sess, &utc->uctx);
505 
506 	ts_pop_current_session();
507 
508 	mutex_lock(&tee_ta_mutex);
509 
510 	if (!res) {
511 		utc->uctx.is_initializing = false;
512 	} else {
513 		s->ts_sess.ctx = NULL;
514 		TAILQ_REMOVE(&tee_ctxes, &utc->ta_ctx, link);
515 	}
516 
517 	/* The state of the context has changed, notify any waiters. */
518 	condvar_broadcast(&tee_ta_init_cv);
519 
520 	mutex_unlock(&tee_ta_mutex);
521 
522 out:
523 	if (res) {
524 		condvar_destroy(&utc->ta_ctx.busy_cv);
525 		free_utc(utc);
526 	}
527 
528 	return res;
529 }
530