// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2015-2022 Linaro Limited
 * Copyright (c) 2020, Arm Limited.
 */

#include <assert.h>
#include <compiler.h>
#include <crypto/crypto.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/ldelf_loader.h>
#include <kernel/linker.h>
#include <kernel/panic.h>
#include <kernel/scall.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/ts_store.h>
#include <kernel/user_access.h>
#include <kernel/user_mode_ctx.h>
#include <kernel/user_ta.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/file.h>
#include <mm/fobj.h>
#include <mm/mobj.h>
#include <mm/pgt_cache.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <optee_rpc_cmd.h>
#include <printk.h>
#include <signed_hdr.h>
#include <stdlib.h>
#include <string_ext.h>
#include <sys/queue.h>
#include <tee/tee_cryp_utl.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc_storage.h>
#include <trace.h>
#include <types_ext.h>
#include <utee_defines.h>
#include <util.h>

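/*
 * Marshal the kernel-held TA parameters @p into the user space struct
 * utee_params at @up. The values are staged in a bounce buffer and copied
 * out in one go with copy_to_user(). As described in utee_types.h, for
 * each parameter n, vals[n * 2] holds the memref address (taken from
 * @va) or value.a, and vals[n * 2 + 1] holds the memref size or value.b.
 */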
static TEE_Result init_utee_param(struct utee_params *up,
				  const struct tee_ta_param *p,
				  void *va[TEE_NUM_PARAMS])
{
	TEE_Result res = TEE_SUCCESS;
	size_t n = 0;
	struct utee_params *up_bbuf = NULL;

	up_bbuf = bb_alloc(sizeof(struct utee_params));
	if (!up_bbuf)
		return TEE_ERROR_OUT_OF_MEMORY;

	up_bbuf->types = p->types;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		uintptr_t a;
		uintptr_t b;

		switch (TEE_PARAM_TYPE_GET(p->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_INPUT:
		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			a = (uintptr_t)va[n];
			b = p->u[n].mem.size;
			break;
		case TEE_PARAM_TYPE_VALUE_INPUT:
		case TEE_PARAM_TYPE_VALUE_INOUT:
			a = p->u[n].val.a;
			b = p->u[n].val.b;
			break;
		default:
			a = 0;
			b = 0;
			break;
		}
		/* See comment for struct utee_params in utee_types.h */
		up_bbuf->vals[n * 2] = a;
		up_bbuf->vals[n * 2 + 1] = b;
	}

	res = copy_to_user(up, up_bbuf, sizeof(struct utee_params));

	bb_free(up_bbuf, sizeof(struct utee_params));

	return res;
}

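/*
 * Copy back the output parameters from the user space struct utee_params
 * at @up once the TA has returned: the updated size for OUTPUT and INOUT
 * memrefs, and both fields for OUTPUT and INOUT values. @up is read
 * through a bounce buffer and @p is left untouched if the copy from user
 * space fails.
 */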
static void update_from_utee_param(struct tee_ta_param *p,
				   const struct utee_params *up)
{
	TEE_Result res = TEE_SUCCESS;
	size_t n = 0;
	struct utee_params *up_bbuf = NULL;

	res = BB_MEMDUP_USER(up, sizeof(*up), &up_bbuf);
	if (res)
		return;

	for (n = 0; n < TEE_NUM_PARAMS; n++) {
		switch (TEE_PARAM_TYPE_GET(p->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			/* See comment for struct utee_params in utee_types.h */
			p->u[n].mem.size = up_bbuf->vals[n * 2 + 1];
			break;
		case TEE_PARAM_TYPE_VALUE_OUTPUT:
		case TEE_PARAM_TYPE_VALUE_INOUT:
			/* See comment for struct utee_params in utee_types.h */
			p->u[n].val.a = up_bbuf->vals[n * 2];
			p->u[n].val.b = up_bbuf->vals[n * 2 + 1];
			break;
		default:
			break;
		}
	}

	bb_free(up_bbuf, sizeof(*up));
}

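/*
 * Guard against unbounded recursion when a TA invokes another TA (or
 * itself) via a syscall. Returns false once the per-thread depth has
 * reached CFG_CORE_MAX_SYSCALL_RECURSION.
 */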
static bool inc_recursion(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	if (tsd->syscall_recursion >= CFG_CORE_MAX_SYSCALL_RECURSION) {
		DMSG("Maximum allowed recursion depth reached (%u)",
		     CFG_CORE_MAX_SYSCALL_RECURSION);
		return false;
	}

	tsd->syscall_recursion++;
	return true;
}

static void dec_recursion(void)
{
	struct thread_specific_data *tsd = thread_get_tsd();

	assert(tsd->syscall_recursion);
	tsd->syscall_recursion--;
}

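/*
 * Enter the user TA at the entry point selected with @func, passing @cmd
 * for invoke command. Maps any parameters into the TA address space,
 * pushes a struct utee_params onto the user stack, runs the TA in user
 * mode and copies value results back out when it returns.
 */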
static TEE_Result user_ta_enter(struct ts_session *session,
				enum utee_entry_func func, uint32_t cmd)
{
	TEE_Result res = TEE_SUCCESS;
	struct utee_params *usr_params = NULL;
	uaddr_t usr_stack = 0;
	struct user_ta_ctx *utc = to_user_ta_ctx(session->ctx);
	struct tee_ta_session *ta_sess = to_ta_session(session);
	struct ts_session *ts_sess __maybe_unused = NULL;
	void *param_va[TEE_NUM_PARAMS] = { NULL };

	if (!inc_recursion()) {
		/* Using this error code since we've run out of resources. */
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out_clr_cancel;
	}
	if (ta_sess->param) {
		/* Map user space memory */
		res = vm_map_param(&utc->uctx, ta_sess->param, param_va);
		if (res != TEE_SUCCESS)
			goto out;
	}

	/* Switch to user ctx */
	ts_push_current_session(session);

	/* Make room for usr_params at top of stack */
	usr_stack = utc->uctx.stack_ptr;
	usr_stack -= ROUNDUP(sizeof(struct utee_params), STACK_ALIGNMENT);
	usr_params = (struct utee_params *)usr_stack;
	if (ta_sess->param)
		res = init_utee_param(usr_params, ta_sess->param, param_va);
	else
		res = clear_user(usr_params, sizeof(*usr_params));

	if (res)
		goto out_pop_session;

	res = thread_enter_user_mode(func, kaddr_to_uref(session),
				     (vaddr_t)usr_params, cmd, usr_stack,
				     utc->uctx.entry_func, utc->uctx.is_32bit,
				     &utc->ta_ctx.panicked,
				     &utc->ta_ctx.panic_code);

	thread_user_clear_vfp(&utc->uctx);

	if (utc->ta_ctx.panicked) {
		abort_print_current_ts();
		DMSG("tee_user_ta_enter: TA panicked with code 0x%x",
		     utc->ta_ctx.panic_code);
		res = TEE_ERROR_TARGET_DEAD;
	} else {
		/*
		 * According to the GP spec, the origin should always be
		 * set to the TA after TA execution.
		 */
		ta_sess->err_origin = TEE_ORIGIN_TRUSTED_APP;
	}

	if (ta_sess->param) {
		/* Copy out value results */
		update_from_utee_param(ta_sess->param, usr_params);
	}

out_pop_session:
	if (ta_sess->param) {
		/*
		 * Clear out the parameter mappings added with
		 * vm_map_param() above.
		 */
		vm_clean_param(&utc->uctx);
	}
	ts_sess = ts_pop_current_session();
	assert(ts_sess == session);

out:
	dec_recursion();
out_clr_cancel:
	/*
	 * Reset the cancel state now that the user TA has returned. The
	 * next time the TA is invoked it will be with a new operation
	 * and must not see a stale cancellation pending.
	 */
	ta_sess->cancel = false;
	ta_sess->cancel_mask = true;

	return res;
}

static TEE_Result user_ta_enter_open_session(struct ts_session *s)
{
	return user_ta_enter(s, UTEE_ENTRY_FUNC_OPEN_SESSION, 0);
}

static TEE_Result user_ta_enter_invoke_cmd(struct ts_session *s, uint32_t cmd)
{
	return user_ta_enter(s, UTEE_ENTRY_FUNC_INVOKE_COMMAND, cmd);
}

static void user_ta_enter_close_session(struct ts_session *s)
{
	/* Only if the TA was fully initialized by ldelf */
	if (!to_user_ta_ctx(s->ctx)->ta_ctx.is_initializing)
		user_ta_enter(s, UTEE_ENTRY_FUNC_CLOSE_SESSION, 0);
}

#if defined(CFG_TA_STATS)
static TEE_Result user_ta_enter_dump_memstats(struct ts_session *s)
{
	return user_ta_enter(s, UTEE_ENTRY_FUNC_DUMP_MEMSTATS, 0);
}
#endif

static void dump_state_no_ldelf_dbg(struct user_ta_ctx *utc)
{
	user_mode_ctx_print_mappings(&utc->uctx);
}

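/*
 * Print the state of the TA, preferably via the dump entry function
 * provided by ldelf, falling back to a plain dump of the mappings.
 */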
static void user_ta_dump_state(struct ts_ctx *ctx)
{
	struct user_ta_ctx *utc = to_user_ta_ctx(ctx);

	if (utc->uctx.dump_entry_func) {
		TEE_Result res = ldelf_dump_state(&utc->uctx);

		if (!res || res == TEE_ERROR_TARGET_DEAD)
			return;
		/*
		 * Fall back to dump_state_no_ldelf_dbg() if
		 * ldelf_dump_state() fails for some reason.
		 *
		 * If ldelf_dump_state() failed with a panic we're done,
		 * since abort_print_current_ts() will be called and will
		 * dump the memory map.
		 */
	}

	dump_state_no_ldelf_dbg(utc);
}

#ifdef CFG_FTRACE_SUPPORT
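/*
 * Dump the ftrace buffer of the TA to normal world: query the required
 * size from ldelf, allocate and map an RPC payload buffer, prefix it with
 * the TA UUID (plus the TEE load address for a new dump), let ldelf fill
 * it and hand it over with OPTEE_RPC_CMD_FTRACE.
 */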
static void user_ta_dump_ftrace(struct ts_ctx *ctx)
{
	uint32_t prot = TEE_MATTR_URW;
	struct ts_session *sess = ts_get_current_session();
	struct user_ta_ctx *utc = to_user_ta_ctx(ctx);
	struct thread_param params[3] = { };
	TEE_Result res = TEE_SUCCESS;
	struct mobj *mobj = NULL;
	uint8_t *ubuf = NULL;
	void *buf = NULL;
	size_t pl_sz = 0;
	size_t blen = 0, ld_addr_len = 0;
	uint32_t dump_id = 0;
	vaddr_t va = 0;

	res = ldelf_dump_ftrace(&utc->uctx, NULL, &blen);
	if (res != TEE_ERROR_SHORT_BUFFER)
		return;

#define LOAD_ADDR_DUMP_SIZE	64
	pl_sz = ROUNDUP(blen + sizeof(TEE_UUID) + LOAD_ADDR_DUMP_SIZE,
			SMALL_PAGE_SIZE);

	mobj = thread_rpc_alloc_payload(pl_sz);
	if (!mobj) {
		EMSG("Ftrace thread_rpc_alloc_payload failed");
		return;
	}

	buf = mobj_get_va(mobj, 0, pl_sz);
	if (!buf)
		goto out_free_pl;

	res = vm_map(&utc->uctx, &va, mobj->size, prot, VM_FLAG_EPHEMERAL,
		     mobj, 0);
	if (res)
		goto out_free_pl;

	ubuf = (uint8_t *)va + mobj_get_phys_offs(mobj, mobj->phys_granule);
	memcpy(ubuf, &ctx->uuid, sizeof(TEE_UUID));
	ubuf += sizeof(TEE_UUID);

	if (sess->fbuf)
		dump_id = sess->fbuf->dump_id;
	/* Only print the header when this is a new dump */
	if (!dump_id)
		ld_addr_len = snprintk((char *)ubuf, LOAD_ADDR_DUMP_SIZE,
				       "TEE load address @ %#"PRIxVA"\n",
				       VCORE_START_VA);
	ubuf += ld_addr_len;

	res = ldelf_dump_ftrace(&utc->uctx, ubuf, &blen);
	if (res) {
		EMSG("Ftrace dump failed: %#"PRIx32, res);
		goto out_unmap_pl;
	}

	params[0] = THREAD_PARAM_VALUE(INOUT, dump_id, 0, 0);
	params[1] = THREAD_PARAM_MEMREF(IN, mobj, 0, sizeof(TEE_UUID));
	params[2] = THREAD_PARAM_MEMREF(IN, mobj, sizeof(TEE_UUID),
					blen + ld_addr_len);

	res = thread_rpc_cmd(OPTEE_RPC_CMD_FTRACE, 3, params);
	if (res) {
		EMSG("Ftrace thread_rpc_cmd res: %#"PRIx32, res);
		goto out_unmap_pl;
	}
	if (sess->fbuf)
		sess->fbuf->dump_id = params[0].u.value.a;

out_unmap_pl:
	res = vm_unmap(&utc->uctx, va, mobj->size);
	assert(!res);
out_free_pl:
	thread_rpc_free_payload(mobj);
}
#endif /*CFG_FTRACE_SUPPORT*/

#ifdef CFG_TA_GPROF_SUPPORT
static void user_ta_gprof_set_status(enum ts_gprof_status status)
{
	if (status == TS_GPROF_SUSPEND)
		tee_ta_update_session_utime_suspend();
	else
		tee_ta_update_session_utime_resume();
}
#endif /*CFG_TA_GPROF_SUPPORT*/

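/*
 * Release everything owned by the TA context: open sessions, the virtual
 * memory info, crypto states and objects, and storage enumerators.
 */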
static void release_utc_state(struct user_ta_ctx *utc)
{
	/*
	 * Close sessions opened by this TA. Note that
	 * tee_ta_close_session() removes the item from the
	 * utc->open_sessions list.
	 */
	while (!TAILQ_EMPTY(&utc->open_sessions)) {
		tee_ta_close_session(TAILQ_FIRST(&utc->open_sessions),
				     &utc->open_sessions, KERN_IDENTITY);
	}

	vm_info_final(&utc->uctx);

	/* Free cryp states created by this TA */
	tee_svc_cryp_free_states(utc);
	/* Close cryp objects opened by this TA */
	tee_obj_close_all(utc);
	/* Free storage enumerators created by this TA */
	tee_svc_storage_close_all_enum(utc);
}

static void free_utc(struct user_ta_ctx *utc)
{
	release_utc_state(utc);
	free(utc);
}

static void user_ta_release_state(struct ts_ctx *ctx)
{
	release_utc_state(to_user_ta_ctx(ctx));
}

static void user_ta_ctx_destroy(struct ts_ctx *ctx)
{
	free_utc(to_user_ta_ctx(ctx));
}

static uint32_t user_ta_get_instance_id(struct ts_ctx *ctx)
{
	return to_user_ta_ctx(ctx)->uctx.vm_info.asid;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct ts_ops user_ta_ops __weak __relrodata_unpaged("user_ta_ops") = {
	.enter_open_session = user_ta_enter_open_session,
	.enter_invoke_cmd = user_ta_enter_invoke_cmd,
	.enter_close_session = user_ta_enter_close_session,
#if defined(CFG_TA_STATS)
	.dump_mem_stats = user_ta_enter_dump_memstats,
#endif
	.dump_state = user_ta_dump_state,
#ifdef CFG_FTRACE_SUPPORT
	.dump_ftrace = user_ta_dump_ftrace,
#endif
	.release_state = user_ta_release_state,
	.destroy = user_ta_ctx_destroy,
	.get_instance_id = user_ta_get_instance_id,
	.handle_scall = scall_handle_user_ta,
#ifdef CFG_TA_GPROF_SUPPORT
	.gprof_set_status = user_ta_gprof_set_status,
#endif
};

static void set_ta_ctx_ops(struct tee_ta_ctx *ctx)
{
	ctx->ts_ctx.ops = &user_ta_ops;
}

bool __noprof is_user_ta_ctx(struct ts_ctx *ctx)
{
	return ctx && ctx->ops == &user_ta_ops;
}

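/* Boot-time service hook logging the registered TA stores */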
static TEE_Result check_ta_store(void)
{
	const struct ts_store_ops *op = NULL;

	SCATTERED_ARRAY_FOREACH(op, ta_stores, struct ts_store_ops)
		DMSG("TA store: \"%s\"", op->description);

	return TEE_SUCCESS;
}
service_init(check_ta_store);

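/*
 * Allocate and minimally initialize a user TA context for @uuid, attach
 * it to session @s and register it in the tee_ctxes list. The context
 * stays marked as initializing until tee_ta_complete_user_ta_session()
 * has loaded the TA via ldelf.
 */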
TEE_Result tee_ta_init_user_ta_session(const TEE_UUID *uuid,
				       struct tee_ta_session *s)
{
	TEE_Result res = TEE_SUCCESS;
	struct user_ta_ctx *utc = NULL;
#ifdef CFG_TA_PAUTH
	uint8_t pauth_keys[sizeof(utc->uctx.keys)] = { };
#endif

	/*
	 * The caller is expected to hold tee_ta_mutex to allow safe
	 * changes to @s and registration of the context in the
	 * tee_ctxes list.
	 */
	assert(mutex_is_locked(&tee_ta_mutex));

	utc = calloc(1, sizeof(struct user_ta_ctx));
	if (!utc)
		return TEE_ERROR_OUT_OF_MEMORY;

#ifdef CFG_TA_PAUTH
	res = crypto_rng_read(pauth_keys, sizeof(pauth_keys));
	if (res) {
		free(utc);
		return res;
	}
#endif

	TAILQ_INIT(&utc->open_sessions);
	TAILQ_INIT(&utc->cryp_states);
	TAILQ_INIT(&utc->objects);
	TAILQ_INIT(&utc->storage_enums);
	condvar_init(&utc->ta_ctx.busy_cv);
	utc->ta_ctx.ref_count = 1;

	/*
	 * Set the context TA operation structure. It is required by the
	 * generic implementation to distinguish user-land TA contexts
	 * from pseudo TA contexts.
	 */
	set_ta_ctx_ops(&utc->ta_ctx);

	utc->ta_ctx.ts_ctx.uuid = *uuid;
	res = vm_info_init(&utc->uctx, &utc->ta_ctx.ts_ctx);
	if (res) {
		condvar_destroy(&utc->ta_ctx.busy_cv);
		free_utc(utc);
		return res;
	}

	utc->ta_ctx.is_initializing = true;

	assert(!mutex_trylock(&tee_ta_mutex));

#ifdef CFG_TA_PAUTH
	memcpy(&utc->uctx.keys, pauth_keys, sizeof(pauth_keys));
	memzero_explicit(pauth_keys, sizeof(pauth_keys));
#endif

	s->ts_sess.ctx = &utc->ta_ctx.ts_ctx;
	s->ts_sess.handle_scall = s->ts_sess.ctx->ops->handle_scall;
	/*
	 * Another thread trying to load this same TA may need to wait
	 * until this context is fully initialized. This is needed to
	 * handle single instance TAs.
	 */
	TAILQ_INSERT_TAIL(&tee_ctxes, &utc->ta_ctx, link);

	return TEE_SUCCESS;
}

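/*
 * Second stage of user TA initialization: load ldelf into the new
 * context and let it load and relocate the TA binary. On failure the
 * half-initialized context is unregistered and freed.
 */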
TEE_Result tee_ta_complete_user_ta_session(struct tee_ta_session *s)
{
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ts_sess.ctx);
	TEE_Result res = TEE_SUCCESS;

	/*
	 * We must not hold tee_ta_mutex while allocating page tables as
	 * that could lead to a deadlock.
	 */
	ts_push_current_session(&s->ts_sess);

	res = ldelf_load_ldelf(&utc->uctx);
	if (!res)
		res = ldelf_init_with_ldelf(&s->ts_sess, &utc->uctx);

	ts_pop_current_session();

	mutex_lock(&tee_ta_mutex);

	if (!res) {
		utc->ta_ctx.is_initializing = false;
	} else {
		s->ts_sess.ctx = NULL;
		TAILQ_REMOVE(&tee_ctxes, &utc->ta_ctx, link);
		condvar_destroy(&utc->ta_ctx.busy_cv);
		free_utc(utc);
	}

	/* The state has changed for the context, notify any waiters. */
	condvar_broadcast(&tee_ta_init_cv);

	mutex_unlock(&tee_ta_mutex);

	return res;
}
