xref: /optee_os/core/arch/arm/kernel/secure_partition.c (revision de66193d98491779bdbcc647d85bf6fb9a4d5e41)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2020-2021, Arm Limited.
4  */
5 #include <bench.h>
6 #include <crypto/crypto.h>
7 #include <initcall.h>
8 #include <kernel/embedded_ts.h>
9 #include <kernel/ldelf_loader.h>
10 #include <kernel/secure_partition.h>
11 #include <kernel/spinlock.h>
12 #include <kernel/spmc_sp_handler.h>
13 #include <kernel/thread_spmc.h>
14 #include <kernel/ts_store.h>
15 #include <ldelf.h>
16 #include <mm/core_mmu.h>
17 #include <mm/fobj.h>
18 #include <mm/mobj.h>
19 #include <mm/vm.h>
20 #include <optee_ffa.h>
21 #include <stdio.h>
22 #include <string.h>
23 #include <tee_api_types.h>
24 #include <trace.h>
25 #include <types_ext.h>
26 #include <utee_defines.h>
27 #include <util.h>
28 #include <zlib.h>
29 
30 #include "thread_private.h"
31 
32 const struct ts_ops sp_ops;
33 
34 /* List that holds all of the loaded SP's */
35 static struct sp_sessions_head open_sp_sessions =
36 	TAILQ_HEAD_INITIALIZER(open_sp_sessions);
37 
38 static const struct embedded_ts *find_secure_partition(const TEE_UUID *uuid)
39 {
40 	const struct embedded_ts *sp = NULL;
41 
42 	for_each_secure_partition(sp) {
43 		if (!memcmp(&sp->uuid, uuid, sizeof(*uuid)))
44 			return sp;
45 	}
46 	return NULL;
47 }
48 
49 bool is_sp_ctx(struct ts_ctx *ctx)
50 {
51 	return ctx && (ctx->ops == &sp_ops);
52 }
53 
/* Mark @ctx as a secure partition context by installing the SP ts_ops. */
static void set_sp_ctx_ops(struct ts_ctx *ctx)
{
	ctx->ops = &sp_ops;
}
58 
59 TEE_Result sp_find_session_id(const TEE_UUID *uuid, uint32_t *session_id)
60 {
61 	struct sp_session *s = NULL;
62 
63 	TAILQ_FOREACH(s, &open_sp_sessions, link) {
64 		if (!memcmp(&s->ts_sess.ctx->uuid, uuid, sizeof(*uuid))) {
65 			if (s->state == sp_dead)
66 				return TEE_ERROR_TARGET_DEAD;
67 
68 			*session_id  = s->endpoint_id;
69 			return TEE_SUCCESS;
70 		}
71 	}
72 
73 	return TEE_ERROR_ITEM_NOT_FOUND;
74 }
75 
76 struct sp_session *sp_get_session(uint32_t session_id)
77 {
78 	struct sp_session *s = NULL;
79 
80 	TAILQ_FOREACH(s, &open_sp_sessions, link) {
81 		if (s->endpoint_id == session_id)
82 			return s;
83 	}
84 
85 	return NULL;
86 }
87 
88 TEE_Result sp_partition_info_get_all(struct ffa_partition_info *fpi,
89 				     size_t *elem_count)
90 {
91 	size_t in_count = *elem_count;
92 	struct sp_session *s = NULL;
93 	size_t count = 0;
94 
95 	TAILQ_FOREACH(s, &open_sp_sessions, link) {
96 		if (s->state == sp_dead)
97 			continue;
98 		if (count < in_count) {
99 			spmc_fill_partition_entry(fpi, s->endpoint_id, 1);
100 			fpi++;
101 		}
102 		count++;
103 	}
104 
105 	*elem_count = count;
106 	if (count > in_count)
107 		return TEE_ERROR_SHORT_BUFFER;
108 
109 	return TEE_SUCCESS;
110 }
111 
112 bool sp_has_exclusive_access(struct sp_mem_map_region *mem,
113 			     struct user_mode_ctx *uctx)
114 {
115 	/*
116 	 * Check that we have access to the region if it is supposed to be
117 	 * mapped to the current context.
118 	 */
119 	if (uctx) {
120 		struct vm_region *region = NULL;
121 
122 		/* Make sure that each mobj belongs to the SP */
123 		TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
124 			if (region->mobj == mem->mobj)
125 				break;
126 		}
127 
128 		if (!region)
129 			return false;
130 	}
131 
132 	/* Check that it is not shared with another SP */
133 	return !sp_mem_is_shared(mem);
134 }
135 
136 static void sp_init_info(struct sp_ctx *ctx, struct thread_smc_args *args)
137 {
138 	struct sp_ffa_init_info *info = NULL;
139 
140 	/*
141 	 * When starting the SP for the first time a init_info struct is passed.
142 	 * Store the struct on the stack and store the address in x0
143 	 */
144 	ctx->uctx.stack_ptr -= ROUNDUP(sizeof(*info), STACK_ALIGNMENT);
145 
146 	info = (struct sp_ffa_init_info *)ctx->uctx.stack_ptr;
147 
148 	info->magic = 0;
149 	info->count = 0;
150 	args->a0 = (vaddr_t)info;
151 }
152 
153 static uint16_t new_session_id(struct sp_sessions_head *open_sessions)
154 {
155 	struct sp_session *last = NULL;
156 	uint16_t id = SPMC_ENDPOINT_ID + 1;
157 
158 	last = TAILQ_LAST(open_sessions, sp_sessions_head);
159 	if (last)
160 		id = last->endpoint_id + 1;
161 
162 	assert(id > SPMC_ENDPOINT_ID);
163 	return id;
164 }
165 
166 static TEE_Result sp_create_ctx(const TEE_UUID *uuid, struct sp_session *s)
167 {
168 	TEE_Result res = TEE_SUCCESS;
169 	struct sp_ctx *spc = NULL;
170 
171 	/* Register context */
172 	spc = calloc(1, sizeof(struct sp_ctx));
173 	if (!spc)
174 		return TEE_ERROR_OUT_OF_MEMORY;
175 
176 	spc->uctx.ts_ctx = &spc->ts_ctx;
177 	spc->open_session = s;
178 	s->ts_sess.ctx = &spc->ts_ctx;
179 	spc->ts_ctx.uuid = *uuid;
180 
181 	res = vm_info_init(&spc->uctx);
182 	if (res)
183 		goto err;
184 
185 	set_sp_ctx_ops(&spc->ts_ctx);
186 
187 	return TEE_SUCCESS;
188 
189 err:
190 	free(spc);
191 	return res;
192 }
193 
194 static TEE_Result sp_create_session(struct sp_sessions_head *open_sessions,
195 				    const TEE_UUID *uuid,
196 				    struct sp_session **sess)
197 {
198 	TEE_Result res = TEE_SUCCESS;
199 	struct sp_session *s = calloc(1, sizeof(struct sp_session));
200 
201 	if (!s)
202 		return TEE_ERROR_OUT_OF_MEMORY;
203 
204 	s->endpoint_id = new_session_id(open_sessions);
205 	if (!s->endpoint_id) {
206 		res = TEE_ERROR_OVERFLOW;
207 		goto err;
208 	}
209 
210 	DMSG("Loading Secure Partition %pUl", (void *)uuid);
211 	res = sp_create_ctx(uuid, s);
212 	if (res)
213 		goto err;
214 
215 	TAILQ_INSERT_TAIL(open_sessions, s, link);
216 	*sess = s;
217 	return TEE_SUCCESS;
218 
219 err:
220 	free(s);
221 	return res;
222 }
223 
224 static TEE_Result sp_init_set_registers(struct sp_ctx *ctx)
225 {
226 	struct thread_ctx_regs *sp_regs = &ctx->sp_regs;
227 
228 	memset(sp_regs, 0, sizeof(*sp_regs));
229 	sp_regs->sp = ctx->uctx.stack_ptr;
230 	sp_regs->pc = ctx->uctx.entry_func;
231 
232 	return TEE_SUCCESS;
233 }
234 
/*
 * Map the shared memory described by @smem into SP session @s with the
 * access rights requested in @receiver.
 *
 * @va must point at a zero value: the caller cannot choose the mapping
 * address (see the comment below). The address chosen is presumably
 * reported back through @va by vm_map() — confirm against vm_map()'s
 * contract.
 *
 * NOTE(review): regions mapped by earlier loop iterations are not
 * unmapped when a later vm_map() call fails — presumably the caller
 * tears the whole share down on error; verify against callers.
 */
TEE_Result sp_map_shared(struct sp_session *s,
			 struct sp_mem_receiver *receiver,
			 struct sp_mem *smem,
			 uint64_t *va)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_ctx *ctx = NULL;
	uint32_t perm = TEE_MATTR_UR; /* Read access is always granted */
	struct sp_mem_map_region *reg = NULL;

	ctx = to_sp_ctx(s->ts_sess.ctx);

	/* Get the permission */
	if (receiver->perm.perm & FFA_MEM_ACC_EXE)
		perm |= TEE_MATTR_UX;

	if (receiver->perm.perm & FFA_MEM_ACC_RW) {
		/* Writable and executable are mutually exclusive */
		if (receiver->perm.perm & FFA_MEM_ACC_EXE)
			return TEE_ERROR_ACCESS_CONFLICT;

		perm |= TEE_MATTR_UW;
	}
	/*
	 * Currently we don't support passing a va. We can't guarantee that the
	 * full region will be mapped in a contiguous region. A smem->region can
	 * have multiple mobj for one share. Currently there doesn't seem to be
	 * an option to guarantee that these will be mapped in a contiguous va
	 * space.
	 */
	if (*va)
		return TEE_ERROR_NOT_SUPPORTED;

	SLIST_FOREACH(reg, &smem->regions, link) {
		res = vm_map(&ctx->uctx, va, reg->page_count * SMALL_PAGE_SIZE,
			     perm, 0, reg->mobj, reg->page_offset);

		if (res != TEE_SUCCESS) {
			EMSG("Failed to map memory region %#"PRIx32, res);
			return res;
		}
	}
	return TEE_SUCCESS;
}
278 
/*
 * Create a session for the embedded SP with @uuid and load the SP
 * binary into it using ldelf.
 *
 * Returns TEE_ERROR_ITEM_NOT_FOUND when no embedded SP carries this
 * UUID and TEE_ERROR_TARGET_DEAD when loading fails. On success the
 * session is left in state sp_idle with its registers primed, ready
 * for its first run, and *sess points at it.
 */
static TEE_Result sp_open_session(struct sp_session **sess,
				  struct sp_sessions_head *open_sessions,
				  const TEE_UUID *uuid)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_session *s = NULL;
	struct sp_ctx *ctx = NULL;

	if (!find_secure_partition(uuid))
		return TEE_ERROR_ITEM_NOT_FOUND;

	res = sp_create_session(open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("sp_create_session failed %#"PRIx32, res);
		return res;
	}

	ctx = to_sp_ctx(s->ts_sess.ctx);
	/* NOTE(review): the NULL check below is dead code if the assert holds */
	assert(ctx);
	if (!ctx)
		return TEE_ERROR_TARGET_DEAD;
	*sess = s;

	ts_push_current_session(&s->ts_sess);
	/*
	 * Load the SP using ldelf.
	 * NOTE(review): the result of ldelf_load_ldelf() is ignored here;
	 * a failure would presumably surface via ldelf_init_with_ldelf()
	 * below — confirm that is intended.
	 */
	ldelf_load_ldelf(&ctx->uctx);
	res = ldelf_init_with_ldelf(&s->ts_sess, &ctx->uctx);

	if (res != TEE_SUCCESS) {
		EMSG("Failed. loading SP using ldelf %#"PRIx32, res);
		ts_pop_current_session();
		return TEE_ERROR_TARGET_DEAD;
	}

	/* Make the SP ready for its first run */
	s->state = sp_idle;
	s->caller_id = 0;
	sp_init_set_registers(ctx);
	ts_pop_current_session();

	return TEE_SUCCESS;
}
321 
322 static TEE_Result sp_init_uuid(const TEE_UUID *uuid)
323 {
324 	TEE_Result res = TEE_SUCCESS;
325 	struct sp_session *sess = NULL;
326 	struct thread_smc_args args = { };
327 
328 	res = sp_open_session(&sess,
329 			      &open_sp_sessions,
330 			      uuid);
331 	if (res)
332 		return res;
333 
334 	ts_push_current_session(&sess->ts_sess);
335 	sp_init_info(to_sp_ctx(sess->ts_sess.ctx), &args);
336 	ts_pop_current_session();
337 
338 	if (sp_enter(&args, sess))
339 		return FFA_ABORTED;
340 
341 	spmc_sp_msg_handler(&args, sess);
342 
343 	return TEE_SUCCESS;
344 }
345 
346 TEE_Result sp_enter(struct thread_smc_args *args, struct sp_session *sp)
347 {
348 	TEE_Result res = FFA_OK;
349 	struct sp_ctx *ctx = to_sp_ctx(sp->ts_sess.ctx);
350 
351 	ctx->sp_regs.x[0] = args->a0;
352 	ctx->sp_regs.x[1] = args->a1;
353 	ctx->sp_regs.x[2] = args->a2;
354 	ctx->sp_regs.x[3] = args->a3;
355 	ctx->sp_regs.x[4] = args->a4;
356 	ctx->sp_regs.x[5] = args->a5;
357 	ctx->sp_regs.x[6] = args->a6;
358 	ctx->sp_regs.x[7] = args->a7;
359 
360 	res = sp->ts_sess.ctx->ops->enter_invoke_cmd(&sp->ts_sess, 0);
361 
362 	args->a0 = ctx->sp_regs.x[0];
363 	args->a1 = ctx->sp_regs.x[1];
364 	args->a2 = ctx->sp_regs.x[2];
365 	args->a3 = ctx->sp_regs.x[3];
366 	args->a4 = ctx->sp_regs.x[4];
367 	args->a5 = ctx->sp_regs.x[5];
368 	args->a6 = ctx->sp_regs.x[6];
369 	args->a7 = ctx->sp_regs.x[7];
370 
371 	return res;
372 }
373 
/*
 * ts_ops enter hook for SPs: run the SP with its saved register file.
 *
 * The SP executes until it issues an SVC (handled by sp_handle_svc())
 * or panics. On panic the session is marked sp_dead under its spinlock
 * and TEE_ERROR_TARGET_DEAD is returned; otherwise TEE_SUCCESS.
 */
static TEE_Result sp_enter_invoke_cmd(struct ts_session *s,
				      uint32_t cmd __unused)
{
	struct sp_ctx *ctx = to_sp_ctx(s->ctx);
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;
	uint64_t cpsr = 0;
	struct sp_session *sp_s = to_sp_session(s);
	struct ts_session *sess = NULL;
	struct thread_ctx_regs *sp_regs = NULL;
	uint32_t panicked = false;
	uint32_t panic_code = 0;

	bm_timestamp();

	sp_regs = &ctx->sp_regs;
	ts_push_current_session(s);

	/*
	 * Enter the SP with the current DAIF interrupt masks; the saved
	 * CPSR value is restored after the SP returns.
	 */
	cpsr = sp_regs->cpsr;
	sp_regs->cpsr = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	__thread_enter_user_mode(sp_regs, &panicked, &panic_code);
	sp_regs->cpsr = cpsr;
	thread_unmask_exceptions(exceptions);

	thread_user_clear_vfp(&ctx->uctx);

	if (panicked) {
		DMSG("SP panicked with code  %#"PRIx32, panic_code);
		abort_print_current_ts();

		sess = ts_pop_current_session();
		/* Mark the session dead so it is never entered again */
		cpu_spin_lock(&sp_s->spinlock);
		sp_s->state = sp_dead;
		cpu_spin_unlock(&sp_s->spinlock);

		return TEE_ERROR_TARGET_DEAD;
	}

	sess = ts_pop_current_session();
	assert(sess == s);

	bm_timestamp();

	return res;
}
421 
/* We currently don't support 32 bits */
#ifdef ARM64
/*
 * Copy the AArch64 register state captured at SVC entry (@regs) into
 * the SP's saved register file (@sp_regs) so that sp_enter() can read
 * the SP's FF-A arguments from it afterwards.
 */
static void sp_svc_store_registers(struct thread_svc_regs *regs,
				   struct thread_ctx_regs *sp_regs)
{
	COMPILE_TIME_ASSERT(sizeof(sp_regs->x[0]) == sizeof(regs->x0));
	/* x0-x30: assumes x0..x30 are laid out contiguously in @regs */
	memcpy(sp_regs->x, &regs->x0, 31 * sizeof(regs->x0));
	sp_regs->pc = regs->elr;
	sp_regs->sp = regs->sp_el0;
}
#endif
433 
434 static bool sp_handle_svc(struct thread_svc_regs *regs)
435 {
436 	struct ts_session *ts = ts_get_current_session();
437 	struct sp_ctx *uctx = to_sp_ctx(ts->ctx);
438 	struct sp_session *s = uctx->open_session;
439 
440 	assert(s);
441 
442 	sp_svc_store_registers(regs, &uctx->sp_regs);
443 
444 	regs->x0 = 0;
445 	regs->x1 = 0; /* panic */
446 	regs->x2 = 0; /* panic code */
447 
448 	/*
449 	 * All the registers of the SP are saved in the SP session by the SVC
450 	 * handler.
451 	 * We always return to S-El1 after handling the SVC. We will continue
452 	 * in sp_enter_invoke_cmd() (return from __thread_enter_user_mode).
453 	 * The sp_enter() function copies the FF-A parameters (a0-a7) from the
454 	 * saved registers to the thread_smc_args. The thread_smc_args object is
455 	 * afterward used by the spmc_sp_msg_handler() to handle the
456 	 * FF-A message send by the SP.
457 	 */
458 	return false;
459 }
460 
/*
 * ts_ops implementation for secure partition contexts.
 *
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct ts_ops sp_ops __weak __rodata_unpaged("sp_ops") = {
	.enter_invoke_cmd = sp_enter_invoke_cmd,
	.handle_svc = sp_handle_svc,
};
469 
470 static TEE_Result sp_init_all(void)
471 {
472 	TEE_Result res = TEE_SUCCESS;
473 	const struct embedded_ts *sp = NULL;
474 	char __maybe_unused msg[60] = { '\0', };
475 
476 	for_each_secure_partition(sp) {
477 		if (sp->uncompressed_size)
478 			snprintf(msg, sizeof(msg),
479 				 " (compressed, uncompressed %u)",
480 				 sp->uncompressed_size);
481 		else
482 			msg[0] = '\0';
483 		DMSG("SP %pUl size %u%s", (void *)&sp->uuid, sp->size, msg);
484 
485 		res = sp_init_uuid(&sp->uuid);
486 
487 		if (res != TEE_SUCCESS) {
488 			EMSG("Failed initializing SP(%pUl) err:%#"PRIx32,
489 			     &sp->uuid, res);
490 			if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
491 				panic();
492 		}
493 	}
494 
495 	return TEE_SUCCESS;
496 }
497 
498 boot_final(sp_init_all);
499 
/* ts_store open hook: resolve @uuid among the embedded SP images. */
static TEE_Result secure_partition_open(const TEE_UUID *uuid,
					struct ts_store_handle **h)
{
	return emb_ts_open(uuid, h, find_secure_partition);
}
505 
/*
 * Register the embedded SP image store with the generic trusted-services
 * loader (the argument 2 is presumably the store's priority — confirm
 * against the REGISTER_SP_STORE definition). All hooks except open are
 * the generic embedded-TS helpers.
 */
REGISTER_SP_STORE(2) = {
	.description = "SP store",
	.open = secure_partition_open,
	.get_size = emb_ts_get_size,
	.get_tag = emb_ts_get_tag,
	.read = emb_ts_read,
	.close = emb_ts_close,
};
514