// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2020-2022, Arm Limited.
 */
#include <bench.h>
#include <crypto/crypto.h>
#include <initcall.h>
#include <kernel/embedded_ts.h>
#include <kernel/ldelf_loader.h>
#include <kernel/secure_partition.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/thread_private.h>
#include <kernel/thread_spmc.h>
#include <kernel/ts_store.h>
#include <ldelf.h>
#include <libfdt.h>
#include <mm/core_mmu.h>
#include <mm/fobj.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <optee_ffa.h>
#include <stdio.h>
#include <string.h>
#include <tee_api_types.h>
#include <tee/uuid.h>
#include <trace.h>
#include <types_ext.h>
#include <utee_defines.h>
#include <util.h>
#include <zlib.h>

/* Forward declaration; the initializer lives near the end of this file */
const struct ts_ops sp_ops;

/* List that holds all of the loaded SP's */
static struct sp_sessions_head open_sp_sessions =
	TAILQ_HEAD_INITIALIZER(open_sp_sessions);

/*
 * Look up an embedded SP image by UUID.
 *
 * Returns a pointer to the matching image, or NULL when no embedded
 * secure partition carries @uuid.
 */
static const struct embedded_ts *find_secure_partition(const TEE_UUID *uuid)
{
	const struct sp_image *sp = NULL;

	for_each_secure_partition(sp) {
		if (!memcmp(&sp->image.uuid, uuid, sizeof(*uuid)))
			return &sp->image;
	}
	return NULL;
}

/* True when @ctx is a secure-partition context (its ops point at sp_ops) */
bool is_sp_ctx(struct ts_ctx *ctx)
{
	return ctx && (ctx->ops == &sp_ops);
}

/* Tag @ctx as a secure-partition context so is_sp_ctx() recognizes it */
static void set_sp_ctx_ops(struct ts_ctx *ctx)
{
	ctx->ops = &sp_ops;
}

/*
 * Find the FF-A endpoint ID of the open session whose context UUID
 * matches @uuid.
 *
 * Returns TEE_SUCCESS and stores the ID in *@session_id on a match,
 * TEE_ERROR_TARGET_DEAD if the matching SP has panicked, or
 * TEE_ERROR_ITEM_NOT_FOUND when no session matches.
 */
TEE_Result sp_find_session_id(const TEE_UUID *uuid, uint32_t *session_id)
{
	struct sp_session *s = NULL;

	TAILQ_FOREACH(s, &open_sp_sessions, link) {
		if (!memcmp(&s->ts_sess.ctx->uuid, uuid, sizeof(*uuid))) {
			if (s->state == sp_dead)
				return TEE_ERROR_TARGET_DEAD;

			*session_id = s->endpoint_id;
			return TEE_SUCCESS;
		}
	}

	return TEE_ERROR_ITEM_NOT_FOUND;
}

/* Return the open session with endpoint ID @session_id, or NULL */
struct sp_session *sp_get_session(uint32_t session_id)
{
	struct sp_session *s = NULL;

	TAILQ_FOREACH(s, &open_sp_sessions, link) {
		if (s->endpoint_id == session_id)
			return s;
	}

	return NULL;
}

/*
 * Fill @fpi with one ffa_partition_info entry per live (non-dead) SP
 * session, writing at most the *@elem_count entries the caller has room
 * for. On return *@elem_count holds the total number of live sessions;
 * TEE_ERROR_SHORT_BUFFER is returned when that exceeds the space the
 * caller provided.
 */
TEE_Result sp_partition_info_get_all(struct ffa_partition_info *fpi,
				     size_t *elem_count)
{
	size_t in_count = *elem_count;
	struct sp_session *s = NULL;
	size_t count = 0;

	TAILQ_FOREACH(s, &open_sp_sessions, link) {
		if (s->state == sp_dead)
			continue;
		if (count < in_count) {
			spmc_fill_partition_entry(fpi, s->endpoint_id, 1);
			fpi++;
		}
		count++;
	}

	*elem_count = count;
	if (count > in_count)
		return TEE_ERROR_SHORT_BUFFER;

	return TEE_SUCCESS;
}

/*
 * Check that @mem may be used exclusively: when @uctx is non-NULL the
 * region's mobj must be mapped in that context, and in all cases the
 * region must not currently be shared with another SP.
 */
bool sp_has_exclusive_access(struct sp_mem_map_region *mem,
			     struct user_mode_ctx *uctx)
{
	/*
	 * Check that we have access to the region if it is supposed to be
	 * mapped to the current context.
	 */
	if (uctx) {
		struct vm_region *region = NULL;

		/* Make sure that each mobj belongs to the SP */
		TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
			if (region->mobj == mem->mobj)
				break;
		}

		if (!region)
			return false;
	}

	/* Check that it is not shared with another SP */
	return !sp_mem_is_shared(mem);
}

/*
 * Prepare the FF-A boot/init info for an SP's first run: reserve room on
 * the SP's stack for a struct sp_ffa_init_info, initialize it, and pass
 * its address to the SP in register x0 (args->a0).
 */
static void sp_init_info(struct sp_ctx *ctx, struct thread_smc_args *args)
{
	struct sp_ffa_init_info *info = NULL;

	/*
	 * When starting the SP for the first time a init_info struct is
	 * passed. Store the struct on the stack and store the address in x0
	 */
	ctx->uctx.stack_ptr -= ROUNDUP(sizeof(*info), STACK_ALIGNMENT);

	info = (struct sp_ffa_init_info *)ctx->uctx.stack_ptr;

	/* magic field is 4 bytes, we don't copy the '\0' byte */
	memcpy(&info->magic, "FF-A", 4);
	info->count = 0;
	args->a0 = (vaddr_t)info;
}

/*
 * Allocate a new FF-A endpoint ID: one past the ID of the last session
 * in @open_sessions (sessions are appended in ID order), starting at
 * SPMC_ENDPOINT_ID + 1. The assert catches uint16_t wrap-around back
 * into the reserved range.
 */
static uint16_t new_session_id(struct sp_sessions_head *open_sessions)
{
	struct sp_session *last = NULL;
	uint16_t id = SPMC_ENDPOINT_ID + 1;

	last = TAILQ_LAST(open_sessions, sp_sessions_head);
	if (last)
		id = last->endpoint_id + 1;

	assert(id > SPMC_ENDPOINT_ID);
	return id;
}

/*
 * Allocate and wire up the sp_ctx for session @s: link session and
 * context both ways, record @uuid, init the VM info and install the SP
 * ts_ops. On failure the partially built context is freed and @s is
 * left without a context.
 */
static TEE_Result sp_create_ctx(const TEE_UUID *uuid, struct sp_session *s)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_ctx *spc = NULL;

	/* Register context */
	spc = calloc(1, sizeof(struct sp_ctx));
	if (!spc)
		return TEE_ERROR_OUT_OF_MEMORY;

	spc->uctx.ts_ctx = &spc->ts_ctx;
	spc->open_session = s;
	s->ts_sess.ctx = &spc->ts_ctx;
	spc->ts_ctx.uuid = *uuid;

	res = vm_info_init(&spc->uctx);
	if (res)
		goto err;

	set_sp_ctx_ops(&spc->ts_ctx);

	return TEE_SUCCESS;

err:
	free(spc);
	return res;
}

/*
 * Allocate a session for the SP identified by @uuid, give it a fresh
 * endpoint ID, create its context and append it to @open_sessions.
 * On success *@sess points at the new session.
 *
 * NOTE(review): the !endpoint_id check can only trigger after a full
 * uint16_t wrap; new_session_id()'s assert fires first in debug builds.
 */
static TEE_Result sp_create_session(struct sp_sessions_head *open_sessions,
				    const TEE_UUID *uuid,
				    struct sp_session **sess)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_session *s = calloc(1, sizeof(struct sp_session));

	if (!s)
		return TEE_ERROR_OUT_OF_MEMORY;

	s->endpoint_id = new_session_id(open_sessions);
	if (!s->endpoint_id) {
		res = TEE_ERROR_OVERFLOW;
		goto err;
	}

	DMSG("Loading Secure Partition %pUl", (void *)uuid);
	res = sp_create_ctx(uuid, s);
	if (res)
		goto err;

	TAILQ_INSERT_TAIL(open_sessions, s, link);
	*sess = s;
	return TEE_SUCCESS;

err:
	free(s);
	return res;
}

/*
 * Reset the SP's saved register state for its first entry: zero all
 * registers, then point SP at the context's stack and PC at its entry
 * function. Always returns TEE_SUCCESS.
 */
static TEE_Result sp_init_set_registers(struct sp_ctx *ctx)
{
	struct thread_ctx_regs *sp_regs = &ctx->sp_regs;

	memset(sp_regs, 0, sizeof(*sp_regs));
	sp_regs->sp = ctx->uctx.stack_ptr;
	sp_regs->pc = ctx->uctx.entry_func;

	return TEE_SUCCESS;
}
/*
 * Map an FF-A memory share @smem into the SP behind session @s.
 *
 * Access rights are derived from @receiver's FF-A permission bits
 * (RX by default, plus X and/or W); W+X together is rejected with
 * TEE_ERROR_ACCESS_CONFLICT. Each region of the share is mapped with
 * vm_map(); *@va must be 0 on entry (caller-chosen addresses are not
 * supported) and receives the chosen address from vm_map().
 *
 * NOTE(review): on a mid-list vm_map() failure, regions already mapped
 * by earlier iterations are not unwound here — presumably the caller
 * cleans up via sp_unmap_ffa_regions(); verify against callers.
 */
TEE_Result sp_map_shared(struct sp_session *s,
			 struct sp_mem_receiver *receiver,
			 struct sp_mem *smem,
			 uint64_t *va)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_ctx *ctx = NULL;
	uint32_t perm = TEE_MATTR_UR;
	struct sp_mem_map_region *reg = NULL;

	ctx = to_sp_ctx(s->ts_sess.ctx);

	/* Get the permission */
	if (receiver->perm.perm & FFA_MEM_ACC_EXE)
		perm |= TEE_MATTR_UX;

	if (receiver->perm.perm & FFA_MEM_ACC_RW) {
		if (receiver->perm.perm & FFA_MEM_ACC_EXE)
			return TEE_ERROR_ACCESS_CONFLICT;

		perm |= TEE_MATTR_UW;
	}
	/*
	 * Currently we don't support passing a va. We can't guarantee that the
	 * full region will be mapped in a contiguous region. A smem->region can
	 * have multiple mobj for one share. Currently there doesn't seem to be
	 * an option to guarantee that these will be mapped in a contiguous va
	 * space.
	 */
	if (*va)
		return TEE_ERROR_NOT_SUPPORTED;

	SLIST_FOREACH(reg, &smem->regions, link) {
		res = vm_map(&ctx->uctx, va, reg->page_count * SMALL_PAGE_SIZE,
			     perm, 0, reg->mobj, reg->page_offset);

		if (res != TEE_SUCCESS) {
			EMSG("Failed to map memory region %#"PRIx32, res);
			return res;
		}
	}
	return TEE_SUCCESS;
}

/*
 * Undo sp_map_shared(): unmap every region of @smem from session @s's
 * address space. Stops and returns the error on the first failing
 * vm_unmap(); returns TEE_SUCCESS when all regions were unmapped.
 */
TEE_Result sp_unmap_ffa_regions(struct sp_session *s, struct sp_mem *smem)
{
	TEE_Result res = TEE_SUCCESS;
	vaddr_t vaddr = 0;
	size_t len = 0;
	struct sp_ctx *ctx = to_sp_ctx(s->ts_sess.ctx);
	struct sp_mem_map_region *reg = NULL;

	SLIST_FOREACH(reg, &smem->regions, link) {
		vaddr = (vaddr_t)sp_mem_get_va(&ctx->uctx, reg->page_offset,
					       reg->mobj);
		len = reg->page_count * SMALL_PAGE_SIZE;

		res = vm_unmap(&ctx->uctx, vaddr, len);
		if (res != TEE_SUCCESS)
			return res;
	}

	return TEE_SUCCESS;
}

/*
 * Create a session for the embedded SP @uuid and load its image with
 * ldelf, leaving the session in sp_idle with its entry registers set.
 *
 * Returns TEE_ERROR_ITEM_NOT_FOUND when no embedded SP matches @uuid,
 * and TEE_ERROR_TARGET_DEAD when ldelf fails to load/init the image.
 *
 * NOTE(review): the `if (!ctx)` branch after assert(ctx) is dead code —
 * to_sp_ctx() of a just-created session can't yield NULL here. Also, on
 * the ldelf failure path the session stays on the open-sessions list;
 * confirm whether it should be marked dead or removed.
 */
static TEE_Result sp_open_session(struct sp_session **sess,
				  struct sp_sessions_head *open_sessions,
				  const TEE_UUID *uuid)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_session *s = NULL;
	struct sp_ctx *ctx = NULL;

	if (!find_secure_partition(uuid))
		return TEE_ERROR_ITEM_NOT_FOUND;

	res = sp_create_session(open_sessions, uuid, &s);
	if (res != TEE_SUCCESS) {
		DMSG("sp_create_session failed %#"PRIx32, res);
		return res;
	}

	ctx = to_sp_ctx(s->ts_sess.ctx);
	assert(ctx);
	if (!ctx)
		return TEE_ERROR_TARGET_DEAD;
	*sess = s;

	ts_push_current_session(&s->ts_sess);
	/* Load the SP using ldelf. */
	ldelf_load_ldelf(&ctx->uctx);
	res = ldelf_init_with_ldelf(&s->ts_sess, &ctx->uctx);

	if (res != TEE_SUCCESS) {
		EMSG("Failed. loading SP using ldelf %#"PRIx32, res);
		ts_pop_current_session();
		return TEE_ERROR_TARGET_DEAD;
	}

	/* Make the SP ready for its first run */
	s->state = sp_idle;
	s->caller_id = 0;
	sp_init_set_registers(ctx);
	ts_pop_current_session();

	return TEE_SUCCESS;
}

/*
 * Validate the SP's FF-A manifest @fdt: it must be compatible with
 * "arm,ffa-manifest-1.0" and carry a 16-byte "uuid" property that,
 * after fdt32 endian conversion, matches @uuid. Logs the optional
 * "description" property when present.
 */
static TEE_Result handle_fdt(const void * const fdt, const TEE_UUID *uuid)
{
	TEE_Result res = TEE_SUCCESS;
	int len = 0;
	const fdt32_t *prop = NULL;
	int i = 0;
	const struct fdt_property *description = NULL;
	int description_name_len = 0;
	uint32_t uuid_array[4] = { 0 };
	TEE_UUID fdt_uuid = {};

	res = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
	if (res) {
		EMSG("Failed loading SP, manifest not found");
		return res;
	}

	description = fdt_get_property(fdt, 0, "description",
				       &description_name_len);
	if (description)
		DMSG("Loading SP: %s", description->data);

	prop = fdt_getprop(fdt, 0, "uuid", &len);
	if (!prop || len != 16) {
		EMSG("Missing or invalid UUID in SP manifest");
		return TEE_ERROR_BAD_FORMAT;
	}

	/* Convert the big-endian fdt cells before interpreting the octets */
	for (i = 0; i < 4; i++)
		uuid_array[i] = fdt32_to_cpu(prop[i]);
	tee_uuid_from_octets(&fdt_uuid, (uint8_t *)uuid_array);

	if (memcmp(uuid, &fdt_uuid, sizeof(fdt_uuid))) {
		EMSG("Failed loading SP, UUID mismatch");
		return TEE_ERROR_BAD_FORMAT;
	}

	return TEE_SUCCESS;
}

/*
 * Boot-time bring-up of one SP: validate its manifest, open a session,
 * build the FF-A init info on its stack and run it for the first time,
 * then let the SPMC message handler process the SP's first FF-A call.
 *
 * NOTE(review): on sp_enter() failure this returns FFA_ABORTED (an FF-A
 * status) as a TEE_Result — callers only check for non-zero, but the
 * mixed error domains are worth confirming.
 */
static TEE_Result sp_init_uuid(const TEE_UUID *uuid, const void * const fdt)
{
	TEE_Result res = TEE_SUCCESS;
	struct sp_session *sess = NULL;
	struct thread_smc_args args = { };

	res = handle_fdt(fdt, uuid);

	if (res)
		return res;

	res = sp_open_session(&sess,
			      &open_sp_sessions,
			      uuid);
	if (res)
		return res;

	ts_push_current_session(&sess->ts_sess);
	sp_init_info(to_sp_ctx(sess->ts_sess.ctx), &args);
	ts_pop_current_session();

	if (sp_enter(&args, sess))
		return FFA_ABORTED;

	spmc_sp_msg_handler(&args, sess);

	return TEE_SUCCESS;
}

/*
 * Enter SP @sp: copy the FF-A arguments a0-a7 into the SP's saved
 * x0-x7, run it via its ts_ops enter_invoke_cmd hook, then copy the
 * (possibly updated) x0-x7 back into @args for the caller.
 */
TEE_Result sp_enter(struct thread_smc_args *args, struct sp_session *sp)
{
	TEE_Result res = FFA_OK;
	struct sp_ctx *ctx = to_sp_ctx(sp->ts_sess.ctx);

	ctx->sp_regs.x[0] = args->a0;
	ctx->sp_regs.x[1] = args->a1;
	ctx->sp_regs.x[2] = args->a2;
	ctx->sp_regs.x[3] = args->a3;
	ctx->sp_regs.x[4] = args->a4;
	ctx->sp_regs.x[5] = args->a5;
	ctx->sp_regs.x[6] = args->a6;
	ctx->sp_regs.x[7] = args->a7;

	res = sp->ts_sess.ctx->ops->enter_invoke_cmd(&sp->ts_sess, 0);

	args->a0 = ctx->sp_regs.x[0];
	args->a1 = ctx->sp_regs.x[1];
	args->a2 = ctx->sp_regs.x[2];
	args->a3 = ctx->sp_regs.x[3];
	args->a4 = ctx->sp_regs.x[4];
	args->a5 = ctx->sp_regs.x[5];
	args->a6 = ctx->sp_regs.x[6];
	args->a7 = ctx->sp_regs.x[7];

	return res;
}

/*
 * ts_ops enter hook for SPs: switch to the SP's user mode context with
 * exceptions masked and run it until it traps back (SVC) or panics.
 *
 * The saved CPSR is stashed and the current DAIF bits substituted for
 * the duration of the run, then restored. A panicking SP is marked
 * sp_dead under its spinlock and TEE_ERROR_TARGET_DEAD is returned.
 */
static TEE_Result sp_enter_invoke_cmd(struct ts_session *s,
				      uint32_t cmd __unused)
{
	struct sp_ctx *ctx = to_sp_ctx(s->ctx);
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;
	uint64_t cpsr = 0;
	struct sp_session *sp_s = to_sp_session(s);
	struct ts_session *sess = NULL;
	struct thread_ctx_regs *sp_regs = NULL;
	uint32_t panicked = false;
	uint32_t panic_code = 0;

	bm_timestamp();

	sp_regs = &ctx->sp_regs;
	ts_push_current_session(s);

	cpsr = sp_regs->cpsr;
	sp_regs->cpsr = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	__thread_enter_user_mode(sp_regs, &panicked, &panic_code);
	sp_regs->cpsr = cpsr;
	thread_unmask_exceptions(exceptions);

	thread_user_clear_vfp(&ctx->uctx);

	if (panicked) {
		DMSG("SP panicked with code %#"PRIx32, panic_code);
		abort_print_current_ts();

		sess = ts_pop_current_session();
		cpu_spin_lock(&sp_s->spinlock);
		sp_s->state = sp_dead;
		cpu_spin_unlock(&sp_s->spinlock);

		return TEE_ERROR_TARGET_DEAD;
	}

	sess = ts_pop_current_session();
	assert(sess == s);

	bm_timestamp();

	return res;
}

/* We currently don't support 32 bits */
#ifdef ARM64
/*
 * Snapshot the SP's registers from the SVC trap frame into its saved
 * context: x0-x30, plus PC from ELR and SP from SP_EL0.
 */
static void sp_svc_store_registers(struct thread_svc_regs *regs,
				   struct thread_ctx_regs *sp_regs)
{
	COMPILE_TIME_ASSERT(sizeof(sp_regs->x[0]) == sizeof(regs->x0));
	memcpy(sp_regs->x, &regs->x0, 31 * sizeof(regs->x0));
	sp_regs->pc = regs->elr;
	sp_regs->sp = regs->sp_el0;
}
#endif

/*
 * SVC handler for SPs: save the SP's full register state, zero the
 * panic indication in the trap frame, and return false so execution
 * unwinds out of user mode instead of resuming the SP.
 */
static bool sp_handle_svc(struct thread_svc_regs *regs)
{
	struct ts_session *ts = ts_get_current_session();
	struct sp_ctx *uctx = to_sp_ctx(ts->ctx);
	struct sp_session *s = uctx->open_session;

	assert(s);

	sp_svc_store_registers(regs, &uctx->sp_regs);

	regs->x0 = 0;
	regs->x1 = 0; /* panic */
	regs->x2 = 0; /* panic code */

	/*
	 * All the registers of the SP are saved in the SP session by the SVC
	 * handler.
	 * We always return to S-El1 after handling the SVC. We will continue
	 * in sp_enter_invoke_cmd() (return from __thread_enter_user_mode).
	 * The sp_enter() function copies the FF-A parameters (a0-a7) from the
	 * saved registers to the thread_smc_args. The thread_smc_args object is
	 * afterward used by the spmc_sp_msg_handler() to handle the
	 * FF-A message send by the SP.
	 */
	return false;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct ts_ops sp_ops __weak __relrodata_unpaged("sp_ops") = {
	.enter_invoke_cmd = sp_enter_invoke_cmd,
	.handle_svc = sp_handle_svc,
};

/*
 * Boot hook: initialize every embedded secure partition. A failing SP
 * panics the core unless CFG_SP_SKIP_FAILED is enabled, in which case
 * the failure is logged and the remaining SPs are still initialized.
 */
static TEE_Result sp_init_all(void)
{
	TEE_Result res = TEE_SUCCESS;
	const struct sp_image *sp = NULL;
	char __maybe_unused msg[60] = { '\0', };

	for_each_secure_partition(sp) {
		if (sp->image.uncompressed_size)
			snprintf(msg, sizeof(msg),
				 " (compressed, uncompressed %u)",
				 sp->image.uncompressed_size);
		else
			msg[0] = '\0';
		DMSG("SP %pUl size %u%s", (void *)&sp->image.uuid,
		     sp->image.size, msg);

		res = sp_init_uuid(&sp->image.uuid, sp->fdt);

		if (res != TEE_SUCCESS) {
			EMSG("Failed initializing SP(%pUl) err:%#"PRIx32,
			     &sp->image.uuid, res);
			if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
				panic();
		}
	}

	return TEE_SUCCESS;
}

boot_final(sp_init_all);

/* ts_store open hook: resolve @uuid against the embedded SP images */
static TEE_Result secure_partition_open(const TEE_UUID *uuid,
					struct ts_store_handle **h)
{
	return emb_ts_open(uuid, h, find_secure_partition);
}

REGISTER_SP_STORE(2) = {
	.description = "SP store",
	.open = secure_partition_open,
	.get_size = emb_ts_get_size,
	.get_tag = emb_ts_get_tag,
	.read = emb_ts_read,
	.close = emb_ts_close,
};