// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2019, Linaro Limited
 * Copyright (c) 2020, Arm Limited.
 */

#include <crypto/crypto.h>
#include <ffa.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/stmm_sp.h>
#include <kernel/thread_private.h>
#include <kernel/user_mode_ctx.h>
#include <mempool.h>
#include <mm/fobj.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <pta_stmm.h>
#include <tee_api_defines_extensions.h>
#include <tee/tee_pobj.h>
#include <tee/tee_svc.h>
#include <tee/tee_svc_storage.h>
#include <zlib.h>

#ifdef ARM64
#define SVC_REGS_A0(_regs)	((_regs)->x0)
#define SVC_REGS_A1(_regs)	((_regs)->x1)
#define SVC_REGS_A2(_regs)	((_regs)->x2)
#define SVC_REGS_A3(_regs)	((_regs)->x3)
#define SVC_REGS_A4(_regs)	((_regs)->x4)
#define SVC_REGS_A5(_regs)	((_regs)->x5)
#define SVC_REGS_A6(_regs)	((_regs)->x6)
#define SVC_REGS_A7(_regs)	((_regs)->x7)
#define __FFA_SVC_RPMB_READ		FFA_SVC_RPMB_READ
#define __FFA_SVC_RPMB_WRITE		FFA_SVC_RPMB_WRITE
#define __FFA_SVC_MEMORY_ATTRIBUTES_GET	FFA_SVC_MEMORY_ATTRIBUTES_GET_64
#define __FFA_SVC_MEMORY_ATTRIBUTES_SET	FFA_SVC_MEMORY_ATTRIBUTES_SET_64
#define __FFA_MSG_SEND_DIRECT_RESP	FFA_MSG_SEND_DIRECT_RESP_64
#define __FFA_MSG_SEND_DIRECT_REQ	FFA_MSG_SEND_DIRECT_REQ_64
#endif
#ifdef ARM32
#define SVC_REGS_A0(_regs)	((_regs)->r0)
#define SVC_REGS_A1(_regs)	((_regs)->r1)
#define SVC_REGS_A2(_regs)	((_regs)->r2)
#define SVC_REGS_A3(_regs)	((_regs)->r3)
#define SVC_REGS_A4(_regs)	((_regs)->r4)
#define SVC_REGS_A5(_regs)	((_regs)->r5)
#define SVC_REGS_A6(_regs)	((_regs)->r6)
#define SVC_REGS_A7(_regs)	((_regs)->r7)
#define __FFA_SVC_RPMB_READ		FFA_SVC_RPMB_READ_32
#define __FFA_SVC_RPMB_WRITE		FFA_SVC_RPMB_WRITE_32
#define __FFA_SVC_MEMORY_ATTRIBUTES_GET	FFA_SVC_MEMORY_ATTRIBUTES_GET_32
#define __FFA_SVC_MEMORY_ATTRIBUTES_SET	FFA_SVC_MEMORY_ATTRIBUTES_SET_32
#define __FFA_MSG_SEND_DIRECT_RESP	FFA_MSG_SEND_DIRECT_RESP_32
#define __FFA_MSG_SEND_DIRECT_REQ	FFA_MSG_SEND_DIRECT_REQ_32
#endif

static const TEE_UUID stmm_uuid = PTA_STMM_UUID;

/*
 * Once a complete FFA spec is added, these will become discoverable.
 * Until then these are considered part of the internal ABI between
 * OP-TEE and StMM.
 */
static const uint16_t stmm_id = 1U;
static const uint16_t stmm_pta_id = 2U;
static const uint16_t mem_mgr_id = 3U;
static const uint16_t ffa_storage_id = 4U;

static const unsigned int stmm_stack_size = 4 * SMALL_PAGE_SIZE;
static const unsigned int stmm_heap_size = 398 * SMALL_PAGE_SIZE;
static const unsigned int stmm_sec_buf_size = 4 * SMALL_PAGE_SIZE;
static const unsigned int stmm_ns_comm_buf_size = 4 * SMALL_PAGE_SIZE;

extern unsigned char stmm_image[];
extern const unsigned int stmm_image_size;
extern const unsigned int stmm_image_uncompressed_size;

const TEE_UUID *stmm_get_uuid(void)
{
	return &stmm_uuid;
}

static struct stmm_ctx *stmm_alloc_ctx(const TEE_UUID *uuid)
{
	TEE_Result res = TEE_SUCCESS;
	struct stmm_ctx *spc = NULL;

	spc = calloc(1, sizeof(*spc));
	if (!spc)
		return NULL;

	spc->ta_ctx.ts_ctx.ops = &stmm_sp_ops;
	spc->ta_ctx.ts_ctx.uuid = *uuid;
	spc->ta_ctx.flags = TA_FLAG_SINGLE_INSTANCE |
			    TA_FLAG_INSTANCE_KEEP_ALIVE;

	res = vm_info_init(&spc->uctx, &spc->ta_ctx.ts_ctx);
	if (res) {
		free(spc);
		return NULL;
	}

	spc->ta_ctx.ref_count = 1;
	condvar_init(&spc->ta_ctx.busy_cv);

	return spc;
}

static TEE_Result stmm_enter_user_mode(struct stmm_ctx *spc)
{
	uint32_t exceptions = 0;
	uint32_t panic_code = 0;
	uint32_t panicked = 0;
	uint64_t cntkctl = 0;

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	cntkctl = read_cntkctl();
	write_cntkctl(cntkctl | CNTKCTL_PL0PCTEN);

#ifdef ARM32
	/* Handle usr_lr in place of __thread_enter_user_mode() */
	thread_set_usr_lr(spc->regs.usr_lr);
#endif

	__thread_enter_user_mode(&spc->regs, &panicked, &panic_code);

#ifdef ARM32
	spc->regs.usr_lr = thread_get_usr_lr();
#endif

	write_cntkctl(cntkctl);
	thread_unmask_exceptions(exceptions);

	thread_user_clear_vfp(&spc->uctx);

	if (panicked) {
		abort_print_current_ts();
		DMSG("stmm panicked with code %#"PRIx32, panic_code);
		return TEE_ERROR_TARGET_DEAD;
	}

	return TEE_SUCCESS;
}

#ifdef ARM64
static void init_stmm_regs(struct stmm_ctx *spc, unsigned long a0,
			   unsigned long a1, unsigned long sp,
			   unsigned long pc)
{
	spc->regs.x[0] = a0;
	spc->regs.x[1] = a1;
	spc->regs.sp = sp;
	spc->regs.pc = pc;
}
#endif

#ifdef ARM32
static uint32_t __maybe_unused get_spsr(void)
{
	uint32_t s = 0;

	s = read_cpsr();
	s &= ~(CPSR_MODE_MASK | CPSR_T | ARM32_CPSR_IT_MASK);
	s |= CPSR_MODE_USR;

	return s;
}

static void init_stmm_regs(struct stmm_ctx *spc, unsigned long a0,
			   unsigned long a1, unsigned long sp,
			   unsigned long pc)
{
	spc->regs.r0 = a0;
	spc->regs.r1 = a1;
	spc->regs.usr_sp = sp;
	spc->regs.cpsr = get_spsr();
	spc->regs.pc = pc;
}
#endif

static TEE_Result alloc_and_map_sp_fobj(struct stmm_ctx *spc, size_t sz,
					uint32_t prot, vaddr_t *va)
{
	size_t num_pgs = ROUNDUP(sz, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
	struct fobj *fobj = fobj_ta_mem_alloc(num_pgs);
	TEE_Result res = TEE_SUCCESS;
	struct mobj *mobj = NULL;

	mobj = mobj_with_fobj_alloc(fobj, NULL, TEE_MATTR_MEM_TYPE_TAGGED);
	fobj_put(fobj);
	if (!mobj)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = vm_map(&spc->uctx, va, num_pgs * SMALL_PAGE_SIZE,
		     prot, 0, mobj, 0);
	if (res)
		mobj_put(mobj);

	/* Propagate the vm_map() result instead of always reporting success */
	return res;
}
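
/*
 * Allocation callbacks handed to zlib by uncompress_image(), both backed
 * by the default mempool.
 */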
static void *zalloc(void *opaque __unused, unsigned int items,
		    unsigned int size)
{
	return mempool_alloc(mempool_default, items * size);
}

static void zfree(void *opaque __unused, void *address)
{
	mempool_free(mempool_default, address);
}

static void uncompress_image(void *dst, size_t dst_size, void *src,
			     size_t src_size)
{
	z_stream strm = {
		.next_in = src,
		.avail_in = src_size,
		.next_out = dst,
		.avail_out = dst_size,
		.zalloc = zalloc,
		.zfree = zfree,
	};

	if (inflateInit(&strm) != Z_OK)
		panic("inflateInit");

	if (inflate(&strm, Z_SYNC_FLUSH) != Z_STREAM_END)
		panic("inflate");

	if (inflateEnd(&strm) != Z_OK)
		panic("inflateEnd");
}
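
/*
 * Load and boot the StMM secure partition. The single private allocation
 * is laid out, page aligned, as:
 *
 *   sp_addr -> [ image | heap | stack | shared (secure) buffer ]
 *
 * A separate buffer of stmm_ns_comm_buf_size bytes is mapped for
 * communication with the normal world, and the layout is described to
 * StMM through the struct stmm_boot_info placed in the shared buffer.
 */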
static TEE_Result load_stmm(struct stmm_ctx *spc)
{
	struct stmm_boot_info *boot_info = NULL;
	struct stmm_mp_info *mp_info = NULL;
	TEE_Result res = TEE_SUCCESS;
	vaddr_t sp_addr = 0;
	vaddr_t image_addr = 0;
	vaddr_t heap_addr = 0;
	vaddr_t stack_addr = 0;
	vaddr_t sec_buf_addr = 0;
	vaddr_t comm_buf_addr = 0;
	unsigned int sp_size = 0;
	unsigned int uncompressed_size_roundup = 0;

	uncompressed_size_roundup = ROUNDUP(stmm_image_uncompressed_size,
					    SMALL_PAGE_SIZE);
	sp_size = uncompressed_size_roundup + stmm_stack_size +
		  stmm_heap_size + stmm_sec_buf_size;
	res = alloc_and_map_sp_fobj(spc, sp_size, TEE_MATTR_PRW, &sp_addr);
	if (res)
		return res;

	res = alloc_and_map_sp_fobj(spc, stmm_ns_comm_buf_size,
				    TEE_MATTR_URW | TEE_MATTR_PRW,
				    &comm_buf_addr);
	/*
	 * We don't need to free the previous allocation here, it will all
	 * be handled during the destruction call (stmm_ctx_destroy())
	 */
	if (res)
		return res;

	image_addr = sp_addr;
	heap_addr = image_addr + uncompressed_size_roundup;
	stack_addr = heap_addr + stmm_heap_size;
	sec_buf_addr = stack_addr + stmm_stack_size;

	vm_set_ctx(&spc->ta_ctx.ts_ctx);
	uncompress_image((void *)image_addr, stmm_image_uncompressed_size,
			 stmm_image, stmm_image_size);

	res = vm_set_prot(&spc->uctx, image_addr, uncompressed_size_roundup,
			  TEE_MATTR_URX | TEE_MATTR_PR);
	if (res)
		return res;

	res = vm_set_prot(&spc->uctx, heap_addr, stmm_heap_size,
			  TEE_MATTR_URW | TEE_MATTR_PRW);
	if (res)
		return res;

	res = vm_set_prot(&spc->uctx, stack_addr, stmm_stack_size,
			  TEE_MATTR_URW | TEE_MATTR_PRW);
	if (res)
		return res;

	res = vm_set_prot(&spc->uctx, sec_buf_addr, stmm_sec_buf_size,
			  TEE_MATTR_URW | TEE_MATTR_PRW);
	if (res)
		return res;

	DMSG("stmm load address %#"PRIxVA, image_addr);

	boot_info = (struct stmm_boot_info *)sec_buf_addr;
	mp_info = (struct stmm_mp_info *)(boot_info + 1);
	*boot_info = (struct stmm_boot_info){
		.h.type = STMM_PARAM_SP_IMAGE_BOOT_INFO,
		.h.version = STMM_PARAM_VERSION_1,
		.h.size = sizeof(struct stmm_boot_info),
		.h.attr = 0,
		.sp_mem_base = sp_addr,
		.sp_mem_limit = sp_addr + sp_size,
		.sp_image_base = image_addr,
		.sp_stack_base = stack_addr,
		.sp_heap_base = heap_addr,
		.sp_ns_comm_buf_base = comm_buf_addr,
		.sp_shared_buf_base = sec_buf_addr,
		.sp_image_size = stmm_image_size,
		.sp_pcpu_stack_size = stmm_stack_size,
		.sp_heap_size = stmm_heap_size,
		.sp_ns_comm_buf_size = stmm_ns_comm_buf_size,
		.sp_shared_buf_size = stmm_sec_buf_size,
		.num_sp_mem_regions = 6,
		.num_cpus = 1,
		.mp_info = mp_info,
	};
	mp_info->mpidr = read_mpidr();
	mp_info->linear_id = 0;
	mp_info->flags = MP_INFO_FLAG_PRIMARY_CPU;
	spc->ns_comm_buf_addr = comm_buf_addr;
	spc->ns_comm_buf_size = stmm_ns_comm_buf_size;

	init_stmm_regs(spc, sec_buf_addr,
		       (vaddr_t)(mp_info + 1) - sec_buf_addr,
		       stack_addr + stmm_stack_size, image_addr);

	return stmm_enter_user_mode(spc);
}

TEE_Result stmm_init_session(const TEE_UUID *uuid, struct tee_ta_session *sess)
{
	struct stmm_ctx *spc = NULL;
	TEE_Result res = TEE_SUCCESS;

	if (memcmp(uuid, &stmm_uuid, sizeof(*uuid)))
		return TEE_ERROR_ITEM_NOT_FOUND;

	spc = stmm_alloc_ctx(uuid);
	if (!spc)
		return TEE_ERROR_OUT_OF_MEMORY;

	spc->is_initializing = true;

	mutex_lock(&tee_ta_mutex);
	sess->ts_sess.ctx = &spc->ta_ctx.ts_ctx;
	sess->ts_sess.handle_scall = sess->ts_sess.ctx->ops->handle_scall;
	mutex_unlock(&tee_ta_mutex);

	ts_push_current_session(&sess->ts_sess);
	res = load_stmm(spc);
	ts_pop_current_session();
	vm_set_ctx(NULL);
	if (res) {
		sess->ts_sess.ctx = NULL;
		spc->ta_ctx.ts_ctx.ops->destroy(&spc->ta_ctx.ts_ctx);

		return res;
	}

	mutex_lock(&tee_ta_mutex);
	spc->is_initializing = false;
	TAILQ_INSERT_TAIL(&tee_ctxes, &spc->ta_ctx, link);
	mutex_unlock(&tee_ta_mutex);

	return TEE_SUCCESS;
}

static TEE_Result stmm_enter_open_session(struct ts_session *s)
{
	struct stmm_ctx *spc = to_stmm_ctx(s->ctx);
	struct tee_ta_session *ta_sess = to_ta_session(s);
	const uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_NONE,
						TEE_PARAM_TYPE_NONE,
						TEE_PARAM_TYPE_NONE,
						TEE_PARAM_TYPE_NONE);

	if (ta_sess->param->types != exp_pt)
		return TEE_ERROR_BAD_PARAMETERS;

	if (spc->is_initializing) {
		/* StMM is initialized in stmm_init_session() */
		ta_sess->err_origin = TEE_ORIGIN_TEE;
		return TEE_ERROR_BAD_STATE;
	}

	return TEE_SUCCESS;
}
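
/*
 * PTA_STMM_CMD_COMMUNICATE: copy the caller's request from the non-secure
 * memref into the StMM communication buffer, enter StMM with an FF-A
 * direct request (sender stmm_pta_id, receiver stmm_id), then copy the
 * reply back and report StMM's return value (argument register 4) in the
 * value output parameter.
 */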
static TEE_Result stmm_enter_invoke_cmd(struct ts_session *s, uint32_t cmd)
{
	struct stmm_ctx *spc = to_stmm_ctx(s->ctx);
	struct tee_ta_session *ta_sess = to_ta_session(s);
	TEE_Result res = TEE_SUCCESS;
	TEE_Result __maybe_unused tmp_res = TEE_SUCCESS;
	unsigned int ns_buf_size = 0;
	struct param_mem *mem = NULL;
	void *va = NULL;
	const uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INOUT,
						TEE_PARAM_TYPE_VALUE_OUTPUT,
						TEE_PARAM_TYPE_NONE,
						TEE_PARAM_TYPE_NONE);

	if (cmd != PTA_STMM_CMD_COMMUNICATE)
		return TEE_ERROR_BAD_PARAMETERS;

	if (ta_sess->param->types != exp_pt)
		return TEE_ERROR_BAD_PARAMETERS;

	mem = &ta_sess->param->u[0].mem;
	ns_buf_size = mem->size;
	if (ns_buf_size > spc->ns_comm_buf_size) {
		mem->size = spc->ns_comm_buf_size;
		return TEE_ERROR_EXCESS_DATA;
	}

	res = mobj_inc_map(mem->mobj);
	if (res)
		return res;

	va = mobj_get_va(mem->mobj, mem->offs, mem->size);
	if (!va) {
		EMSG("Can't get a valid VA for NS buffer");
		res = TEE_ERROR_BAD_PARAMETERS;
		goto out_va;
	}

#ifdef ARM64
	spc->regs.x[0] = __FFA_MSG_SEND_DIRECT_REQ;
	spc->regs.x[1] = (stmm_pta_id << 16) | stmm_id;
	spc->regs.x[2] = FFA_PARAM_MBZ;
	spc->regs.x[3] = spc->ns_comm_buf_addr;
	spc->regs.x[4] = ns_buf_size;
	spc->regs.x[5] = 0;
	spc->regs.x[6] = 0;
	spc->regs.x[7] = 0;
#endif
#ifdef ARM32
	spc->regs.r0 = __FFA_MSG_SEND_DIRECT_REQ;
	spc->regs.r1 = (stmm_pta_id << 16) | stmm_id;
	spc->regs.r2 = FFA_PARAM_MBZ;
	spc->regs.r3 = spc->ns_comm_buf_addr;
	spc->regs.r4 = ns_buf_size;
	spc->regs.r5 = 0;
	spc->regs.r6 = 0;
	spc->regs.r7 = 0;
#endif

	ts_push_current_session(s);

	memcpy((void *)spc->ns_comm_buf_addr, va, ns_buf_size);

	res = stmm_enter_user_mode(spc);
	if (res)
		goto out_session;
	/*
	 * Copy the SPM response from secure partition back to the non-secure
	 * buffer of the client that called us.
	 */
#ifdef ARM64
	ta_sess->param->u[1].val.a = spc->regs.x[4];
#endif
#ifdef ARM32
	ta_sess->param->u[1].val.a = spc->regs.r4;
#endif

	memcpy(va, (void *)spc->ns_comm_buf_addr, ns_buf_size);

out_session:
	ts_pop_current_session();
out_va:
	tmp_res = mobj_dec_map(mem->mobj);
	assert(!tmp_res);

	return res;
}

static void stmm_enter_close_session(struct ts_session *s __unused)
{
}

static void stmm_dump_state(struct ts_ctx *ctx)
{
	user_mode_ctx_print_mappings(to_user_mode_ctx(ctx));
}
DECLARE_KEEP_PAGER(stmm_dump_state);

static uint32_t stmm_get_instance_id(struct ts_ctx *ctx)
{
	return to_stmm_ctx(ctx)->uctx.vm_info.asid;
}

static void stmm_ctx_destroy(struct ts_ctx *ctx)
{
	struct stmm_ctx *spc = to_stmm_ctx(ctx);

	vm_info_final(&spc->uctx);
	free(spc);
}

static uint32_t sp_svc_get_mem_attr(vaddr_t va)
{
	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
	struct ts_session *sess = NULL;
	struct stmm_ctx *spc = NULL;
	uint16_t attrs = 0;
	uint16_t perm = 0;

	if (!va)
		goto err;

	sess = ts_get_current_session();
	spc = to_stmm_ctx(sess->ctx);

	res = vm_get_prot(&spc->uctx, va, SMALL_PAGE_SIZE, &attrs);
	if (res)
		goto err;

	/* A writable page also has read access, so test for write first */
	if (attrs & TEE_MATTR_UW)
		perm |= STMM_MEM_ATTR_ACCESS_RW;
	else if (attrs & TEE_MATTR_UR)
		perm |= STMM_MEM_ATTR_ACCESS_RO;

	if (!(attrs & TEE_MATTR_UX))
		perm |= STMM_MEM_ATTR_EXEC_NEVER;

	return perm;
err:
	return STMM_RET_DENIED;
}

static int sp_svc_set_mem_attr(vaddr_t va, unsigned int nr_pages,
			       uint32_t perm)
{
	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
	struct ts_session *sess = NULL;
	struct stmm_ctx *spc = NULL;
	size_t sz = 0;
	uint32_t prot = 0;

	if (!va || !nr_pages || MUL_OVERFLOW(nr_pages, SMALL_PAGE_SIZE, &sz))
		return STMM_RET_INVALID_PARAM;

	if (perm & ~STMM_MEM_ATTR_ALL)
		return STMM_RET_INVALID_PARAM;

	sess = ts_get_current_session();
	spc = to_stmm_ctx(sess->ctx);

	if ((perm & STMM_MEM_ATTR_ACCESS_MASK) == STMM_MEM_ATTR_ACCESS_RO)
		prot |= TEE_MATTR_UR;
	else if ((perm & STMM_MEM_ATTR_ACCESS_MASK) == STMM_MEM_ATTR_ACCESS_RW)
		prot |= TEE_MATTR_URW;

	if ((perm & STMM_MEM_ATTR_EXEC_NEVER) == STMM_MEM_ATTR_EXEC)
		prot |= TEE_MATTR_UX;

	res = vm_set_prot(&spc->uctx, va, sz, prot);
	if (res)
		return STMM_RET_DENIED;

	return STMM_RET_SUCCESS;
}
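
/*
 * Save the StMM register state when it returns to OP-TEE (see
 * return_from_sp_helper()) so that the next stmm_enter_user_mode()
 * resumes the partition right after its SVC.
 */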
#ifdef ARM64
static void save_sp_ctx(struct stmm_ctx *spc, struct thread_scall_regs *regs)
{
	size_t n = 0;

	/* Save the return values from StMM */
	for (n = 0; n <= 7; n++)
		spc->regs.x[n] = *(&regs->x0 + n);

	spc->regs.sp = regs->sp_el0;
	spc->regs.pc = regs->elr;
	spc->regs.cpsr = regs->spsr;
}
#endif

#ifdef ARM32
static void save_sp_ctx(struct stmm_ctx *spc, struct thread_scall_regs *regs)
{
	spc->regs.r0 = regs->r0;
	spc->regs.r1 = regs->r1;
	spc->regs.r2 = regs->r2;
	spc->regs.r3 = regs->r3;
	spc->regs.r4 = regs->r4;
	spc->regs.r5 = regs->r5;
	spc->regs.r6 = regs->r6;
	spc->regs.r7 = regs->r7;
	spc->regs.pc = regs->lr;
	spc->regs.cpsr = regs->spsr;
	spc->regs.usr_sp = thread_get_usr_sp();
}
#endif

static void return_from_sp_helper(bool panic, uint32_t panic_code,
				  struct thread_scall_regs *regs)
{
	struct ts_session *sess = ts_get_current_session();
	struct stmm_ctx *spc = to_stmm_ctx(sess->ctx);

	if (panic)
		spc->ta_ctx.panicked = true;
	else
		save_sp_ctx(spc, regs);

	SVC_REGS_A0(regs) = 0;
	SVC_REGS_A1(regs) = panic;
	SVC_REGS_A2(regs) = panic_code;
}

static void service_compose_direct_resp(struct thread_scall_regs *regs,
					uint32_t ret_val)
{
	uint16_t src_id = 0;
	uint16_t dst_id = 0;

	/* extract from request */
	src_id = (SVC_REGS_A1(regs) >> 16) & UINT16_MAX;
	dst_id = SVC_REGS_A1(regs) & UINT16_MAX;

	/* compose message */
	SVC_REGS_A0(regs) = __FFA_MSG_SEND_DIRECT_RESP;
	/* swap endpoint ids */
	SVC_REGS_A1(regs) = SHIFT_U32(dst_id, 16) | src_id;
	SVC_REGS_A2(regs) = FFA_PARAM_MBZ;
	SVC_REGS_A3(regs) = ret_val;
	SVC_REGS_A4(regs) = 0;
	SVC_REGS_A5(regs) = 0;
	SVC_REGS_A6(regs) = 0;
	SVC_REGS_A7(regs) = 0;
}

/*
 * Combined read from secure partition, this will open, read and
 * close the file object.
 */
static TEE_Result sec_storage_obj_read(unsigned long storage_id, char *obj_id,
				       unsigned long obj_id_len, void *data,
				       unsigned long len, unsigned long offset,
				       unsigned long flags)
{
	const struct tee_file_operations *fops = NULL;
	TEE_Result res = TEE_ERROR_BAD_STATE;
	struct ts_session *sess = NULL;
	struct tee_file_handle *fh = NULL;
	struct tee_pobj *po = NULL;
	size_t file_size = 0;
	size_t read_len = 0;

	fops = tee_svc_storage_file_ops(storage_id);
	if (!fops)
		return TEE_ERROR_ITEM_NOT_FOUND;

	if (obj_id_len > TEE_OBJECT_ID_MAX_LEN)
		return TEE_ERROR_BAD_PARAMETERS;

	sess = ts_get_current_session();

	res = tee_pobj_get(&sess->ctx->uuid, obj_id, obj_id_len, flags,
			   false, fops, &po);
	if (res != TEE_SUCCESS)
		return res;

	res = po->fops->open(po, &file_size, &fh);
	if (res != TEE_SUCCESS)
		goto out;

	read_len = len;
	res = po->fops->read(fh, offset, NULL, data, &read_len);
	if (res == TEE_ERROR_CORRUPT_OBJECT) {
		EMSG("Object corrupt");
		po->fops->remove(po);
	} else if (res == TEE_SUCCESS && len != read_len) {
		res = TEE_ERROR_CORRUPT_OBJECT;
	}

	po->fops->close(&fh);

out:
	tee_pobj_release(po);

	return res;
}

/*
 * Combined write from secure partition, this will create/open, write and
 * close the file object.
 */
static TEE_Result sec_storage_obj_write(unsigned long storage_id, char *obj_id,
					unsigned long obj_id_len, void *data,
					unsigned long len, unsigned long offset,
					unsigned long flags)
{
	const struct tee_file_operations *fops = NULL;
	struct ts_session *sess = NULL;
	struct tee_file_handle *fh = NULL;
	TEE_Result res = TEE_SUCCESS;
	struct tee_pobj *po = NULL;

	fops = tee_svc_storage_file_ops(storage_id);
	if (!fops)
		return TEE_ERROR_ITEM_NOT_FOUND;

	if (obj_id_len > TEE_OBJECT_ID_MAX_LEN)
		return TEE_ERROR_BAD_PARAMETERS;

	sess = ts_get_current_session();

	res = tee_pobj_get(&sess->ctx->uuid, obj_id, obj_id_len, flags,
			   false, fops, &po);
	if (res != TEE_SUCCESS)
		return res;

	res = po->fops->open(po, NULL, &fh);
	if (res == TEE_ERROR_ITEM_NOT_FOUND)
		res = po->fops->create(po, false, NULL, 0, NULL, 0,
				       NULL, NULL, 0, &fh);
	if (res == TEE_SUCCESS) {
		res = po->fops->write(fh, offset, NULL, data, len);
		po->fops->close(&fh);
	}

	tee_pobj_release(po);

	return res;
}

static void stmm_handle_mem_mgr_service(struct thread_scall_regs *regs)
{
	uint32_t action = SVC_REGS_A3(regs);
	uintptr_t va = SVC_REGS_A4(regs);
	uint32_t nr_pages = SVC_REGS_A5(regs);
	uint32_t perm = SVC_REGS_A6(regs);

	switch (action) {
	case __FFA_SVC_MEMORY_ATTRIBUTES_GET:
		service_compose_direct_resp(regs, sp_svc_get_mem_attr(va));
		break;
	case __FFA_SVC_MEMORY_ATTRIBUTES_SET:
		service_compose_direct_resp(regs,
					    sp_svc_set_mem_attr(va, nr_pages,
								perm));
		break;
	default:
		EMSG("Undefined service id %#"PRIx32, action);
		service_compose_direct_resp(regs, STMM_RET_INVALID_PARAM);
		break;
	}
}

static uint32_t tee2stmm_ret_val(TEE_Result res)
{
	switch (res) {
	case TEE_SUCCESS:
		return STMM_RET_SUCCESS;
	case TEE_ERROR_NOT_SUPPORTED:
		return STMM_RET_NOT_SUPPORTED;
	case TEE_ERROR_ACCESS_DENIED:
		return STMM_RET_DENIED;
	case TEE_ERROR_OUT_OF_MEMORY:
		return STMM_RET_NO_MEM;
	case TEE_ERROR_BAD_PARAMETERS:
	default:
		return STMM_RET_INVALID_PARAM;
	}
}

#define FILENAME "EFI_VARS"
static void stmm_handle_storage_service(struct thread_scall_regs *regs)
{
	uint32_t flags = TEE_DATA_FLAG_ACCESS_READ |
			 TEE_DATA_FLAG_ACCESS_WRITE |
			 TEE_DATA_FLAG_SHARE_READ |
			 TEE_DATA_FLAG_SHARE_WRITE;
	uint32_t action = SVC_REGS_A3(regs);
	void *va = (void *)SVC_REGS_A4(regs);
	unsigned long len = SVC_REGS_A5(regs);
	unsigned long offset = SVC_REGS_A6(regs);
	char obj_id[] = FILENAME;
	size_t obj_id_len = strlen(obj_id);
	TEE_Result res = TEE_SUCCESS;
	uint32_t stmm_rc = STMM_RET_INVALID_PARAM;

	switch (action) {
	case __FFA_SVC_RPMB_READ:
		DMSG("RPMB read");
		res = sec_storage_obj_read(TEE_STORAGE_PRIVATE_RPMB, obj_id,
					   obj_id_len, va, len, offset, flags);
		stmm_rc = tee2stmm_ret_val(res);
		break;
	case __FFA_SVC_RPMB_WRITE:
		DMSG("RPMB write");
		res = sec_storage_obj_write(TEE_STORAGE_PRIVATE_RPMB, obj_id,
					    obj_id_len, va, len, offset, flags);
		stmm_rc = tee2stmm_ret_val(res);
		break;
	default:
		EMSG("Undefined service id %#"PRIx32, action);
		break;
	}

	service_compose_direct_resp(regs, stmm_rc);
}
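
/*
 * Report a failed direct request back to StMM as FFA_ERROR; per the FF-A
 * ABI the error code is carried in argument register 2.
 */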
static void spm_eret_error(int32_t error_code, struct thread_scall_regs *regs)
{
	SVC_REGS_A0(regs) = FFA_ERROR;
	SVC_REGS_A1(regs) = FFA_PARAM_MBZ;
	SVC_REGS_A2(regs) = error_code;
	SVC_REGS_A3(regs) = FFA_PARAM_MBZ;
	SVC_REGS_A4(regs) = FFA_PARAM_MBZ;
	SVC_REGS_A5(regs) = FFA_PARAM_MBZ;
	SVC_REGS_A6(regs) = FFA_PARAM_MBZ;
	SVC_REGS_A7(regs) = FFA_PARAM_MBZ;
}

static void spm_handle_direct_req(struct thread_scall_regs *regs)
{
	uint16_t dst_id = SVC_REGS_A1(regs) & UINT16_MAX;

	if (dst_id == mem_mgr_id) {
		stmm_handle_mem_mgr_service(regs);
	} else if (dst_id == ffa_storage_id) {
		stmm_handle_storage_service(regs);
	} else {
		EMSG("Undefined endpoint id %#"PRIx16, dst_id);
		spm_eret_error(STMM_RET_INVALID_PARAM, regs);
	}
}

/* Return true if returning to SP, false if returning to caller */
static bool spm_handle_scall(struct thread_scall_regs *regs)
{
#ifdef ARM64
	uint64_t *a0 = &regs->x0;
#endif
#ifdef ARM32
	uint32_t *a0 = &regs->r0;
#endif

	switch (*a0) {
	case FFA_VERSION:
		DMSG("Received FFA version");
		*a0 = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR);
		return true;
	case __FFA_MSG_SEND_DIRECT_RESP:
		DMSG("Received FFA direct response");
		return_from_sp_helper(false, 0, regs);
		return false;
	case __FFA_MSG_SEND_DIRECT_REQ:
		DMSG("Received FFA direct request");
		spm_handle_direct_req(regs);
		return true;
	default:
		EMSG("Undefined syscall %#"PRIx32, (uint32_t)*a0);
		return_from_sp_helper(true /*panic*/, 0xabcd, regs);
		return false;
	}
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct ts_ops stmm_sp_ops __weak __relrodata_unpaged("stmm_sp_ops") = {
	.enter_open_session = stmm_enter_open_session,
	.enter_invoke_cmd = stmm_enter_invoke_cmd,
	.enter_close_session = stmm_enter_close_session,
	.dump_state = stmm_dump_state,
	.destroy = stmm_ctx_destroy,
	.get_instance_id = stmm_get_instance_id,
	.handle_scall = spm_handle_scall,
};