// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2020-2021, Linaro Limited.
 * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
 */

#include <assert.h>
#include <ffa.h>
#include <initcall.h>
#include <io.h>
#include <kernel/interrupt.h>
#include <kernel/panic.h>
#include <kernel/secure_partition.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/tee_misc.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/thread_spmc.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <optee_ffa.h>
#include <optee_msg.h>
#include <optee_rpc_cmd.h>
#include <string.h>
#include <sys/queue.h>
#include <tee/entry_std.h>
#include <tee/uuid.h>
#include <util.h>

#if defined(CFG_CORE_SEL1_SPMC)
struct mem_share_state {
	struct mobj_ffa *mf;
	unsigned int page_count;
	unsigned int region_count;
	unsigned int current_page_idx;
};

struct mem_frag_state {
	struct mem_share_state share;
	tee_mm_entry_t *mm;
	unsigned int frag_offset;
	SLIST_ENTRY(mem_frag_state) link;
};
#endif

/* Initialized in spmc_init() below */
static uint16_t my_endpoint_id;
#ifdef CFG_CORE_SEL1_SPMC
static const uint32_t my_part_props = FFA_PART_PROP_DIRECT_REQ_RECV |
				      FFA_PART_PROP_DIRECT_REQ_SEND |
#ifdef ARM64
				      FFA_PART_PROP_AARCH64_STATE |
#endif
				      FFA_PART_PROP_IS_PE_ID;

static uint32_t my_uuid_words[] = {
	/*
	 * - if the SPMC is in S-EL2 this UUID describes OP-TEE as an S-EL1
	 *   SP, or
	 * - if the SPMC is in S-EL1 then this UUID is for OP-TEE as a
	 *   logical partition, residing in the same exception level as the
	 *   SPMC
	 * UUID 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
	 */
	0xe0786148, 0xe311f8e7, 0x02005ebc, 0x1bc5d5a5,
};

/*
 * If struct ffa_rxtx::size is 0 the RX/TX buffers are not mapped or
 * initialized.
 *
 * struct ffa_rxtx::spinlock protects the variables below from concurrent
 * access; this includes the use of the content of struct ffa_rxtx::rx and
 * @frag_state_head.
 *
 * struct ffa_rxtx::tx_is_mine is true when we may write to struct
 * ffa_rxtx::tx and false when it is owned by normal world.
 *
 * Note that we can't prevent normal world from updating the content of
 * these buffers so we must always be careful when reading, even while we
 * hold the lock.
 */

static struct ffa_rxtx nw_rxtx;

static bool is_nw_buf(struct ffa_rxtx *rxtx)
{
	return rxtx == &nw_rxtx;
}

static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
	SLIST_HEAD_INITIALIZER(&frag_state_head);
#else
static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
static struct ffa_rxtx nw_rxtx = { .rx = __rx_buf, .tx = __tx_buf };
#endif

static uint32_t swap_src_dst(uint32_t src_dst)
{
	return (src_dst >> 16) | (src_dst << 16);
}

void spmc_set_args(struct thread_smc_args *args, uint32_t fid, uint32_t src_dst,
		   uint32_t w2, uint32_t w3, uint32_t w4, uint32_t w5)
{
	*args = (struct thread_smc_args){ .a0 = fid,
					  .a1 = src_dst,
					  .a2 = w2,
					  .a3 = w3,
					  .a4 = w4,
					  .a5 = w5, };
}

static uint32_t exchange_version(uint32_t vers, struct ffa_rxtx *rxtx)
{
	/*
	 * No locking; if the caller makes concurrent calls to this it's
	 * only making a mess for itself. We must be able to renegotiate
	 * the FF-A version in order to support differing versions between
	 * the loader and the driver.
	 */
	if (vers < FFA_VERSION_1_1)
		rxtx->ffa_vers = FFA_VERSION_1_0;
	else
		rxtx->ffa_vers = FFA_VERSION_1_1;

	return rxtx->ffa_vers;
}

#if defined(CFG_CORE_SEL1_SPMC)
void spmc_handle_version(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
{
	spmc_set_args(args, exchange_version(args->a0, rxtx), FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ);
}

static void handle_features(struct thread_smc_args *args)
{
	uint32_t ret_fid = 0;
	uint32_t ret_w2 = FFA_PARAM_MBZ;

	switch (args->a1) {
#ifdef ARM64
	case FFA_RXTX_MAP_64:
#endif
	case FFA_RXTX_MAP_32:
		ret_fid = FFA_SUCCESS_32;
		ret_w2 = 0; /* 4kB Minimum buffer size and alignment boundary */
		break;
#ifdef ARM64
	case FFA_MEM_SHARE_64:
#endif
	case FFA_MEM_SHARE_32:
		ret_fid = FFA_SUCCESS_32;
		/*
		 * Partition manager supports transmission of a memory
		 * transaction descriptor in a buffer dynamically allocated
		 * by the endpoint.
		 */
		ret_w2 = BIT(0);
		break;

	case FFA_ERROR:
	case FFA_VERSION:
	case FFA_SUCCESS_32:
#ifdef ARM64
	case FFA_SUCCESS_64:
#endif
	case FFA_FEATURES:
	case FFA_MEM_FRAG_TX:
	case FFA_MEM_RECLAIM:
	case FFA_MSG_SEND_DIRECT_REQ_32:
	case FFA_INTERRUPT:
	case FFA_PARTITION_INFO_GET:
	case FFA_RXTX_UNMAP:
	case FFA_RX_RELEASE:
	case FFA_FEATURE_MANAGED_EXIT_INTR:
		ret_fid = FFA_SUCCESS_32;
		break;
	default:
		ret_fid = FFA_ERROR;
		ret_w2 = FFA_NOT_SUPPORTED;
		break;
	}

	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
{
	tee_mm_entry_t *mm = NULL;

	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
		return FFA_INVALID_PARAMETERS;

	mm = tee_mm_alloc(&tee_mm_shm, sz);
	if (!mm)
		return FFA_NO_MEMORY;

	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
					  sz / SMALL_PAGE_SIZE,
					  MEM_AREA_NSEC_SHM)) {
		tee_mm_free(mm);
		return FFA_INVALID_PARAMETERS;
	}

	*va_ret = (void *)tee_mm_get_smem(mm);
	return 0;
}

static void unmap_buf(void *va, size_t sz)
{
	tee_mm_entry_t *mm = tee_mm_find(&tee_mm_shm, (vaddr_t)va);

	assert(mm);
	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
	tee_mm_free(mm);
}

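/*
 * Map the RX/TX buffer pair provided by the caller. Buffers coming from
 * normal world are physical addresses in non-secure memory and must be
 * mapped here, while buffers from an SP are already mapped virtual
 * addresses.
 */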
void spmc_handle_rxtx_map(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
{
	int rc = 0;
	uint32_t ret_fid = FFA_ERROR;
	unsigned int sz = 0;
	paddr_t rx_pa = 0;
	paddr_t tx_pa = 0;
	void *rx = NULL;
	void *tx = NULL;

	cpu_spin_lock(&rxtx->spinlock);

	if (args->a3 & GENMASK_64(63, 6)) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	sz = args->a3 * SMALL_PAGE_SIZE;
	if (!sz) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}
	/* TX/RX are swapped compared to the caller */
	tx_pa = args->a2;
	rx_pa = args->a1;

	if (rxtx->size) {
		rc = FFA_DENIED;
		goto out;
	}

	/*
	 * If the buffer comes from an SP the address is virtual and already
	 * mapped.
	 */
	if (is_nw_buf(rxtx)) {
		rc = map_buf(tx_pa, sz, &tx);
		if (rc)
			goto out;
		rc = map_buf(rx_pa, sz, &rx);
		if (rc) {
			unmap_buf(tx, sz);
			goto out;
		}
		rxtx->tx = tx;
		rxtx->rx = rx;
	} else {
		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}

		if (!virt_to_phys((void *)tx_pa) ||
		    !virt_to_phys((void *)rx_pa)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}

		rxtx->tx = (void *)tx_pa;
		rxtx->rx = (void *)rx_pa;
	}

	rxtx->size = sz;
	rxtx->tx_is_mine = true;
	ret_fid = FFA_SUCCESS_32;
	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
out:
	cpu_spin_unlock(&rxtx->spinlock);
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

void spmc_handle_rxtx_unmap(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
{
	uint32_t ret_fid = FFA_ERROR;
	int rc = FFA_INVALID_PARAMETERS;

	cpu_spin_lock(&rxtx->spinlock);

	if (!rxtx->size)
		goto out;

	/* We don't unmap the SP memory as the SP might still use it */
	if (is_nw_buf(rxtx)) {
		unmap_buf(rxtx->rx, rxtx->size);
		unmap_buf(rxtx->tx, rxtx->size);
	}
	rxtx->size = 0;
	rxtx->rx = NULL;
	rxtx->tx = NULL;
	ret_fid = FFA_SUCCESS_32;
	rc = 0;
out:
	cpu_spin_unlock(&rxtx->spinlock);
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

void spmc_handle_rx_release(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
{
	uint32_t ret_fid = 0;
	int rc = 0;

	cpu_spin_lock(&rxtx->spinlock);
	/* The sender's RX is our TX */
	if (!rxtx->size || rxtx->tx_is_mine) {
		ret_fid = FFA_ERROR;
		rc = FFA_DENIED;
	} else {
		ret_fid = FFA_SUCCESS_32;
		rc = 0;
		rxtx->tx_is_mine = true;
	}
	cpu_spin_unlock(&rxtx->spinlock);

	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
{
	return !w0 && !w1 && !w2 && !w3;
}

static bool is_my_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
{
	/*
	 * This depends on which UUID we have been assigned.
	 * TODO add a generic mechanism to obtain our UUID.
	 *
	 * The test below is for the hard coded UUID
	 * 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
	 */
	return w0 == my_uuid_words[0] && w1 == my_uuid_words[1] &&
	       w2 == my_uuid_words[2] && w3 == my_uuid_words[3];
}

TEE_Result spmc_fill_partition_entry(uint32_t ffa_vers, void *buf, size_t blen,
				     size_t idx, uint16_t endpoint_id,
				     uint16_t execution_context,
				     uint32_t part_props,
				     const uint32_t uuid_words[4])
{
	struct ffa_partition_info_x *fpi = NULL;
	size_t fpi_size = sizeof(*fpi);

	if (ffa_vers >= FFA_VERSION_1_1)
		fpi_size += FFA_UUID_SIZE;

	if ((idx + 1) * fpi_size > blen)
		return TEE_ERROR_OUT_OF_MEMORY;

	fpi = (void *)((vaddr_t)buf + idx * fpi_size);
	fpi->id = endpoint_id;
	/* Number of execution contexts implemented by this partition */
	fpi->execution_context = execution_context;

	fpi->partition_properties = part_props;

	if (ffa_vers >= FFA_VERSION_1_1) {
		if (uuid_words)
			memcpy(fpi->uuid, uuid_words, FFA_UUID_SIZE);
		else
			memset(fpi->uuid, 0, FFA_UUID_SIZE);
	}

	return TEE_SUCCESS;
}

static int handle_partition_info_get_all(size_t *elem_count,
					 struct ffa_rxtx *rxtx, bool count_only)
{
	if (!count_only) {
		/* Add OP-TEE SP */
		if (spmc_fill_partition_entry(rxtx->ffa_vers, rxtx->tx,
					      rxtx->size, 0, my_endpoint_id,
					      CFG_TEE_CORE_NB_CORE,
					      my_part_props, my_uuid_words))
			return FFA_NO_MEMORY;
	}
	*elem_count = 1;

	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
		if (sp_partition_info_get(rxtx->ffa_vers, rxtx->tx, rxtx->size,
					  NULL, elem_count, count_only))
			return FFA_NO_MEMORY;
	}

	return FFA_OK;
}

void spmc_handle_partition_info_get(struct thread_smc_args *args,
				    struct ffa_rxtx *rxtx)
{
	TEE_Result res = TEE_SUCCESS;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t rc = 0;
	bool count_only = args->a5 & FFA_PARTITION_INFO_GET_COUNT_FLAG;

	if (!count_only) {
		cpu_spin_lock(&rxtx->spinlock);

		if (!rxtx->size || !rxtx->tx_is_mine) {
			rc = FFA_BUSY;
			goto out;
		}
	}

	if (is_nil_uuid(args->a1, args->a2, args->a3, args->a4)) {
		size_t elem_count = 0;

		ret_fid = handle_partition_info_get_all(&elem_count, rxtx,
							count_only);

		if (ret_fid) {
			rc = ret_fid;
			ret_fid = FFA_ERROR;
		} else {
			ret_fid = FFA_SUCCESS_32;
			rc = elem_count;
		}

		goto out;
	}

	if (is_my_uuid(args->a1, args->a2, args->a3, args->a4)) {
		if (!count_only) {
			res = spmc_fill_partition_entry(rxtx->ffa_vers,
							rxtx->tx, rxtx->size, 0,
							my_endpoint_id,
							CFG_TEE_CORE_NB_CORE,
							my_part_props,
							my_uuid_words);
			if (res) {
				ret_fid = FFA_ERROR;
				rc = FFA_INVALID_PARAMETERS;
				goto out;
			}
		}
		rc = 1;
	} else if (IS_ENABLED(CFG_SECURE_PARTITION)) {
		uint32_t uuid_array[4] = { 0 };
		TEE_UUID uuid = { };
		size_t count = 0;

		uuid_array[0] = args->a1;
		uuid_array[1] = args->a2;
		uuid_array[2] = args->a3;
		uuid_array[3] = args->a4;
		tee_uuid_from_octets(&uuid, (uint8_t *)uuid_array);

		res = sp_partition_info_get(rxtx->ffa_vers, rxtx->tx,
					    rxtx->size, &uuid, &count,
					    count_only);
		if (res != TEE_SUCCESS) {
			ret_fid = FFA_ERROR;
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
		rc = count;
	} else {
		ret_fid = FFA_ERROR;
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	ret_fid = FFA_SUCCESS_32;

out:
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
	if (!count_only) {
		rxtx->tx_is_mine = false;
		cpu_spin_unlock(&rxtx->spinlock);
	}
}
#endif /*CFG_CORE_SEL1_SPMC*/

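/*
 * A yielding call from the normal world OP-TEE driver either resumes a
 * thread that previously returned to normal world with an RPC, or
 * allocates a new thread and runs the call on it.
 */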
static void handle_yielding_call(struct thread_smc_args *args)
{
	TEE_Result res = 0;

	thread_check_canaries();

	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
		/* Note connection to struct thread_rpc_arg::ret */
		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
				       0);
		res = TEE_ERROR_BAD_PARAMETERS;
	} else {
		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
				     args->a6, args->a7);
		res = TEE_ERROR_BUSY;
	}
	spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
		      swap_src_dst(args->a1), 0, res, 0, 0);
}

static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
{
	uint64_t cookie = reg_pair_to_64(a5, a4);
	uint32_t res = 0;

	res = mobj_ffa_unregister_by_cookie(cookie);
	switch (res) {
	case TEE_SUCCESS:
	case TEE_ERROR_ITEM_NOT_FOUND:
		return 0;
	case TEE_ERROR_BUSY:
		EMSG("res %#"PRIx32, res);
		return FFA_BUSY;
	default:
		EMSG("res %#"PRIx32, res);
		return FFA_INVALID_PARAMETERS;
	}
}

static void handle_blocking_call(struct thread_smc_args *args)
{
	switch (args->a3) {
	case OPTEE_FFA_GET_API_VERSION:
		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
			      swap_src_dst(args->a1), 0,
			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
			      0);
		break;
	case OPTEE_FFA_GET_OS_VERSION:
		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
			      swap_src_dst(args->a1), 0,
			      CFG_OPTEE_REVISION_MAJOR,
			      CFG_OPTEE_REVISION_MINOR, TEE_IMPL_GIT_SHA1);
		break;
	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
			      swap_src_dst(args->a1), 0, 0,
			      THREAD_RPC_MAX_NUM_PARAMS,
			      OPTEE_FFA_SEC_CAP_ARG_OFFSET);
		break;
	case OPTEE_FFA_UNREGISTER_SHM:
		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
			      swap_src_dst(args->a1), 0,
			      handle_unregister_shm(args->a4, args->a5), 0, 0);
		break;
	default:
		EMSG("Unhandled blocking service ID %#"PRIx32,
		     (uint32_t)args->a3);
		panic();
	}
}

static void handle_framework_direct_request(struct thread_smc_args *args,
					    struct ffa_rxtx *rxtx)
{
	uint32_t w0 = FFA_ERROR;
	uint32_t w1 = FFA_PARAM_MBZ;
	uint32_t w2 = FFA_NOT_SUPPORTED;
	uint32_t w3 = FFA_PARAM_MBZ;

	switch (args->a2 & FFA_MSG_TYPE_MASK) {
	case FFA_MSG_VERSION_REQ:
		w0 = FFA_MSG_SEND_DIRECT_RESP_32;
		w1 = swap_src_dst(args->a1);
		w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_VERSION_RESP;
		w3 = exchange_version(args->a3, rxtx);
		break;
	default:
		break;
	}
	spmc_set_args(args, w0, w1, w2, w3, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static void handle_direct_request(struct thread_smc_args *args,
				  struct ffa_rxtx *rxtx)
{
	if (IS_ENABLED(CFG_SECURE_PARTITION) &&
	    FFA_DST(args->a1) != my_endpoint_id) {
		spmc_sp_start_thread(args);
		return;
	}

	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
		handle_framework_direct_request(args, rxtx);
		return;
	}

	if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
		handle_yielding_call(args);
	else
		handle_blocking_call(args);
}

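/*
 * Parse the FF-A version specific (1.0 or 1.1) memory transaction
 * descriptor in @buf into the version independent struct
 * ffa_mem_transaction_x.
 */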
int spmc_read_mem_transaction(uint32_t ffa_vers, void *buf, size_t blen,
			      struct ffa_mem_transaction_x *trans)
{
	uint16_t mem_reg_attr = 0;
	uint32_t flags = 0;
	uint32_t count = 0;
	uint32_t offs = 0;
	uint32_t size = 0;
	size_t n = 0;

	if (!IS_ALIGNED_WITH_TYPE(buf, uint64_t))
		return FFA_INVALID_PARAMETERS;

	if (ffa_vers >= FFA_VERSION_1_1) {
		struct ffa_mem_transaction_1_1 *descr = NULL;

		if (blen < sizeof(*descr))
			return FFA_INVALID_PARAMETERS;

		descr = buf;
		trans->sender_id = READ_ONCE(descr->sender_id);
		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
		flags = READ_ONCE(descr->flags);
		trans->global_handle = READ_ONCE(descr->global_handle);
		trans->tag = READ_ONCE(descr->tag);

		count = READ_ONCE(descr->mem_access_count);
		size = READ_ONCE(descr->mem_access_size);
		offs = READ_ONCE(descr->mem_access_offs);
	} else {
		struct ffa_mem_transaction_1_0 *descr = NULL;

		if (blen < sizeof(*descr))
			return FFA_INVALID_PARAMETERS;

		descr = buf;
		trans->sender_id = READ_ONCE(descr->sender_id);
		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
		flags = READ_ONCE(descr->flags);
		trans->global_handle = READ_ONCE(descr->global_handle);
		trans->tag = READ_ONCE(descr->tag);

		count = READ_ONCE(descr->mem_access_count);
		size = sizeof(struct ffa_mem_access);
		offs = offsetof(struct ffa_mem_transaction_1_0,
				mem_access_array);
	}

	if (mem_reg_attr > UINT8_MAX || flags > UINT8_MAX ||
	    size > UINT8_MAX || count > UINT8_MAX || offs > UINT16_MAX)
		return FFA_INVALID_PARAMETERS;

	/* Check that the endpoint memory access descriptor array fits */
	if (MUL_OVERFLOW(size, count, &n) || ADD_OVERFLOW(offs, n, &n) ||
	    n > blen)
		return FFA_INVALID_PARAMETERS;

	trans->mem_reg_attr = mem_reg_attr;
	trans->flags = flags;
	trans->mem_access_size = size;
	trans->mem_access_count = count;
	trans->mem_access_offs = offs;
	return 0;
}

#if defined(CFG_CORE_SEL1_SPMC)
static int get_acc_perms(vaddr_t mem_acc_base, unsigned int mem_access_size,
			 unsigned int mem_access_count, uint8_t *acc_perms,
			 unsigned int *region_offs)
{
	struct ffa_mem_access_perm *descr = NULL;
	struct ffa_mem_access *mem_acc = NULL;
	unsigned int n = 0;

	for (n = 0; n < mem_access_count; n++) {
		mem_acc = (void *)(mem_acc_base + mem_access_size * n);
		descr = &mem_acc->access_perm;
		if (READ_ONCE(descr->endpoint_id) == my_endpoint_id) {
			*acc_perms = READ_ONCE(descr->perm);
			*region_offs = READ_ONCE(mem_acc->region_offs);
			return 0;
		}
	}

	return FFA_INVALID_PARAMETERS;
}

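/*
 * Check the memory attributes and our access permissions of a share
 * request and extract the page count, the address range count and the
 * offset of the address ranges from the composite memory region
 * descriptor.
 */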
static int mem_share_init(struct ffa_mem_transaction_x *mem_trans, void *buf,
			  size_t blen, unsigned int *page_count,
			  unsigned int *region_count, size_t *addr_range_offs)
{
	const uint16_t exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
	const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
	struct ffa_mem_region *region_descr = NULL;
	unsigned int region_descr_offs = 0;
	uint8_t mem_acc_perm = 0;
	size_t n = 0;

	if (mem_trans->mem_reg_attr != exp_mem_reg_attr)
		return FFA_INVALID_PARAMETERS;

	/* Check that the access permissions match what's expected */
	if (get_acc_perms((vaddr_t)buf + mem_trans->mem_access_offs,
			  mem_trans->mem_access_size,
			  mem_trans->mem_access_count,
			  &mem_acc_perm, &region_descr_offs) ||
	    mem_acc_perm != exp_mem_acc_perm)
		return FFA_INVALID_PARAMETERS;

	/* Check that the composite memory region descriptor fits */
	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
	    n > blen)
		return FFA_INVALID_PARAMETERS;

	if (!IS_ALIGNED_WITH_TYPE((vaddr_t)buf + region_descr_offs,
				  struct ffa_mem_region))
		return FFA_INVALID_PARAMETERS;

	region_descr = (struct ffa_mem_region *)((vaddr_t)buf +
						 region_descr_offs);
	*page_count = READ_ONCE(region_descr->total_page_count);
	*region_count = READ_ONCE(region_descr->address_range_count);
	*addr_range_offs = n;
	return 0;
}

static int add_mem_share_helper(struct mem_share_state *s, void *buf,
				size_t flen)
{
	unsigned int region_count = flen / sizeof(struct ffa_address_range);
	struct ffa_address_range *arange = NULL;
	unsigned int n = 0;

	if (region_count > s->region_count)
		region_count = s->region_count;

	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range))
		return FFA_INVALID_PARAMETERS;
	arange = buf;

	for (n = 0; n < region_count; n++) {
		unsigned int page_count = READ_ONCE(arange[n].page_count);
		uint64_t addr = READ_ONCE(arange[n].address);

		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
					  addr, page_count))
			return FFA_INVALID_PARAMETERS;
	}

	s->region_count -= region_count;
	if (s->region_count)
		return region_count * sizeof(*arange);

	if (s->current_page_idx != s->page_count)
		return FFA_INVALID_PARAMETERS;

	return 0;
}

static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
{
	int rc = 0;

	rc = add_mem_share_helper(&s->share, buf, flen);
	if (rc >= 0) {
		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
			/* We're not at the end of the descriptor yet */
			if (s->share.region_count)
				return s->frag_offset;

			/* We're done */
			rc = 0;
		} else {
			rc = FFA_INVALID_PARAMETERS;
		}
	}

	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
	if (rc < 0)
		mobj_ffa_sel1_spmc_delete(s->share.mf);
	else
		mobj_ffa_push_to_inactive(s->share.mf);
	free(s);

	return rc;
}

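/*
 * Returns true when the first endpoint in the memory access descriptor
 * array is some endpoint other than OP-TEE itself, that is, the share is
 * destined for an SP.
 */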
static bool is_sp_share(struct ffa_mem_transaction_x *mem_trans,
			void *buf)
{
	struct ffa_mem_access_perm *perm = NULL;
	struct ffa_mem_access *mem_acc = NULL;

	if (!IS_ENABLED(CFG_SECURE_PARTITION))
		return false;

	if (mem_trans->mem_access_count < 1)
		return false;

	mem_acc = (void *)((vaddr_t)buf + mem_trans->mem_access_offs);
	perm = &mem_acc->access_perm;

	/*
	 * perm->endpoint_id is read here only to check whether the endpoint
	 * is OP-TEE. It is read again later on, but with additional checks
	 * to make sure that the data is correct.
	 */
	return READ_ONCE(perm->endpoint_id) != my_endpoint_id;
}

static int add_mem_share(struct ffa_mem_transaction_x *mem_trans,
			 tee_mm_entry_t *mm, void *buf, size_t blen,
			 size_t flen, uint64_t *global_handle)
{
	int rc = 0;
	struct mem_share_state share = { };
	size_t addr_range_offs = 0;
	size_t n = 0;

	rc = mem_share_init(mem_trans, buf, flen, &share.page_count,
			    &share.region_count, &addr_range_offs);
	if (rc)
		return rc;

	if (MUL_OVERFLOW(share.region_count,
			 sizeof(struct ffa_address_range), &n) ||
	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
		return FFA_INVALID_PARAMETERS;

	share.mf = mobj_ffa_sel1_spmc_new(share.page_count);
	if (!share.mf)
		return FFA_NO_MEMORY;

	if (flen != blen) {
		struct mem_frag_state *s = calloc(sizeof(*s), 1);

		if (!s) {
			rc = FFA_NO_MEMORY;
			goto err;
		}
		s->share = share;
		s->mm = mm;
		s->frag_offset = addr_range_offs;

		SLIST_INSERT_HEAD(&frag_state_head, s, link);
		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
					flen - addr_range_offs);

		if (rc >= 0)
			*global_handle = mobj_ffa_get_cookie(share.mf);

		return rc;
	}

	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
				  flen - addr_range_offs);
	if (rc) {
		/*
		 * The helper returns the number of consumed bytes for a
		 * partial descriptor instead of 0. Since the full
		 * descriptor is supplied here, anything non-zero is an
		 * error.
		 */
		rc = FFA_INVALID_PARAMETERS;
		goto err;
	}

	*global_handle = mobj_ffa_push_to_inactive(share.mf);

	return 0;
err:
	mobj_ffa_sel1_spmc_delete(share.mf);
	return rc;
}

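/*
 * Handle a memory share where the transaction descriptor is supplied in a
 * distinct buffer (address and page count given in the call), as opposed
 * to in the RX buffer.
 */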
static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
				 unsigned int page_count,
				 uint64_t *global_handle, struct ffa_rxtx *rxtx)
{
	struct ffa_mem_transaction_x mem_trans = { };
	int rc = 0;
	size_t len = 0;
	void *buf = NULL;
	tee_mm_entry_t *mm = NULL;
	vaddr_t offs = pbuf & SMALL_PAGE_MASK;

	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
		return FFA_INVALID_PARAMETERS;
	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
		return FFA_INVALID_PARAMETERS;

	/*
	 * Check that the length reported in flen is covered by len even
	 * if the offset is taken into account.
	 */
	if (len < flen || len - offs < flen)
		return FFA_INVALID_PARAMETERS;

	mm = tee_mm_alloc(&tee_mm_shm, len);
	if (!mm)
		return FFA_NO_MEMORY;

	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
					  page_count, MEM_AREA_NSEC_SHM)) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}
	buf = (void *)(tee_mm_get_smem(mm) + offs);

	cpu_spin_lock(&rxtx->spinlock);
	rc = spmc_read_mem_transaction(rxtx->ffa_vers, buf, flen, &mem_trans);
	if (!rc)
		rc = add_mem_share(&mem_trans, mm, buf, blen, flen,
				   global_handle);
	cpu_spin_unlock(&rxtx->spinlock);
	if (rc > 0)
		return rc;

	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
out:
	tee_mm_free(mm);
	return rc;
}

static int handle_mem_share_rxbuf(size_t blen, size_t flen,
				  uint64_t *global_handle,
				  struct ffa_rxtx *rxtx)
{
	struct ffa_mem_transaction_x mem_trans = { };
	int rc = FFA_DENIED;

	cpu_spin_lock(&rxtx->spinlock);

	if (!rxtx->rx || flen > rxtx->size)
		goto out;

	rc = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, flen,
				       &mem_trans);
	if (rc)
		goto out;
	if (is_sp_share(&mem_trans, rxtx->rx)) {
		rc = spmc_sp_add_share(&mem_trans, rxtx, blen,
				       global_handle, NULL);
		goto out;
	}

	rc = add_mem_share(&mem_trans, NULL, rxtx->rx, blen, flen,
			   global_handle);

out:
	cpu_spin_unlock(&rxtx->spinlock);

	return rc;
}

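/*
 * Handle FFA_MEM_SHARE_32/64 from normal world, with the transaction
 * descriptor either in the RX buffer or in a dynamically allocated buffer
 * passed by address.
 */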
static void handle_mem_share(struct thread_smc_args *args,
			     struct ffa_rxtx *rxtx)
{
	uint32_t tot_len = args->a1;
	uint32_t frag_len = args->a2;
	uint64_t addr = args->a3;
	uint32_t page_count = args->a4;
	uint32_t ret_w1 = 0;
	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
	uint32_t ret_w3 = 0;
	uint32_t ret_fid = FFA_ERROR;
	uint64_t global_handle = 0;
	int rc = 0;

	/* Check that the MBZs are indeed 0 */
	if (args->a5 || args->a6 || args->a7)
		goto out;

	/* Check that fragment length doesn't exceed total length */
	if (frag_len > tot_len)
		goto out;

	/* Check for 32-bit calling convention */
	if (args->a0 == FFA_MEM_SHARE_32)
		addr &= UINT32_MAX;

	if (!addr) {
		/*
		 * The memory transaction descriptor is passed via our rx
		 * buffer.
		 */
		if (page_count)
			goto out;
		rc = handle_mem_share_rxbuf(tot_len, frag_len, &global_handle,
					    rxtx);
	} else {
		rc = handle_mem_share_tmem(addr, tot_len, frag_len, page_count,
					   &global_handle, rxtx);
	}
	if (rc < 0) {
		ret_w2 = rc;
	} else if (rc > 0) {
		ret_fid = FFA_MEM_FRAG_RX;
		ret_w3 = rc;
		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
	} else {
		ret_fid = FFA_SUCCESS_32;
		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
	}
out:
	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
}

static struct mem_frag_state *get_frag_state(uint64_t global_handle)
{
	struct mem_frag_state *s = NULL;

	SLIST_FOREACH(s, &frag_state_head, link)
		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
			return s;

	return NULL;
}

static void handle_mem_frag_tx(struct thread_smc_args *args,
			       struct ffa_rxtx *rxtx)
{
	int rc = 0;
	uint64_t global_handle = reg_pair_to_64(READ_ONCE(args->a2),
						READ_ONCE(args->a1));
	size_t flen = READ_ONCE(args->a3);
	struct mem_frag_state *s = NULL;
	tee_mm_entry_t *mm = NULL;
	unsigned int page_count = 0;
	void *buf = NULL;
	uint32_t ret_w1 = 0;
	uint32_t ret_w2 = 0;
	uint32_t ret_w3 = 0;
	uint32_t ret_fid = 0;

	/*
	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
	 * requests.
	 */

	cpu_spin_lock(&rxtx->spinlock);

	s = get_frag_state(global_handle);
	if (!s) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	mm = s->mm;
	if (mm) {
		if (flen > tee_mm_get_bytes(mm)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
		page_count = s->share.page_count;
		buf = (void *)tee_mm_get_smem(mm);
	} else {
		if (flen > rxtx->size) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
		buf = rxtx->rx;
	}

	rc = add_mem_share_frag(s, buf, flen);
out:
	cpu_spin_unlock(&rxtx->spinlock);

	if (rc <= 0 && mm) {
		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
		tee_mm_free(mm);
	}

	if (rc < 0) {
		ret_fid = FFA_ERROR;
		ret_w2 = rc;
	} else if (rc > 0) {
		ret_fid = FFA_MEM_FRAG_RX;
		ret_w3 = rc;
		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
	} else {
		ret_fid = FFA_SUCCESS_32;
		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
	}

	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
}

static void handle_mem_reclaim(struct thread_smc_args *args)
{
	uint32_t ret_val = FFA_INVALID_PARAMETERS;
	uint32_t ret_fid = FFA_ERROR;
	uint64_t cookie = 0;

	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
		goto out;

	cookie = reg_pair_to_64(args->a2, args->a1);
	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
	case TEE_SUCCESS:
		ret_fid = FFA_SUCCESS_32;
		ret_val = 0;
		break;
	case TEE_ERROR_ITEM_NOT_FOUND:
		DMSG("cookie %#"PRIx64" not found", cookie);
		ret_val = FFA_INVALID_PARAMETERS;
		break;
	default:
		DMSG("cookie %#"PRIx64" busy", cookie);
		ret_val = FFA_DENIED;
		break;
	}
out:
	spmc_set_args(args, ret_fid, ret_val, 0, 0, 0, 0);
}
#endif

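/*
 * Dispatch an incoming FF-A function ID to the matching handler and fill
 * in @args with the values to return to the caller.
 */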
/* Only called from assembly */
void thread_spmc_msg_recv(struct thread_smc_args *args);
void thread_spmc_msg_recv(struct thread_smc_args *args)
{
	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
	switch (args->a0) {
#if defined(CFG_CORE_SEL1_SPMC)
	case FFA_VERSION:
		spmc_handle_version(args, &nw_rxtx);
		break;
	case FFA_FEATURES:
		handle_features(args);
		break;
#ifdef ARM64
	case FFA_RXTX_MAP_64:
#endif
	case FFA_RXTX_MAP_32:
		spmc_handle_rxtx_map(args, &nw_rxtx);
		break;
	case FFA_RXTX_UNMAP:
		spmc_handle_rxtx_unmap(args, &nw_rxtx);
		break;
	case FFA_RX_RELEASE:
		spmc_handle_rx_release(args, &nw_rxtx);
		break;
	case FFA_PARTITION_INFO_GET:
		spmc_handle_partition_info_get(args, &nw_rxtx);
		break;
#endif /*CFG_CORE_SEL1_SPMC*/
	case FFA_INTERRUPT:
		interrupt_main_handler();
		spmc_set_args(args, FFA_MSG_WAIT, 0, 0, 0, 0, 0);
		break;
#ifdef ARM64
	case FFA_MSG_SEND_DIRECT_REQ_64:
#endif
	case FFA_MSG_SEND_DIRECT_REQ_32:
		handle_direct_request(args, &nw_rxtx);
		break;
#if defined(CFG_CORE_SEL1_SPMC)
#ifdef ARM64
	case FFA_MEM_SHARE_64:
#endif
	case FFA_MEM_SHARE_32:
		handle_mem_share(args, &nw_rxtx);
		break;
	case FFA_MEM_RECLAIM:
		if (!IS_ENABLED(CFG_SECURE_PARTITION) ||
		    !ffa_mem_reclaim(args, NULL))
			handle_mem_reclaim(args);
		break;
	case FFA_MEM_FRAG_TX:
		handle_mem_frag_tx(args, &nw_rxtx);
		break;
#endif /*CFG_CORE_SEL1_SPMC*/
	default:
		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
	}
}

static TEE_Result yielding_call_with_arg(uint64_t cookie, uint32_t offset)
{
	size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
	struct thread_ctx *thr = threads + thread_get_id();
	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
	struct optee_msg_arg *arg = NULL;
	struct mobj *mobj = NULL;
	uint32_t num_params = 0;
	size_t sz = 0;

	mobj = mobj_ffa_get_by_cookie(cookie, 0);
	if (!mobj) {
		EMSG("Can't find cookie %#"PRIx64, cookie);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	res = mobj_inc_map(mobj);
	if (res)
		goto out_put_mobj;

	res = TEE_ERROR_BAD_PARAMETERS;
	arg = mobj_get_va(mobj, offset, sizeof(*arg));
	if (!arg)
		goto out_dec_map;

	num_params = READ_ONCE(arg->num_params);
	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
		goto out_dec_map;

	sz = OPTEE_MSG_GET_ARG_SIZE(num_params);

	thr->rpc_arg = mobj_get_va(mobj, offset + sz, sz_rpc);
	if (!thr->rpc_arg)
		goto out_dec_map;

	res = tee_entry_std(arg, num_params);

	thread_rpc_shm_cache_clear(&thr->shm_cache);
	thr->rpc_arg = NULL;

out_dec_map:
	mobj_dec_map(mobj);
out_put_mobj:
	mobj_put(mobj);
	return res;
}

/*
 * Helper routine for the assembly function thread_std_smc_entry()
 *
 * Note: this function is weak just to make link_dummies_paged.c happy.
 */
uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
				       uint32_t a2, uint32_t a3,
				       uint32_t a4, uint32_t a5 __unused)
{
	/*
	 * Arguments are supplied from handle_yielding_call() as:
	 * a0 <- w1
	 * a1 <- w3
	 * a2 <- w4
	 * a3 <- w5
	 * a4 <- w6
	 * a5 <- w7
	 */
	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
	if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG)
		return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4);
	return FFA_DENIED;
}

static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
{
	uint64_t offs = tpm->u.memref.offs;

	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;

	param->u.fmem.offs_low = offs;
	param->u.fmem.offs_high = offs >> 32;
	if (param->u.fmem.offs_high != offs >> 32)
		return false;

	param->u.fmem.size = tpm->u.memref.size;
	if (tpm->u.memref.mobj) {
		uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj);

		/* If a mobj is passed it had better be one with a valid cookie. */
		if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
			return false;
		param->u.fmem.global_id = cookie;
	} else {
		param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
	}

	return true;
}

static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
			    struct thread_param *params,
			    struct optee_msg_arg **arg_ret)
{
	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
	struct thread_ctx *thr = threads + thread_get_id();
	struct optee_msg_arg *arg = thr->rpc_arg;

	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!arg) {
		EMSG("rpc_arg not set");
		return TEE_ERROR_GENERIC;
	}

	memset(arg, 0, sz);
	arg->cmd = cmd;
	arg->num_params = num_params;
	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */

	for (size_t n = 0; n < num_params; n++) {
		switch (params[n].attr) {
		case THREAD_PARAM_ATTR_NONE:
			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
			break;
		case THREAD_PARAM_ATTR_VALUE_IN:
		case THREAD_PARAM_ATTR_VALUE_OUT:
		case THREAD_PARAM_ATTR_VALUE_INOUT:
			arg->params[n].attr = params[n].attr -
					      THREAD_PARAM_ATTR_VALUE_IN +
					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
			arg->params[n].u.value.a = params[n].u.value.a;
			arg->params[n].u.value.b = params[n].u.value.b;
			arg->params[n].u.value.c = params[n].u.value.c;
			break;
		case THREAD_PARAM_ATTR_MEMREF_IN:
		case THREAD_PARAM_ATTR_MEMREF_OUT:
		case THREAD_PARAM_ATTR_MEMREF_INOUT:
			if (!set_fmem(arg->params + n, params + n))
				return TEE_ERROR_BAD_PARAMETERS;
			break;
		default:
			return TEE_ERROR_BAD_PARAMETERS;
		}
	}

	if (arg_ret)
		*arg_ret = arg;

	return TEE_SUCCESS;
}

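/*
 * Copy the output value parameters and updated memref sizes back from the
 * RPC argument struct once the RPC has returned.
 */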
static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
				struct thread_param *params)
{
	for (size_t n = 0; n < num_params; n++) {
		switch (params[n].attr) {
		case THREAD_PARAM_ATTR_VALUE_OUT:
		case THREAD_PARAM_ATTR_VALUE_INOUT:
			params[n].u.value.a = arg->params[n].u.value.a;
			params[n].u.value.b = arg->params[n].u.value.b;
			params[n].u.value.c = arg->params[n].u.value.c;
			break;
		case THREAD_PARAM_ATTR_MEMREF_OUT:
		case THREAD_PARAM_ATTR_MEMREF_INOUT:
			params[n].u.memref.size = arg->params[n].u.fmem.size;
			break;
		default:
			break;
		}
	}

	return arg->ret;
}

uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
			struct thread_param *params)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	struct optee_msg_arg *arg = NULL;
	uint32_t ret = 0;

	ret = get_rpc_arg(cmd, num_params, params, &arg);
	if (ret)
		return ret;

	thread_rpc(&rpc_arg);

	return get_rpc_arg_res(arg, num_params, params);
}

static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
	uint32_t res2 = 0;
	uint32_t res = 0;

	DMSG("freeing cookie %#"PRIx64, cookie);

	res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);

	mobj_put(mobj);
	res2 = mobj_ffa_unregister_by_cookie(cookie);
	if (res2)
		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
		     cookie, res2);
	if (!res)
		thread_rpc(&rpc_arg);
}

static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
	struct optee_msg_arg *arg = NULL;
	unsigned int internal_offset = 0;
	struct mobj *mobj = NULL;
	uint64_t cookie = 0;

	if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
		return NULL;

	thread_rpc(&rpc_arg);

	if (arg->num_params != 1 ||
	    arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
		return NULL;

	internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs);
	cookie = READ_ONCE(arg->params->u.fmem.global_id);
	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
	if (!mobj) {
		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
		     cookie, internal_offset);
		return NULL;
	}

	assert(mobj_is_nonsec(mobj));

	if (mobj->size < size) {
		DMSG("Mobj %#"PRIx64": wrong size", cookie);
		mobj_put(mobj);
		return NULL;
	}

	if (mobj_inc_map(mobj)) {
		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
		mobj_put(mobj);
		return NULL;
	}

	return mobj;
}

struct mobj *thread_rpc_alloc_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
}

struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
}

void thread_rpc_free_kernel_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL, mobj_get_cookie(mobj), mobj);
}

void thread_rpc_free_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
			mobj);
}

struct mobj *thread_rpc_alloc_global_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
}

void thread_rpc_free_global_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL, mobj_get_cookie(mobj),
			mobj);
}

void thread_spmc_register_secondary_ep(vaddr_t ep)
{
	unsigned long ret = 0;

	/* Let the SPM know the entry point for secondary CPUs */
	ret = thread_smc(FFA_SECONDARY_EP_REGISTER_64, ep, 0, 0);

	if (ret != FFA_SUCCESS_32 && ret != FFA_SUCCESS_64)
		EMSG("FFA_SECONDARY_EP_REGISTER_64 ret %#lx", ret);
}

#if defined(CFG_CORE_SEL1_SPMC)
static TEE_Result spmc_init(void)
{
	my_endpoint_id = SPMC_ENDPOINT_ID;
	DMSG("My endpoint ID %#x", my_endpoint_id);

	/*
	 * If the SPMD thinks we are version 1.0 it will report version 1.0
	 * to normal world regardless of which version we query the SPM
	 * with. However, if the SPMD thinks we are version 1.1 it will
	 * forward version queries from normal world and let us negotiate
	 * the version ourselves. So by setting version 1.0 here we are
	 * compatible either way.
	 *
	 * Note that disagreement on the negotiated version means that
	 * we'll have communication problems with normal world.
	 */
	nw_rxtx.ffa_vers = FFA_VERSION_1_0;

	return TEE_SUCCESS;
}
#else /* !defined(CFG_CORE_SEL1_SPMC) */
static bool is_ffa_success(uint32_t fid)
{
#ifdef ARM64
	if (fid == FFA_SUCCESS_64)
		return true;
#endif
	return fid == FFA_SUCCESS_32;
}

static void spmc_rxtx_map(struct ffa_rxtx *rxtx)
{
	struct thread_smc_args args = {
#ifdef ARM64
		.a0 = FFA_RXTX_MAP_64,
#else
		.a0 = FFA_RXTX_MAP_32,
#endif
		.a1 = virt_to_phys(rxtx->tx),
		.a2 = virt_to_phys(rxtx->rx),
		.a3 = 1,
	};

	thread_smccc(&args);
	if (!is_ffa_success(args.a0)) {
		if (args.a0 == FFA_ERROR)
			EMSG("rxtx map failed with error %ld", args.a2);
		else
			EMSG("rxtx map failed");
		panic();
	}
}

static uint16_t spmc_get_id(void)
{
	struct thread_smc_args args = {
		.a0 = FFA_ID_GET,
	};

	thread_smccc(&args);
	if (!is_ffa_success(args.a0)) {
		if (args.a0 == FFA_ERROR)
			EMSG("Get id failed with error %ld", args.a2);
		else
			EMSG("Get id failed");
		panic();
	}

	return args.a2;
}

static uint32_t get_ffa_version(uint32_t my_version)
{
	struct thread_smc_args args = {
		.a0 = FFA_VERSION,
		.a1 = my_version,
	};

	thread_smccc(&args);
	if (args.a0 & BIT(31)) {
		EMSG("FF-A version failed with error %ld", args.a0);
		panic();
	}

	return args.a0;
}

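/*
 * Build an FFA_MEM_RETRIEVE_REQ for @cookie in the TX buffer and parse the
 * response delivered in the RX buffer. Returns a pointer to the RX buffer
 * on success or NULL on failure.
 */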
static void *spmc_retrieve_req(uint64_t cookie,
			       struct ffa_mem_transaction_x *trans)
{
	struct ffa_mem_access *acc_descr_array = NULL;
	struct ffa_mem_access_perm *perm_descr = NULL;
	struct thread_smc_args args = {
		.a0 = FFA_MEM_RETRIEVE_REQ_32,
		.a3 = 0,	/* Address, Using TX -> MBZ */
		.a4 = 0,	/* Using TX -> MBZ */
	};
	size_t size = 0;
	int rc = 0;

	if (nw_rxtx.ffa_vers == FFA_VERSION_1_0) {
		struct ffa_mem_transaction_1_0 *trans_descr = nw_rxtx.tx;

		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
		memset(trans_descr, 0, size);
		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
		trans_descr->global_handle = cookie;
		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
		trans_descr->mem_access_count = 1;
		acc_descr_array = trans_descr->mem_access_array;
	} else {
		struct ffa_mem_transaction_1_1 *trans_descr = nw_rxtx.tx;

		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
		memset(trans_descr, 0, size);
		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
		trans_descr->global_handle = cookie;
		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
		trans_descr->mem_access_count = 1;
		trans_descr->mem_access_offs = sizeof(*trans_descr);
		trans_descr->mem_access_size = sizeof(struct ffa_mem_access);
		acc_descr_array = (void *)((vaddr_t)nw_rxtx.tx +
					   sizeof(*trans_descr));
	}
	acc_descr_array->region_offs = 0;
	acc_descr_array->reserved = 0;
	perm_descr = &acc_descr_array->access_perm;
	perm_descr->endpoint_id = my_endpoint_id;
	perm_descr->perm = FFA_MEM_ACC_RW;
	perm_descr->flags = 0;

	args.a1 = size;	/* Total Length */
	args.a2 = size;	/* Frag Length == Total length */
	thread_smccc(&args);
	if (args.a0 != FFA_MEM_RETRIEVE_RESP) {
		if (args.a0 == FFA_ERROR)
			EMSG("Failed to fetch cookie %#"PRIx64" error code %d",
			     cookie, (int)args.a2);
		else
			EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64,
			     cookie, args.a0);
		return NULL;
	}
	rc = spmc_read_mem_transaction(nw_rxtx.ffa_vers, nw_rxtx.rx,
				       nw_rxtx.size, trans);
	if (rc) {
		EMSG("Memory transaction failure for cookie %#"PRIx64" rc %d",
		     cookie, rc);
		return NULL;
	}

	return nw_rxtx.rx;
}

void thread_spmc_relinquish(uint64_t cookie)
{
	struct ffa_mem_relinquish *relinquish_desc = nw_rxtx.tx;
	struct thread_smc_args args = {
		.a0 = FFA_MEM_RELINQUISH,
	};

	memset(relinquish_desc, 0, sizeof(*relinquish_desc));
	relinquish_desc->handle = cookie;
	relinquish_desc->flags = 0;
	relinquish_desc->endpoint_count = 1;
	relinquish_desc->endpoint_id_array[0] = my_endpoint_id;
	thread_smccc(&args);
	if (!is_ffa_success(args.a0))
		EMSG("Failed to relinquish cookie %#"PRIx64, cookie);
}

static int set_pages(struct ffa_address_range *regions,
		     unsigned int num_regions, unsigned int num_pages,
		     struct mobj_ffa *mf)
{
	unsigned int n = 0;
	unsigned int idx = 0;

	for (n = 0; n < num_regions; n++) {
		unsigned int page_count = READ_ONCE(regions[n].page_count);
		uint64_t addr = READ_ONCE(regions[n].address);

		if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count))
			return FFA_INVALID_PARAMETERS;
	}

	if (idx != num_pages)
		return FFA_INVALID_PARAMETERS;

	return 0;
}

struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie)
{
	struct mobj_ffa *ret = NULL;
	struct ffa_mem_transaction_x retrieve_desc = { };
	struct ffa_mem_access *descr_array = NULL;
	struct ffa_mem_region *descr = NULL;
	struct mobj_ffa *mf = NULL;
	unsigned int num_pages = 0;
	unsigned int offs = 0;
	void *buf = NULL;
	struct thread_smc_args ffa_rx_release_args = {
		.a0 = FFA_RX_RELEASE
	};

	/*
	 * OP-TEE only supports a single mem_region while the
	 * specification allows for more than one.
	 */
	buf = spmc_retrieve_req(cookie, &retrieve_desc);
	if (!buf) {
		EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64,
		     cookie);
		return NULL;
	}

	descr_array = (void *)((vaddr_t)buf + retrieve_desc.mem_access_offs);
	offs = READ_ONCE(descr_array->region_offs);
	descr = (struct ffa_mem_region *)((vaddr_t)buf + offs);

	num_pages = READ_ONCE(descr->total_page_count);
	mf = mobj_ffa_spmc_new(cookie, num_pages);
	if (!mf)
		goto out;

	if (set_pages(descr->address_range_array,
		      READ_ONCE(descr->address_range_count), num_pages, mf)) {
		mobj_ffa_spmc_delete(mf);
		goto out;
	}

	ret = mf;

out:
	/* Release the RX buffer after the mem retrieve request. */
	thread_smccc(&ffa_rx_release_args);

	return ret;
}

static TEE_Result spmc_init(void)
{
	unsigned int major = 0;
	unsigned int minor __maybe_unused = 0;
	uint32_t my_vers = 0;
	uint32_t vers = 0;

	my_vers = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR);
	vers = get_ffa_version(my_vers);
	major = (vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK;
	minor = (vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK;
	DMSG("SPMC reported version %u.%u", major, minor);
	if (major != FFA_VERSION_MAJOR) {
		EMSG("Incompatible major version %u, expected %u",
		     major, FFA_VERSION_MAJOR);
		panic();
	}
	if (vers < my_vers)
		my_vers = vers;
	DMSG("Using version %u.%u",
	     (my_vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK,
	     (my_vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK);
	nw_rxtx.ffa_vers = my_vers;

	spmc_rxtx_map(&nw_rxtx);
	my_endpoint_id = spmc_get_id();
	DMSG("My endpoint ID %#x", my_endpoint_id);

	return TEE_SUCCESS;
}
#endif /* !defined(CFG_CORE_SEL1_SPMC) */

service_init(spmc_init);