// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2020-2021, Linaro Limited.
 * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
 */

#include <assert.h>
#include <ffa.h>
#include <initcall.h>
#include <io.h>
#include <kernel/interrupt.h>
#include <kernel/panic.h>
#include <kernel/secure_partition.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/tee_misc.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/thread_spmc.h>
#include <kernel/virtualization.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <optee_ffa.h>
#include <optee_msg.h>
#include <optee_rpc_cmd.h>
#include <sm/optee_smc.h>
#include <string.h>
#include <sys/queue.h>
#include <tee/entry_std.h>
#include <tee/uuid.h>
#include <util.h>

#if defined(CFG_CORE_SEL1_SPMC)
struct mem_share_state {
	struct mobj_ffa *mf;
	unsigned int page_count;
	unsigned int region_count;
	unsigned int current_page_idx;
};

struct mem_frag_state {
	struct mem_share_state share;
	tee_mm_entry_t *mm;
	unsigned int frag_offset;
	SLIST_ENTRY(mem_frag_state) link;
};
#endif

/* Initialized in spmc_init() below */
static uint16_t my_endpoint_id __nex_bss;
#ifdef CFG_CORE_SEL1_SPMC
static const uint32_t my_part_props = FFA_PART_PROP_DIRECT_REQ_RECV |
				      FFA_PART_PROP_DIRECT_REQ_SEND |
#ifdef CFG_NS_VIRTUALIZATION
				      FFA_PART_PROP_NOTIF_CREATED |
				      FFA_PART_PROP_NOTIF_DESTROYED |
#endif
#ifdef ARM64
				      FFA_PART_PROP_AARCH64_STATE |
#endif
				      FFA_PART_PROP_IS_PE_ID;

static uint32_t my_uuid_words[] = {
	/*
	 * - if the SPMC is in S-EL2 this UUID describes OP-TEE as an S-EL1
	 *   SP, or
	 * - if the SPMC is in S-EL1 then this UUID is for OP-TEE as a
	 *   logical partition, residing in the same exception level as the
	 *   SPMC
	 * UUID 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
	 */
	0xe0786148, 0xe311f8e7, 0x02005ebc, 0x1bc5d5a5,
};

/*
 * If struct ffa_rxtx::size is 0 the RX/TX buffers are not mapped or
 * initialized.
 *
 * struct ffa_rxtx::spinlock protects the variables below from concurrent
 * access. This includes the use of the content of struct ffa_rxtx::rx and
 * @frag_state_head.
 *
 * struct ffa_rxtx::tx_is_mine is true when we may write to struct
 * ffa_rxtx::tx and false when it is owned by normal world.
 *
 * Note that we can't prevent normal world from updating the content of
 * these buffers so we must always be careful when reading, even while we
 * hold the lock.
 */
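
/*
 * For reference, the struct ffa_rxtx fields used in this file (the struct
 * itself is defined in <kernel/thread_spmc.h>): rx/tx point at the shared
 * buffer pair, size is the buffer size (0 means not mapped), spinlock
 * serializes access, tx_is_mine tracks ownership of the TX buffer and
 * ffa_vers is the FF-A version negotiated for this buffer pair.
 */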

static struct ffa_rxtx my_rxtx __nex_bss;

static bool is_nw_buf(struct ffa_rxtx *rxtx)
{
	return rxtx == &my_rxtx;
}

static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
	SLIST_HEAD_INITIALIZER(&frag_state_head);
#else
static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
static struct ffa_rxtx my_rxtx = {
	.rx = __rx_buf,
	.tx = __tx_buf,
	.size = sizeof(__rx_buf),
};
#endif

static uint32_t swap_src_dst(uint32_t src_dst)
{
	return (src_dst >> 16) | (src_dst << 16);
}

static uint16_t get_sender_id(uint32_t src_dst)
{
	return src_dst >> 16;
}

void spmc_set_args(struct thread_smc_args *args, uint32_t fid, uint32_t src_dst,
		   uint32_t w2, uint32_t w3, uint32_t w4, uint32_t w5)
{
	*args = (struct thread_smc_args){ .a0 = fid,
					  .a1 = src_dst,
					  .a2 = w2,
					  .a3 = w3,
					  .a4 = w4,
					  .a5 = w5, };
}

uint32_t spmc_exchange_version(uint32_t vers, struct ffa_rxtx *rxtx)
{
	/*
	 * No locking; if the caller makes concurrent calls to this it's
	 * only making a mess for itself. We must be able to renegotiate
	 * the FF-A version in order to support differing versions between
	 * the loader and the driver.
	 */
	if (vers < FFA_VERSION_1_1)
		rxtx->ffa_vers = FFA_VERSION_1_0;
	else
		rxtx->ffa_vers = FFA_VERSION_1_1;

	return rxtx->ffa_vers;
}
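
/*
 * The helpers above rely on the FF-A convention that w1 of a direct
 * message carries the sender endpoint ID in bits [31:16] and the
 * destination endpoint ID in bits [15:0]. For example, with
 * src_dst = 0x00058000 (sender 0x5, destination 0x8000) get_sender_id()
 * returns 0x5 and swap_src_dst() returns 0x80000005, the src/dst value
 * to use in the corresponding direct response.
 */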

#if defined(CFG_CORE_SEL1_SPMC)
static void handle_features(struct thread_smc_args *args)
{
	uint32_t ret_fid = 0;
	uint32_t ret_w2 = FFA_PARAM_MBZ;

	switch (args->a1) {
#ifdef ARM64
	case FFA_RXTX_MAP_64:
#endif
	case FFA_RXTX_MAP_32:
		ret_fid = FFA_SUCCESS_32;
		ret_w2 = 0; /* 4kB Minimum buffer size and alignment boundary */
		break;
#ifdef ARM64
	case FFA_MEM_SHARE_64:
#endif
	case FFA_MEM_SHARE_32:
		ret_fid = FFA_SUCCESS_32;
		/*
		 * Partition manager supports transmission of a memory
		 * transaction descriptor in a buffer dynamically allocated
		 * by the endpoint.
		 */
		ret_w2 = BIT(0);
		break;

	case FFA_ERROR:
	case FFA_VERSION:
	case FFA_SUCCESS_32:
#ifdef ARM64
	case FFA_SUCCESS_64:
#endif
	case FFA_FEATURES:
	case FFA_SPM_ID_GET:
	case FFA_MEM_FRAG_TX:
	case FFA_MEM_RECLAIM:
	case FFA_MSG_SEND_DIRECT_REQ_64:
	case FFA_MSG_SEND_DIRECT_REQ_32:
	case FFA_INTERRUPT:
	case FFA_PARTITION_INFO_GET:
	case FFA_RXTX_UNMAP:
	case FFA_RX_RELEASE:
	case FFA_FEATURE_MANAGED_EXIT_INTR:
		ret_fid = FFA_SUCCESS_32;
		break;
	default:
		ret_fid = FFA_ERROR;
		ret_w2 = FFA_NOT_SUPPORTED;
		break;
	}

	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
{
	tee_mm_entry_t *mm = NULL;

	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
		return FFA_INVALID_PARAMETERS;

	mm = tee_mm_alloc(&tee_mm_shm, sz);
	if (!mm)
		return FFA_NO_MEMORY;

	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
					  sz / SMALL_PAGE_SIZE,
					  MEM_AREA_NSEC_SHM)) {
		tee_mm_free(mm);
		return FFA_INVALID_PARAMETERS;
	}

	*va_ret = (void *)tee_mm_get_smem(mm);
	return 0;
}

static void handle_spm_id_get(struct thread_smc_args *args)
{
	spmc_set_args(args, FFA_SUCCESS_32, FFA_PARAM_MBZ, my_endpoint_id,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static void unmap_buf(void *va, size_t sz)
{
	tee_mm_entry_t *mm = tee_mm_find(&tee_mm_shm, (vaddr_t)va);

	assert(mm);
	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
	tee_mm_free(mm);
}
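
/*
 * FFA_RXTX_MAP as used below: the caller passes the address of its TX
 * buffer in w1, the address of its RX buffer in w2 and the size of the
 * buffers as a page count in w3. From our point of view the pair is
 * mirrored: the caller's TX buffer is what we read from (our RX) and the
 * caller's RX buffer is what we write to (our TX), hence the swap in
 * spmc_handle_rxtx_map().
 */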

void spmc_handle_rxtx_map(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
{
	int rc = 0;
	uint32_t ret_fid = FFA_ERROR;
	unsigned int sz = 0;
	paddr_t rx_pa = 0;
	paddr_t tx_pa = 0;
	void *rx = NULL;
	void *tx = NULL;

	cpu_spin_lock(&rxtx->spinlock);

	if (args->a3 & GENMASK_64(63, 6)) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	sz = args->a3 * SMALL_PAGE_SIZE;
	if (!sz) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}
	/* TX/RX are swapped compared to the caller */
	tx_pa = args->a2;
	rx_pa = args->a1;

	if (rxtx->size) {
		rc = FFA_DENIED;
		goto out;
	}

	/*
	 * If the buffer comes from an SP the address is virtual and already
	 * mapped.
	 */
	if (is_nw_buf(rxtx)) {
		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
			enum teecore_memtypes mt = MEM_AREA_NEX_NSEC_SHM;
			bool tx_alloced = false;

			/*
			 * With virtualization we establish this mapping in
			 * the nexus mapping which is then replicated to
			 * each partition.
			 *
			 * This means that this mapping must be done before
			 * any partition is created and then must not be
			 * changed.
			 */

			/*
			 * core_mmu_add_mapping() may reuse previous
			 * mappings. First check if there are any mappings
			 * to reuse so we know how to clean up in case of
			 * failure.
			 */
			tx = phys_to_virt(tx_pa, mt, sz);
			rx = phys_to_virt(rx_pa, mt, sz);
			if (!tx) {
				tx = core_mmu_add_mapping(mt, tx_pa, sz);
				if (!tx) {
					rc = FFA_NO_MEMORY;
					goto out;
				}
				tx_alloced = true;
			}
			if (!rx)
				rx = core_mmu_add_mapping(mt, rx_pa, sz);

			if (!rx) {
				if (tx_alloced && tx)
					core_mmu_remove_mapping(mt, tx, sz);
				rc = FFA_NO_MEMORY;
				goto out;
			}
		} else {
			rc = map_buf(tx_pa, sz, &tx);
			if (rc)
				goto out;
			rc = map_buf(rx_pa, sz, &rx);
			if (rc) {
				unmap_buf(tx, sz);
				goto out;
			}
		}
		rxtx->tx = tx;
		rxtx->rx = rx;
	} else {
		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}

		if (!virt_to_phys((void *)tx_pa) ||
		    !virt_to_phys((void *)rx_pa)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}

		rxtx->tx = (void *)tx_pa;
		rxtx->rx = (void *)rx_pa;
	}

	rxtx->size = sz;
	rxtx->tx_is_mine = true;
	ret_fid = FFA_SUCCESS_32;
	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
out:
	cpu_spin_unlock(&rxtx->spinlock);
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

void spmc_handle_rxtx_unmap(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
{
	uint32_t ret_fid = FFA_ERROR;
	int rc = FFA_INVALID_PARAMETERS;

	cpu_spin_lock(&rxtx->spinlock);

	if (!rxtx->size)
		goto out;

	/* We don't unmap the SP memory as the SP might still use it */
	if (is_nw_buf(rxtx)) {
		unmap_buf(rxtx->rx, rxtx->size);
		unmap_buf(rxtx->tx, rxtx->size);
	}
	rxtx->size = 0;
	rxtx->rx = NULL;
	rxtx->tx = NULL;
	ret_fid = FFA_SUCCESS_32;
	rc = 0;
out:
	cpu_spin_unlock(&rxtx->spinlock);
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

void spmc_handle_rx_release(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
{
	uint32_t ret_fid = 0;
	int rc = 0;

	cpu_spin_lock(&rxtx->spinlock);
	/* The sender's RX is our TX */
	if (!rxtx->size || rxtx->tx_is_mine) {
		ret_fid = FFA_ERROR;
		rc = FFA_DENIED;
	} else {
		ret_fid = FFA_SUCCESS_32;
		rc = 0;
		rxtx->tx_is_mine = true;
	}
	cpu_spin_unlock(&rxtx->spinlock);

	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
{
	return !w0 && !w1 && !w2 && !w3;
}

static bool is_my_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
{
	/*
	 * This depends on which UUID we have been assigned.
	 * TODO add a generic mechanism to obtain our UUID.
	 *
	 * The test below is for the hard-coded UUID
	 * 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
	 */
	return w0 == my_uuid_words[0] && w1 == my_uuid_words[1] &&
	       w2 == my_uuid_words[2] && w3 == my_uuid_words[3];
}
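
/*
 * Partition information descriptors differ between FF-A versions: from
 * FF-A 1.1 each entry also carries the partition UUID (FFA_UUID_SIZE
 * bytes) appended to the 1.0 layout, which is why the per-entry size
 * below is derived from the negotiated version before indexing into the
 * caller's buffer.
 */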

TEE_Result spmc_fill_partition_entry(uint32_t ffa_vers, void *buf, size_t blen,
				     size_t idx, uint16_t endpoint_id,
				     uint16_t execution_context,
				     uint32_t part_props,
				     const uint32_t uuid_words[4])
{
	struct ffa_partition_info_x *fpi = NULL;
	size_t fpi_size = sizeof(*fpi);

	if (ffa_vers >= FFA_VERSION_1_1)
		fpi_size += FFA_UUID_SIZE;

	if ((idx + 1) * fpi_size > blen)
		return TEE_ERROR_OUT_OF_MEMORY;

	fpi = (void *)((vaddr_t)buf + idx * fpi_size);
	fpi->id = endpoint_id;
	/* Number of execution contexts implemented by this partition */
	fpi->execution_context = execution_context;

	fpi->partition_properties = part_props;

	if (ffa_vers >= FFA_VERSION_1_1) {
		if (uuid_words)
			memcpy(fpi->uuid, uuid_words, FFA_UUID_SIZE);
		else
			memset(fpi->uuid, 0, FFA_UUID_SIZE);
	}

	return TEE_SUCCESS;
}

static int handle_partition_info_get_all(size_t *elem_count,
					 struct ffa_rxtx *rxtx, bool count_only)
{
	if (!count_only) {
		/* Add OP-TEE SP */
		if (spmc_fill_partition_entry(rxtx->ffa_vers, rxtx->tx,
					      rxtx->size, 0, my_endpoint_id,
					      CFG_TEE_CORE_NB_CORE,
					      my_part_props, my_uuid_words))
			return FFA_NO_MEMORY;
	}
	*elem_count = 1;

	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
		if (sp_partition_info_get(rxtx->ffa_vers, rxtx->tx, rxtx->size,
					  NULL, elem_count, count_only))
			return FFA_NO_MEMORY;
	}

	return FFA_OK;
}

void spmc_handle_partition_info_get(struct thread_smc_args *args,
				    struct ffa_rxtx *rxtx)
{
	TEE_Result res = TEE_SUCCESS;
	uint32_t ret_fid = FFA_ERROR;
	uint32_t rc = 0;
	bool count_only = args->a5 & FFA_PARTITION_INFO_GET_COUNT_FLAG;

	if (!count_only) {
		cpu_spin_lock(&rxtx->spinlock);

		if (!rxtx->size || !rxtx->tx_is_mine) {
			rc = FFA_BUSY;
			goto out;
		}
	}

	if (is_nil_uuid(args->a1, args->a2, args->a3, args->a4)) {
		size_t elem_count = 0;

		ret_fid = handle_partition_info_get_all(&elem_count, rxtx,
							count_only);

		if (ret_fid) {
			rc = ret_fid;
			ret_fid = FFA_ERROR;
		} else {
			ret_fid = FFA_SUCCESS_32;
			rc = elem_count;
		}

		goto out;
	}

	if (is_my_uuid(args->a1, args->a2, args->a3, args->a4)) {
		if (!count_only) {
			res = spmc_fill_partition_entry(rxtx->ffa_vers,
							rxtx->tx, rxtx->size, 0,
							my_endpoint_id,
							CFG_TEE_CORE_NB_CORE,
							my_part_props,
							my_uuid_words);
			if (res) {
				ret_fid = FFA_ERROR;
				rc = FFA_INVALID_PARAMETERS;
				goto out;
			}
		}
		rc = 1;
	} else if (IS_ENABLED(CFG_SECURE_PARTITION)) {
		uint32_t uuid_array[4] = { 0 };
		TEE_UUID uuid = { };
		size_t count = 0;

		uuid_array[0] = args->a1;
		uuid_array[1] = args->a2;
		uuid_array[2] = args->a3;
		uuid_array[3] = args->a4;
		tee_uuid_from_octets(&uuid, (uint8_t *)uuid_array);

		res = sp_partition_info_get(rxtx->ffa_vers, rxtx->tx,
					    rxtx->size, &uuid, &count,
					    count_only);
		if (res != TEE_SUCCESS) {
			ret_fid = FFA_ERROR;
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
		rc = count;
	} else {
		ret_fid = FFA_ERROR;
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	ret_fid = FFA_SUCCESS_32;

out:
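	/*
	 * Return the result and, unless this was a count-only request,
	 * hand our TX buffer (the caller's RX buffer) over to the caller.
	 * It is given back with FFA_RX_RELEASE, see
	 * spmc_handle_rx_release().
	 */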
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
	if (!count_only) {
		rxtx->tx_is_mine = false;
		cpu_spin_unlock(&rxtx->spinlock);
	}
}

static void spmc_handle_run(struct thread_smc_args *args)
{
	uint16_t endpoint = FFA_TARGET_INFO_GET_SP_ID(args->a1);
	uint16_t thread_id = FFA_TARGET_INFO_GET_VCPU_ID(args->a1);
	uint32_t rc = FFA_OK;

	if (endpoint != my_endpoint_id) {
		/*
		 * The endpoint should be an SP; try to resume the SP from
		 * preempted into busy state.
		 */
		rc = spmc_sp_resume_from_preempted(endpoint);
		if (rc)
			goto out;
	}

	thread_resume_from_rpc(thread_id, 0, 0, 0, 0);

	/* thread_resume_from_rpc() returns only if the thread_id is invalid */
	rc = FFA_INVALID_PARAMETERS;

out:
	spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}
#endif /*CFG_CORE_SEL1_SPMC*/

static void handle_yielding_call(struct thread_smc_args *args,
				 uint32_t direct_resp_fid)
{
	TEE_Result res = 0;

	thread_check_canaries();

#ifdef ARM64
	/* Saving this for an eventual RPC */
	thread_get_core_local()->direct_resp_fid = direct_resp_fid;
#endif

	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
		/* Note connection to struct thread_rpc_arg::ret */
		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
				       0);
		res = TEE_ERROR_BAD_PARAMETERS;
	} else {
		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
				     args->a6, args->a7);
		res = TEE_ERROR_BUSY;
	}
	spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1),
		      0, res, 0, 0);
}

static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
{
	uint64_t cookie = reg_pair_to_64(a5, a4);
	uint32_t res = 0;

	res = mobj_ffa_unregister_by_cookie(cookie);
	switch (res) {
	case TEE_SUCCESS:
	case TEE_ERROR_ITEM_NOT_FOUND:
		return 0;
	case TEE_ERROR_BUSY:
		EMSG("res %#"PRIx32, res);
		return FFA_BUSY;
	default:
		EMSG("res %#"PRIx32, res);
		return FFA_INVALID_PARAMETERS;
	}
}

static void handle_blocking_call(struct thread_smc_args *args,
				 uint32_t direct_resp_fid)
{
	switch (args->a3) {
	case OPTEE_FFA_GET_API_VERSION:
		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
			      0);
		break;
	case OPTEE_FFA_GET_OS_VERSION:
		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
			      CFG_OPTEE_REVISION_MAJOR,
			      CFG_OPTEE_REVISION_MINOR, TEE_IMPL_GIT_SHA1);
		break;
	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
			      0, THREAD_RPC_MAX_NUM_PARAMS,
			      OPTEE_FFA_SEC_CAP_ARG_OFFSET);
		break;
	case OPTEE_FFA_UNREGISTER_SHM:
		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
			      handle_unregister_shm(args->a4, args->a5), 0, 0);
		break;
	default:
		EMSG("Unhandled blocking service ID %#"PRIx32,
		     (uint32_t)args->a3);
		panic();
	}
}
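
/*
 * Direct requests with FFA_MSG_FLAG_FRAMEWORK set in w2 are framework
 * messages rather than OP-TEE service calls. The ones handled below are
 * the VM created/destroyed notifications used with CFG_NS_VIRTUALIZATION
 * and the version negotiation request; anything else is answered with
 * FFA_NOT_SUPPORTED.
 */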

static void handle_framework_direct_request(struct thread_smc_args *args,
					    struct ffa_rxtx *rxtx,
					    uint32_t direct_resp_fid)
{
	uint32_t w0 = FFA_ERROR;
	uint32_t w1 = FFA_PARAM_MBZ;
	uint32_t w2 = FFA_NOT_SUPPORTED;
	uint32_t w3 = FFA_PARAM_MBZ;

	switch (args->a2 & FFA_MSG_TYPE_MASK) {
	case FFA_MSG_SEND_VM_CREATED:
		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
			uint16_t guest_id = args->a5;
			TEE_Result res = virt_guest_created(guest_id);

			w0 = direct_resp_fid;
			w1 = swap_src_dst(args->a1);
			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_CREATED;
			if (res == TEE_SUCCESS)
				w3 = FFA_OK;
			else if (res == TEE_ERROR_OUT_OF_MEMORY)
				w3 = FFA_DENIED;
			else
				w3 = FFA_INVALID_PARAMETERS;
		}
		break;
	case FFA_MSG_SEND_VM_DESTROYED:
		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
			uint16_t guest_id = args->a5;
			TEE_Result res = virt_guest_destroyed(guest_id);

			w0 = direct_resp_fid;
			w1 = swap_src_dst(args->a1);
			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_DESTROYED;
			if (res == TEE_SUCCESS)
				w3 = FFA_OK;
			else
				w3 = FFA_INVALID_PARAMETERS;
		}
		break;
	case FFA_MSG_VERSION_REQ:
		w0 = direct_resp_fid;
		w1 = swap_src_dst(args->a1);
		w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_VERSION_RESP;
		w3 = spmc_exchange_version(args->a3, rxtx);
		break;
	default:
		break;
	}
	spmc_set_args(args, w0, w1, w2, w3, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static void handle_direct_request(struct thread_smc_args *args,
				  struct ffa_rxtx *rxtx)
{
	uint32_t direct_resp_fid = 0;

	if (IS_ENABLED(CFG_SECURE_PARTITION) &&
	    FFA_DST(args->a1) != my_endpoint_id) {
		spmc_sp_start_thread(args);
		return;
	}

	if (OPTEE_SMC_IS_64(args->a0))
		direct_resp_fid = FFA_MSG_SEND_DIRECT_RESP_64;
	else
		direct_resp_fid = FFA_MSG_SEND_DIRECT_RESP_32;

	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
		handle_framework_direct_request(args, rxtx, direct_resp_fid);
		return;
	}

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
	    virt_set_guest(get_sender_id(args->a1))) {
		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
			      TEE_ERROR_ITEM_NOT_FOUND, 0, 0);
		return;
	}

	if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
		handle_yielding_call(args, direct_resp_fid);
	else
		handle_blocking_call(args, direct_resp_fid);

	/*
	 * Note that handle_yielding_call() typically only returns if a
	 * thread cannot be allocated or found. virt_unset_guest() is also
	 * called from thread_state_suspend() and thread_state_free().
	 */
	virt_unset_guest();
}
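
/*
 * spmc_read_mem_transaction() normalizes the version specific memory
 * transaction descriptor in @buf into struct ffa_mem_transaction_x.
 * FF-A 1.1 stores the size and offset of the endpoint memory access
 * descriptor array explicitly, while in FF-A 1.0 the array is part of
 * the transaction descriptor itself, so size and offset are derived from
 * the 1.0 struct layout instead.
 */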

int spmc_read_mem_transaction(uint32_t ffa_vers, void *buf, size_t blen,
			      struct ffa_mem_transaction_x *trans)
{
	uint16_t mem_reg_attr = 0;
	uint32_t flags = 0;
	uint32_t count = 0;
	uint32_t offs = 0;
	uint32_t size = 0;
	size_t n = 0;

	if (!IS_ALIGNED_WITH_TYPE(buf, uint64_t))
		return FFA_INVALID_PARAMETERS;

	if (ffa_vers >= FFA_VERSION_1_1) {
		struct ffa_mem_transaction_1_1 *descr = NULL;

		if (blen < sizeof(*descr))
			return FFA_INVALID_PARAMETERS;

		descr = buf;
		trans->sender_id = READ_ONCE(descr->sender_id);
		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
		flags = READ_ONCE(descr->flags);
		trans->global_handle = READ_ONCE(descr->global_handle);
		trans->tag = READ_ONCE(descr->tag);

		count = READ_ONCE(descr->mem_access_count);
		size = READ_ONCE(descr->mem_access_size);
		offs = READ_ONCE(descr->mem_access_offs);
	} else {
		struct ffa_mem_transaction_1_0 *descr = NULL;

		if (blen < sizeof(*descr))
			return FFA_INVALID_PARAMETERS;

		descr = buf;
		trans->sender_id = READ_ONCE(descr->sender_id);
		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
		flags = READ_ONCE(descr->flags);
		trans->global_handle = READ_ONCE(descr->global_handle);
		trans->tag = READ_ONCE(descr->tag);

		count = READ_ONCE(descr->mem_access_count);
		size = sizeof(struct ffa_mem_access);
		offs = offsetof(struct ffa_mem_transaction_1_0,
				mem_access_array);
	}

	if (mem_reg_attr > UINT8_MAX || flags > UINT8_MAX ||
	    size > UINT8_MAX || count > UINT8_MAX || offs > UINT16_MAX)
		return FFA_INVALID_PARAMETERS;

	/* Check that the endpoint memory access descriptor array fits */
	if (MUL_OVERFLOW(size, count, &n) || ADD_OVERFLOW(offs, n, &n) ||
	    n > blen)
		return FFA_INVALID_PARAMETERS;

	trans->mem_reg_attr = mem_reg_attr;
	trans->flags = flags;
	trans->mem_access_size = size;
	trans->mem_access_count = count;
	trans->mem_access_offs = offs;
	return 0;
}

#if defined(CFG_CORE_SEL1_SPMC)
static int get_acc_perms(vaddr_t mem_acc_base, unsigned int mem_access_size,
			 unsigned int mem_access_count, uint8_t *acc_perms,
			 unsigned int *region_offs)
{
	struct ffa_mem_access_perm *descr = NULL;
	struct ffa_mem_access *mem_acc = NULL;
	unsigned int n = 0;

	for (n = 0; n < mem_access_count; n++) {
		mem_acc = (void *)(mem_acc_base + mem_access_size * n);
		descr = &mem_acc->access_perm;
		if (READ_ONCE(descr->endpoint_id) == my_endpoint_id) {
			*acc_perms = READ_ONCE(descr->perm);
			*region_offs = READ_ONCE(mem_acc->region_offs);
			return 0;
		}
	}

	return FFA_INVALID_PARAMETERS;
}

static int mem_share_init(struct ffa_mem_transaction_x *mem_trans, void *buf,
			  size_t blen, unsigned int *page_count,
			  unsigned int *region_count, size_t *addr_range_offs)
{
	const uint16_t exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
	const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
	struct ffa_mem_region *region_descr = NULL;
	unsigned int region_descr_offs = 0;
	uint8_t mem_acc_perm = 0;
	size_t n = 0;

	if (mem_trans->mem_reg_attr != exp_mem_reg_attr)
		return FFA_INVALID_PARAMETERS;

	/* Check that the access permissions match what's expected */
	if (get_acc_perms((vaddr_t)buf + mem_trans->mem_access_offs,
			  mem_trans->mem_access_size,
			  mem_trans->mem_access_count,
			  &mem_acc_perm, &region_descr_offs) ||
	    mem_acc_perm != exp_mem_acc_perm)
		return FFA_INVALID_PARAMETERS;

	/* Check that the Composite memory region descriptor fits */
	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
	    n > blen)
		return FFA_INVALID_PARAMETERS;

	if (!IS_ALIGNED_WITH_TYPE((vaddr_t)buf + region_descr_offs,
				  struct ffa_mem_region))
		return FFA_INVALID_PARAMETERS;

	region_descr = (struct ffa_mem_region *)((vaddr_t)buf +
						 region_descr_offs);
	*page_count = READ_ONCE(region_descr->total_page_count);
	*region_count = READ_ONCE(region_descr->address_range_count);
	*addr_range_offs = n;
	return 0;
}

static int add_mem_share_helper(struct mem_share_state *s, void *buf,
				size_t flen)
{
	unsigned int region_count = flen / sizeof(struct ffa_address_range);
	struct ffa_address_range *arange = NULL;
	unsigned int n = 0;

	if (region_count > s->region_count)
		region_count = s->region_count;

	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range))
		return FFA_INVALID_PARAMETERS;
	arange = buf;

	for (n = 0; n < region_count; n++) {
		unsigned int page_count = READ_ONCE(arange[n].page_count);
		uint64_t addr = READ_ONCE(arange[n].address);

		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
					  addr, page_count))
			return FFA_INVALID_PARAMETERS;
	}

	s->region_count -= region_count;
	if (s->region_count)
		return region_count * sizeof(*arange);

	if (s->current_page_idx != s->page_count)
		return FFA_INVALID_PARAMETERS;

	return 0;
}

static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
{
	int rc = 0;

	rc = add_mem_share_helper(&s->share, buf, flen);
	if (rc >= 0) {
		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
			/* We're not at the end of the descriptor yet */
			if (s->share.region_count)
				return s->frag_offset;

			/* We're done */
			rc = 0;
		} else {
			rc = FFA_INVALID_PARAMETERS;
		}
	}

	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
	if (rc < 0)
		mobj_ffa_sel1_spmc_delete(s->share.mf);
	else
		mobj_ffa_push_to_inactive(s->share.mf);
	free(s);

	return rc;
}

static bool is_sp_share(struct ffa_mem_transaction_x *mem_trans,
			void *buf)
{
	struct ffa_mem_access_perm *perm = NULL;
	struct ffa_mem_access *mem_acc = NULL;

	if (!IS_ENABLED(CFG_SECURE_PARTITION))
		return false;

	if (mem_trans->mem_access_count < 1)
		return false;

	mem_acc = (void *)((vaddr_t)buf + mem_trans->mem_access_offs);
	perm = &mem_acc->access_perm;

	/*
	 * perm->endpoint_id is read here only to check if the endpoint is
	 * OP-TEE. We read it again later, with some additional checks to
	 * make sure that the data is correct.
	 */
	return READ_ONCE(perm->endpoint_id) != my_endpoint_id;
}
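
/*
 * add_mem_share() handles both complete and fragmented FFA_MEM_SHARE
 * requests. When flen == blen the whole descriptor is parsed directly,
 * otherwise a struct mem_frag_state is queued on frag_state_head and the
 * remaining fragments arrive later via FFA_MEM_FRAG_TX (see
 * handle_mem_frag_tx()). A positive return value is the number of bytes
 * consumed so far, signalling that more fragments are expected.
 */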

static int add_mem_share(struct ffa_mem_transaction_x *mem_trans,
			 tee_mm_entry_t *mm, void *buf, size_t blen,
			 size_t flen, uint64_t *global_handle)
{
	int rc = 0;
	struct mem_share_state share = { };
	size_t addr_range_offs = 0;
	uint64_t cookie = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
	size_t n = 0;

	rc = mem_share_init(mem_trans, buf, flen, &share.page_count,
			    &share.region_count, &addr_range_offs);
	if (rc)
		return rc;

	if (MUL_OVERFLOW(share.region_count,
			 sizeof(struct ffa_address_range), &n) ||
	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
		return FFA_INVALID_PARAMETERS;

	if (mem_trans->global_handle)
		cookie = mem_trans->global_handle;
	share.mf = mobj_ffa_sel1_spmc_new(cookie, share.page_count);
	if (!share.mf)
		return FFA_NO_MEMORY;

	if (flen != blen) {
		struct mem_frag_state *s = calloc(sizeof(*s), 1);

		if (!s) {
			rc = FFA_NO_MEMORY;
			goto err;
		}
		s->share = share;
		s->mm = mm;
		s->frag_offset = addr_range_offs;

		SLIST_INSERT_HEAD(&frag_state_head, s, link);
		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
					flen - addr_range_offs);

		if (rc >= 0)
			*global_handle = mobj_ffa_get_cookie(share.mf);

		return rc;
	}

	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
				  flen - addr_range_offs);
	if (rc) {
		/*
		 * Number of consumed bytes may be returned instead of 0 for
		 * done.
		 */
		rc = FFA_INVALID_PARAMETERS;
		goto err;
	}

	*global_handle = mobj_ffa_push_to_inactive(share.mf);

	return 0;
err:
	mobj_ffa_sel1_spmc_delete(share.mf);
	return rc;
}

static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
				 unsigned int page_count,
				 uint64_t *global_handle, struct ffa_rxtx *rxtx)
{
	struct ffa_mem_transaction_x mem_trans = { };
	int rc = 0;
	size_t len = 0;
	void *buf = NULL;
	tee_mm_entry_t *mm = NULL;
	vaddr_t offs = pbuf & SMALL_PAGE_MASK;

	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
		return FFA_INVALID_PARAMETERS;
	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
		return FFA_INVALID_PARAMETERS;

	/*
	 * Check that the length reported in flen is covered by len even
	 * if the offset is taken into account.
	 */
	if (len < flen || len - offs < flen)
		return FFA_INVALID_PARAMETERS;

	mm = tee_mm_alloc(&tee_mm_shm, len);
	if (!mm)
		return FFA_NO_MEMORY;

	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
					  page_count, MEM_AREA_NSEC_SHM)) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}
	buf = (void *)(tee_mm_get_smem(mm) + offs);

	cpu_spin_lock(&rxtx->spinlock);
	rc = spmc_read_mem_transaction(rxtx->ffa_vers, buf, flen, &mem_trans);
	if (!rc && IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
	    virt_set_guest(mem_trans.sender_id))
		rc = FFA_DENIED;
	if (!rc)
		rc = add_mem_share(&mem_trans, mm, buf, blen, flen,
				   global_handle);
	virt_unset_guest();
	cpu_spin_unlock(&rxtx->spinlock);
	if (rc > 0)
		return rc;

	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
out:
	tee_mm_free(mm);
	return rc;
}

static int handle_mem_share_rxbuf(size_t blen, size_t flen,
				  uint64_t *global_handle,
				  struct ffa_rxtx *rxtx)
{
	struct ffa_mem_transaction_x mem_trans = { };
	int rc = FFA_DENIED;

	cpu_spin_lock(&rxtx->spinlock);

	if (!rxtx->rx || flen > rxtx->size)
		goto out;

	rc = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, flen,
				       &mem_trans);
	if (rc)
		goto out;
	if (is_sp_share(&mem_trans, rxtx->rx)) {
		rc = spmc_sp_add_share(&mem_trans, rxtx, blen,
				       global_handle, NULL);
		goto out;
	}

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
	    virt_set_guest(mem_trans.sender_id))
		goto out;

	rc = add_mem_share(&mem_trans, NULL, rxtx->rx, blen, flen,
			   global_handle);

	virt_unset_guest();

out:
	cpu_spin_unlock(&rxtx->spinlock);

	return rc;
}
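
/*
 * FFA_MEM_SHARE as handled below: w1 holds the total length of the
 * memory transaction descriptor, w2 the length of this fragment, w3 the
 * address of a caller allocated buffer holding the descriptor (0 means
 * the descriptor is in our RX buffer) and w4 its page count. On success
 * the 64-bit handle is returned in w2/w3; a fragmented share that needs
 * more data is answered with FFA_MEM_FRAG_RX instead.
 */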

static void handle_mem_share(struct thread_smc_args *args,
			     struct ffa_rxtx *rxtx)
{
	uint32_t tot_len = args->a1;
	uint32_t frag_len = args->a2;
	uint64_t addr = args->a3;
	uint32_t page_count = args->a4;
	uint32_t ret_w1 = 0;
	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
	uint32_t ret_w3 = 0;
	uint32_t ret_fid = FFA_ERROR;
	uint64_t global_handle = 0;
	int rc = 0;

	/* Check that the MBZs are indeed 0 */
	if (args->a5 || args->a6 || args->a7)
		goto out;

	/* Check that fragment length doesn't exceed total length */
	if (frag_len > tot_len)
		goto out;

	/* Check for 32-bit calling convention */
	if (args->a0 == FFA_MEM_SHARE_32)
		addr &= UINT32_MAX;

	if (!addr) {
		/*
		 * The memory transaction descriptor is passed via our rx
		 * buffer.
		 */
		if (page_count)
			goto out;
		rc = handle_mem_share_rxbuf(tot_len, frag_len, &global_handle,
					    rxtx);
	} else {
		rc = handle_mem_share_tmem(addr, tot_len, frag_len, page_count,
					   &global_handle, rxtx);
	}
	if (rc < 0) {
		ret_w2 = rc;
	} else if (rc > 0) {
		ret_fid = FFA_MEM_FRAG_RX;
		ret_w3 = rc;
		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
	} else {
		ret_fid = FFA_SUCCESS_32;
		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
	}
out:
	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
}

static struct mem_frag_state *get_frag_state(uint64_t global_handle)
{
	struct mem_frag_state *s = NULL;

	SLIST_FOREACH(s, &frag_state_head, link)
		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
			return s;

	return NULL;
}

static void handle_mem_frag_tx(struct thread_smc_args *args,
			       struct ffa_rxtx *rxtx)
{
	uint64_t global_handle = reg_pair_to_64(args->a2, args->a1);
	size_t flen = args->a3;
	uint32_t endpoint_id = args->a4;
	struct mem_frag_state *s = NULL;
	tee_mm_entry_t *mm = NULL;
	unsigned int page_count = 0;
	void *buf = NULL;
	uint32_t ret_w1 = 0;
	uint32_t ret_w2 = 0;
	uint32_t ret_w3 = 0;
	uint32_t ret_fid = 0;
	int rc = 0;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		uint16_t guest_id = endpoint_id >> 16;

		if (!guest_id || virt_set_guest(guest_id)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out_set_rc;
		}
	}

	/*
	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
	 * requests.
	 */

	cpu_spin_lock(&rxtx->spinlock);

	s = get_frag_state(global_handle);
	if (!s) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	mm = s->mm;
	if (mm) {
		if (flen > tee_mm_get_bytes(mm)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
		page_count = s->share.page_count;
		buf = (void *)tee_mm_get_smem(mm);
	} else {
		if (flen > rxtx->size) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
		buf = rxtx->rx;
	}

	rc = add_mem_share_frag(s, buf, flen);
out:
	virt_unset_guest();
	cpu_spin_unlock(&rxtx->spinlock);

	if (rc <= 0 && mm) {
		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
		tee_mm_free(mm);
	}

out_set_rc:
	if (rc < 0) {
		ret_fid = FFA_ERROR;
		ret_w2 = rc;
	} else if (rc > 0) {
		ret_fid = FFA_MEM_FRAG_RX;
		ret_w3 = rc;
		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
	} else {
		ret_fid = FFA_SUCCESS_32;
		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
	}

	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
}
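
/*
 * FFA_MEM_RECLAIM: the cookie (memory handle) to reclaim arrives in
 * w1/w2. With CFG_NS_VIRTUALIZATION the owning guest must be selected
 * first: the partition ID is normally encoded in the cookie itself
 * (FFA_MEMORY_HANDLE_PRTN_SHIFT/MASK), while cookies carrying
 * FFA_MEMORY_HANDLE_HYPERVISOR_BIT are instead looked up with
 * virt_find_guest_by_cookie().
 */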

static void handle_mem_reclaim(struct thread_smc_args *args)
{
	uint32_t ret_val = FFA_INVALID_PARAMETERS;
	uint32_t ret_fid = FFA_ERROR;
	uint64_t cookie = 0;

	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
		goto out;

	cookie = reg_pair_to_64(args->a2, args->a1);
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		uint16_t guest_id = 0;

		if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
			guest_id = virt_find_guest_by_cookie(cookie);
		} else {
			guest_id = (cookie >> FFA_MEMORY_HANDLE_PRTN_SHIFT) &
				   FFA_MEMORY_HANDLE_PRTN_MASK;
		}
		if (!guest_id || virt_set_guest(guest_id))
			goto out;
	}

	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
	case TEE_SUCCESS:
		ret_fid = FFA_SUCCESS_32;
		ret_val = 0;
		break;
	case TEE_ERROR_ITEM_NOT_FOUND:
		DMSG("cookie %#"PRIx64" not found", cookie);
		ret_val = FFA_INVALID_PARAMETERS;
		break;
	default:
		DMSG("cookie %#"PRIx64" busy", cookie);
		ret_val = FFA_DENIED;
		break;
	}

	virt_unset_guest();

out:
	spmc_set_args(args, ret_fid, ret_val, 0, 0, 0, 0);
}
#endif

/* Only called from assembly */
void thread_spmc_msg_recv(struct thread_smc_args *args);
void thread_spmc_msg_recv(struct thread_smc_args *args)
{
	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
	switch (args->a0) {
#if defined(CFG_CORE_SEL1_SPMC)
	case FFA_FEATURES:
		handle_features(args);
		break;
	case FFA_SPM_ID_GET:
		handle_spm_id_get(args);
		break;
#ifdef ARM64
	case FFA_RXTX_MAP_64:
#endif
	case FFA_RXTX_MAP_32:
		spmc_handle_rxtx_map(args, &my_rxtx);
		break;
	case FFA_RXTX_UNMAP:
		spmc_handle_rxtx_unmap(args, &my_rxtx);
		break;
	case FFA_RX_RELEASE:
		spmc_handle_rx_release(args, &my_rxtx);
		break;
	case FFA_PARTITION_INFO_GET:
		spmc_handle_partition_info_get(args, &my_rxtx);
		break;
	case FFA_RUN:
		spmc_handle_run(args);
		break;
#endif /*CFG_CORE_SEL1_SPMC*/
	case FFA_INTERRUPT:
		interrupt_main_handler();
		spmc_set_args(args, FFA_MSG_WAIT, 0, 0, 0, 0, 0);
		break;
#ifdef ARM64
	case FFA_MSG_SEND_DIRECT_REQ_64:
#endif
	case FFA_MSG_SEND_DIRECT_REQ_32:
		handle_direct_request(args, &my_rxtx);
		break;
#if defined(CFG_CORE_SEL1_SPMC)
#ifdef ARM64
	case FFA_MEM_SHARE_64:
#endif
	case FFA_MEM_SHARE_32:
		handle_mem_share(args, &my_rxtx);
		break;
	case FFA_MEM_RECLAIM:
		if (!IS_ENABLED(CFG_SECURE_PARTITION) ||
		    !ffa_mem_reclaim(args, NULL))
			handle_mem_reclaim(args);
		break;
	case FFA_MEM_FRAG_TX:
		handle_mem_frag_tx(args, &my_rxtx);
		break;
#endif /*CFG_CORE_SEL1_SPMC*/
	default:
		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
	}
}

static TEE_Result yielding_call_with_arg(uint64_t cookie, uint32_t offset)
{
	size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
	struct thread_ctx *thr = threads + thread_get_id();
	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
	struct optee_msg_arg *arg = NULL;
	struct mobj *mobj = NULL;
	uint32_t num_params = 0;
	size_t sz = 0;

	mobj = mobj_ffa_get_by_cookie(cookie, 0);
	if (!mobj) {
		EMSG("Can't find cookie %#"PRIx64, cookie);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	res = mobj_inc_map(mobj);
	if (res)
		goto out_put_mobj;

	res = TEE_ERROR_BAD_PARAMETERS;
	arg = mobj_get_va(mobj, offset, sizeof(*arg));
	if (!arg)
		goto out_dec_map;

	num_params = READ_ONCE(arg->num_params);
	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
		goto out_dec_map;

	sz = OPTEE_MSG_GET_ARG_SIZE(num_params);

	thr->rpc_arg = mobj_get_va(mobj, offset + sz, sz_rpc);
	if (!thr->rpc_arg)
		goto out_dec_map;

	virt_on_stdcall();
	res = tee_entry_std(arg, num_params);

	thread_rpc_shm_cache_clear(&thr->shm_cache);
	thr->rpc_arg = NULL;

out_dec_map:
	mobj_dec_map(mobj);
out_put_mobj:
	mobj_put(mobj);
	return res;
}
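
/*
 * yielding_call_with_arg() above expects the shared memory identified by
 * the cookie to hold a struct optee_msg_arg at @offset, sized for its
 * num_params, immediately followed by a second struct optee_msg_arg of
 * OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS) bytes that is used
 * for RPC requests from the thread (thr->rpc_arg).
 */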

/*
 * Helper routine for the assembly function thread_std_smc_entry()
 *
 * Note: this function is weak just to make link_dummies_paged.c happy.
 */
uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
				       uint32_t a2, uint32_t a3,
				       uint32_t a4, uint32_t a5 __unused)
{
	/*
	 * Arguments are supplied from handle_yielding_call() as:
	 * a0 <- w1
	 * a1 <- w3
	 * a2 <- w4
	 * a3 <- w5
	 * a4 <- w6
	 * a5 <- w7
	 */
	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
	if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG)
		return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4);
	return FFA_DENIED;
}

static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
{
	uint64_t offs = tpm->u.memref.offs;

	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;

	param->u.fmem.offs_low = offs;
	param->u.fmem.offs_high = offs >> 32;
	if (param->u.fmem.offs_high != offs >> 32)
		return false;

	param->u.fmem.size = tpm->u.memref.size;
	if (tpm->u.memref.mobj) {
		uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj);

		/* If a mobj is passed it better be one with a valid cookie. */
		if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
			return false;
		param->u.fmem.global_id = cookie;
	} else {
		param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
	}

	return true;
}

static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
			    struct thread_param *params,
			    struct optee_msg_arg **arg_ret)
{
	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
	struct thread_ctx *thr = threads + thread_get_id();
	struct optee_msg_arg *arg = thr->rpc_arg;

	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!arg) {
		EMSG("rpc_arg not set");
		return TEE_ERROR_GENERIC;
	}

	memset(arg, 0, sz);
	arg->cmd = cmd;
	arg->num_params = num_params;
	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */

	for (size_t n = 0; n < num_params; n++) {
		switch (params[n].attr) {
		case THREAD_PARAM_ATTR_NONE:
			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
			break;
		case THREAD_PARAM_ATTR_VALUE_IN:
		case THREAD_PARAM_ATTR_VALUE_OUT:
		case THREAD_PARAM_ATTR_VALUE_INOUT:
			arg->params[n].attr = params[n].attr -
					      THREAD_PARAM_ATTR_VALUE_IN +
					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
			arg->params[n].u.value.a = params[n].u.value.a;
			arg->params[n].u.value.b = params[n].u.value.b;
			arg->params[n].u.value.c = params[n].u.value.c;
			break;
		case THREAD_PARAM_ATTR_MEMREF_IN:
		case THREAD_PARAM_ATTR_MEMREF_OUT:
		case THREAD_PARAM_ATTR_MEMREF_INOUT:
			if (!set_fmem(arg->params + n, params + n))
				return TEE_ERROR_BAD_PARAMETERS;
			break;
		default:
			return TEE_ERROR_BAD_PARAMETERS;
		}
	}

	if (arg_ret)
		*arg_ret = arg;

	return TEE_SUCCESS;
}

static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
				struct thread_param *params)
{
	for (size_t n = 0; n < num_params; n++) {
		switch (params[n].attr) {
		case THREAD_PARAM_ATTR_VALUE_OUT:
		case THREAD_PARAM_ATTR_VALUE_INOUT:
			params[n].u.value.a = arg->params[n].u.value.a;
			params[n].u.value.b = arg->params[n].u.value.b;
			params[n].u.value.c = arg->params[n].u.value.c;
			break;
		case THREAD_PARAM_ATTR_MEMREF_OUT:
		case THREAD_PARAM_ATTR_MEMREF_INOUT:
			params[n].u.memref.size = arg->params[n].u.fmem.size;
			break;
		default:
			break;
		}
	}

	return arg->ret;
}

uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
			struct thread_param *params)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	struct optee_msg_arg *arg = NULL;
	uint32_t ret = 0;

	ret = get_rpc_arg(cmd, num_params, params, &arg);
	if (ret)
		return ret;

	thread_rpc(&rpc_arg);

	return get_rpc_arg_res(arg, num_params, params);
}

static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
	uint32_t res2 = 0;
	uint32_t res = 0;

	DMSG("freeing cookie %#"PRIx64, cookie);

	res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);

	mobj_put(mobj);
	res2 = mobj_ffa_unregister_by_cookie(cookie);
	if (res2)
		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
		     cookie, res2);
	if (!res)
		thread_rpc(&rpc_arg);
}

static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
	struct optee_msg_arg *arg = NULL;
	unsigned int internal_offset = 0;
	struct mobj *mobj = NULL;
	uint64_t cookie = 0;

	if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
		return NULL;

	thread_rpc(&rpc_arg);

	if (arg->num_params != 1 ||
	    arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
		return NULL;

	internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs);
	cookie = READ_ONCE(arg->params->u.fmem.global_id);
	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
	if (!mobj) {
		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
		     cookie, internal_offset);
		return NULL;
	}

	assert(mobj_is_nonsec(mobj));

	if (mobj->size < size) {
		DMSG("Mobj %#"PRIx64": wrong size", cookie);
		mobj_put(mobj);
		return NULL;
	}

	if (mobj_inc_map(mobj)) {
		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
		mobj_put(mobj);
		return NULL;
	}

	return mobj;
}

struct mobj *thread_rpc_alloc_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
}

struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
}

void thread_rpc_free_kernel_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL, mobj_get_cookie(mobj), mobj);
}

void thread_rpc_free_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
			mobj);
}

struct mobj *thread_rpc_alloc_global_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
}

void thread_rpc_free_global_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL, mobj_get_cookie(mobj),
			mobj);
}

void thread_spmc_register_secondary_ep(vaddr_t ep)
{
	unsigned long ret = 0;

	/* Let the SPM know the entry point for secondary CPUs */
	ret = thread_smc(FFA_SECONDARY_EP_REGISTER_64, ep, 0, 0);

	if (ret != FFA_SUCCESS_32 && ret != FFA_SUCCESS_64)
		EMSG("FFA_SECONDARY_EP_REGISTER_64 ret %#lx", ret);
}

#if defined(CFG_CORE_SEL1_SPMC)
static TEE_Result spmc_init(void)
{
	my_endpoint_id = SPMC_ENDPOINT_ID;
	DMSG("My endpoint ID %#x", my_endpoint_id);

	/*
	 * If the SPMD thinks we are version 1.0 it will report version 1.0
	 * to normal world regardless of what version we query the SPM
	 * with. However, if the SPMD thinks we are version 1.1 it will
	 * forward queries from normal world and let us negotiate the
	 * version. So by setting version 1.0 here we should be compatible
	 * either way.
	 *
	 * Note that disagreement on the negotiated version means that
	 * we'll have communication problems with normal world.
	 */
	my_rxtx.ffa_vers = FFA_VERSION_1_0;

	return TEE_SUCCESS;
}
#else /* !defined(CFG_CORE_SEL1_SPMC) */
static bool is_ffa_success(uint32_t fid)
{
#ifdef ARM64
	if (fid == FFA_SUCCESS_64)
		return true;
#endif
	return fid == FFA_SUCCESS_32;
}

static void spmc_rxtx_map(struct ffa_rxtx *rxtx)
{
	struct thread_smc_args args = {
#ifdef ARM64
		.a0 = FFA_RXTX_MAP_64,
#else
		.a0 = FFA_RXTX_MAP_32,
#endif
		.a1 = virt_to_phys(rxtx->tx),
		.a2 = virt_to_phys(rxtx->rx),
		.a3 = 1,
	};

	thread_smccc(&args);
	if (!is_ffa_success(args.a0)) {
		if (args.a0 == FFA_ERROR)
			EMSG("rxtx map failed with error %ld", args.a2);
		else
			EMSG("rxtx map failed");
		panic();
	}
}

static uint16_t get_my_id(void)
{
	struct thread_smc_args args = {
		.a0 = FFA_ID_GET,
	};

	thread_smccc(&args);
	if (!is_ffa_success(args.a0)) {
		if (args.a0 == FFA_ERROR)
			EMSG("Get id failed with error %ld", args.a2);
		else
			EMSG("Get id failed");
		panic();
	}

	return args.a2;
}

static uint32_t get_ffa_version(uint32_t my_version)
{
	struct thread_smc_args args = {
		.a0 = FFA_VERSION,
		.a1 = my_version,
	};

	thread_smccc(&args);
	if (args.a0 & BIT(31)) {
		EMSG("FF-A version failed with error %ld", args.a0);
		panic();
	}

	return args.a0;
}
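
/*
 * When OP-TEE is not the SPMC it retrieves shared memory from the SPMC
 * with FFA_MEM_RETRIEVE_REQ: the request descriptor is built in our TX
 * buffer and, on success, the SPMC places the memory region description
 * in our RX buffer, which spmc_retrieve_req() parses and hands back to
 * the caller. The RX buffer is released with FFA_RX_RELEASE once the
 * caller is done with it, see thread_spmc_populate_mobj_from_rx().
 */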

static void *spmc_retrieve_req(uint64_t cookie,
			       struct ffa_mem_transaction_x *trans)
{
	struct ffa_mem_access *acc_descr_array = NULL;
	struct ffa_mem_access_perm *perm_descr = NULL;
	struct thread_smc_args args = {
		.a0 = FFA_MEM_RETRIEVE_REQ_32,
		.a3 = 0, /* Address, Using TX -> MBZ */
		.a4 = 0, /* Using TX -> MBZ */
	};
	size_t size = 0;
	int rc = 0;

	if (my_rxtx.ffa_vers == FFA_VERSION_1_0) {
		struct ffa_mem_transaction_1_0 *trans_descr = my_rxtx.tx;

		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
		memset(trans_descr, 0, size);
		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
		trans_descr->global_handle = cookie;
		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
		trans_descr->mem_access_count = 1;
		acc_descr_array = trans_descr->mem_access_array;
	} else {
		struct ffa_mem_transaction_1_1 *trans_descr = my_rxtx.tx;

		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
		memset(trans_descr, 0, size);
		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
		trans_descr->global_handle = cookie;
		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
		trans_descr->mem_access_count = 1;
		trans_descr->mem_access_offs = sizeof(*trans_descr);
		trans_descr->mem_access_size = sizeof(struct ffa_mem_access);
		acc_descr_array = (void *)((vaddr_t)my_rxtx.tx +
					   sizeof(*trans_descr));
	}
	acc_descr_array->region_offs = 0;
	acc_descr_array->reserved = 0;
	perm_descr = &acc_descr_array->access_perm;
	perm_descr->endpoint_id = my_endpoint_id;
	perm_descr->perm = FFA_MEM_ACC_RW;
	perm_descr->flags = 0;

	args.a1 = size; /* Total Length */
	args.a2 = size; /* Frag Length == Total length */
	thread_smccc(&args);
	if (args.a0 != FFA_MEM_RETRIEVE_RESP) {
		if (args.a0 == FFA_ERROR)
			EMSG("Failed to fetch cookie %#"PRIx64" error code %d",
			     cookie, (int)args.a2);
		else
			EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64,
			     cookie, args.a0);
		return NULL;
	}
	rc = spmc_read_mem_transaction(my_rxtx.ffa_vers, my_rxtx.rx,
				       my_rxtx.size, trans);
	if (rc) {
		EMSG("Memory transaction failure for cookie %#"PRIx64" rc %d",
		     cookie, rc);
		return NULL;
	}

	return my_rxtx.rx;
}

void thread_spmc_relinquish(uint64_t cookie)
{
	struct ffa_mem_relinquish *relinquish_desc = my_rxtx.tx;
	struct thread_smc_args args = {
		.a0 = FFA_MEM_RELINQUISH,
	};

	memset(relinquish_desc, 0, sizeof(*relinquish_desc));
	relinquish_desc->handle = cookie;
	relinquish_desc->flags = 0;
	relinquish_desc->endpoint_count = 1;
	relinquish_desc->endpoint_id_array[0] = my_endpoint_id;
	thread_smccc(&args);
	if (!is_ffa_success(args.a0))
		EMSG("Failed to relinquish cookie %#"PRIx64, cookie);
}

static int set_pages(struct ffa_address_range *regions,
		     unsigned int num_regions, unsigned int num_pages,
		     struct mobj_ffa *mf)
{
	unsigned int n = 0;
	unsigned int idx = 0;

	for (n = 0; n < num_regions; n++) {
		unsigned int page_count = READ_ONCE(regions[n].page_count);
		uint64_t addr = READ_ONCE(regions[n].address);

		if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count))
			return FFA_INVALID_PARAMETERS;
	}

	if (idx != num_pages)
		return FFA_INVALID_PARAMETERS;

	return 0;
}

struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie)
{
	struct mobj_ffa *ret = NULL;
	struct ffa_mem_transaction_x retrieve_desc = { };
	struct ffa_mem_access *descr_array = NULL;
	struct ffa_mem_region *descr = NULL;
	struct mobj_ffa *mf = NULL;
	unsigned int num_pages = 0;
	unsigned int offs = 0;
	void *buf = NULL;
	struct thread_smc_args ffa_rx_release_args = {
		.a0 = FFA_RX_RELEASE
	};

	/*
	 * OP-TEE only supports a single mem_region while the specification
	 * allows for more than one.
	 */
	buf = spmc_retrieve_req(cookie, &retrieve_desc);
	if (!buf) {
		EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64,
		     cookie);
		return NULL;
	}

	descr_array = (void *)((vaddr_t)buf + retrieve_desc.mem_access_offs);
	offs = READ_ONCE(descr_array->region_offs);
	descr = (struct ffa_mem_region *)((vaddr_t)buf + offs);

	num_pages = READ_ONCE(descr->total_page_count);
	mf = mobj_ffa_spmc_new(cookie, num_pages);
	if (!mf)
		goto out;

	if (set_pages(descr->address_range_array,
		      READ_ONCE(descr->address_range_count), num_pages, mf)) {
		mobj_ffa_spmc_delete(mf);
		goto out;
	}

	ret = mf;

out:
	/* Release RX buffer after the mem retrieve request. */
	thread_smccc(&ffa_rx_release_args);

	return ret;
}

static TEE_Result spmc_init(void)
{
	unsigned int major = 0;
	unsigned int minor __maybe_unused = 0;
	uint32_t my_vers = 0;
	uint32_t vers = 0;

	my_vers = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR);
	vers = get_ffa_version(my_vers);
	major = (vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK;
	minor = (vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK;
	DMSG("SPMC reported version %u.%u", major, minor);
	if (major != FFA_VERSION_MAJOR) {
		EMSG("Incompatible major version %u, expected %u",
		     major, FFA_VERSION_MAJOR);
		panic();
	}
	if (vers < my_vers)
		my_vers = vers;
	DMSG("Using version %u.%u",
	     (my_vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK,
	     (my_vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK);
	my_rxtx.ffa_vers = my_vers;

	spmc_rxtx_map(&my_rxtx);
	my_endpoint_id = get_my_id();
	DMSG("My endpoint ID %#x", my_endpoint_id);

	return TEE_SUCCESS;
}
#endif /* !defined(CFG_CORE_SEL1_SPMC) */

/*
 * boot_final() is always done before exiting at the end of boot
 * initialization. With virtualization the init-calls are only done once
 * an OP-TEE partition has been created, so in that case we have to
 * initialize via boot_final() to make sure we have a value assigned
 * before it's used the first time.
 */
#ifdef CFG_NS_VIRTUALIZATION
boot_final(spmc_init);
#else
service_init(spmc_init);
#endif