// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2020-2021, Linaro Limited.
 * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
 */

#include <assert.h>
#include <ffa.h>
#include <initcall.h>
#include <io.h>
#include <kernel/interrupt.h>
#include <kernel/panic.h>
#include <kernel/secure_partition.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/tee_misc.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/thread_spmc.h>
#include <kernel/virtualization.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <optee_ffa.h>
#include <optee_msg.h>
#include <optee_rpc_cmd.h>
#include <string.h>
#include <sys/queue.h>
#include <tee/entry_std.h>
#include <tee/uuid.h>
#include <util.h>

#if defined(CFG_CORE_SEL1_SPMC)
struct mem_share_state {
	struct mobj_ffa *mf;
	unsigned int page_count;
	unsigned int region_count;
	unsigned int current_page_idx;
};

struct mem_frag_state {
	struct mem_share_state share;
	tee_mm_entry_t *mm;
	unsigned int frag_offset;
	SLIST_ENTRY(mem_frag_state) link;
};
#endif

/* Initialized in spmc_init() below */
static uint16_t my_endpoint_id __nex_bss;
#ifdef CFG_CORE_SEL1_SPMC
static const uint32_t my_part_props = FFA_PART_PROP_DIRECT_REQ_RECV |
				      FFA_PART_PROP_DIRECT_REQ_SEND |
#ifdef CFG_NS_VIRTUALIZATION
				      FFA_PART_PROP_NOTIF_CREATED |
				      FFA_PART_PROP_NOTIF_DESTROYED |
#endif
#ifdef ARM64
				      FFA_PART_PROP_AARCH64_STATE |
#endif
				      FFA_PART_PROP_IS_PE_ID;

static uint32_t my_uuid_words[] = {
	/*
	 * - if the SPMC is in S-EL2 this UUID describes OP-TEE as an S-EL1
	 *   SP, or
	 * - if the SPMC is in S-EL1 then this UUID is for OP-TEE as a
	 *   logical partition, residing in the same exception level as the
	 *   SPMC
	 * UUID 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
	 */
	0xe0786148, 0xe311f8e7, 0x02005ebc, 0x1bc5d5a5,
};

/*
 * If struct ffa_rxtx::size is 0 RX/TX buffers are not mapped or initialized.
 *
 * struct ffa_rxtx::spinlock protects the variables below from concurrent
 * access; this includes the content of struct ffa_rxtx::rx and
 * @frag_state_head.
 *
 * struct ffa_rxtx::tx_is_mine is true when we may write to struct
 * ffa_rxtx::tx and false when it is owned by normal world.
 *
 * Note that we can't prevent normal world from updating the content of
 * these buffers, so we must always be careful when reading, even while we
 * hold the lock.
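 *
 * Ownership of the TX buffer follows that flag: spmc_handle_rxtx_map()
 * sets it when the buffers are mapped, spmc_handle_partition_info_get()
 * clears it after writing partition info into the buffer, and
 * spmc_handle_rx_release() sets it again when the receiver releases the
 * buffer (the receiver's RX buffer is our TX buffer).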
87 */ 88 89 static struct ffa_rxtx my_rxtx __nex_bss; 90 91 static bool is_nw_buf(struct ffa_rxtx *rxtx) 92 { 93 return rxtx == &my_rxtx; 94 } 95 96 static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head = 97 SLIST_HEAD_INITIALIZER(&frag_state_head); 98 #else 99 static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE); 100 static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE); 101 static struct ffa_rxtx my_rxtx = { 102 .rx = __rx_buf, 103 .tx = __tx_buf, 104 .size = sizeof(__rx_buf), 105 }; 106 #endif 107 108 static uint32_t swap_src_dst(uint32_t src_dst) 109 { 110 return (src_dst >> 16) | (src_dst << 16); 111 } 112 113 static uint16_t get_sender_id(uint32_t src_dst) 114 { 115 return src_dst >> 16; 116 } 117 118 void spmc_set_args(struct thread_smc_args *args, uint32_t fid, uint32_t src_dst, 119 uint32_t w2, uint32_t w3, uint32_t w4, uint32_t w5) 120 { 121 *args = (struct thread_smc_args){ .a0 = fid, 122 .a1 = src_dst, 123 .a2 = w2, 124 .a3 = w3, 125 .a4 = w4, 126 .a5 = w5, }; 127 } 128 129 uint32_t spmc_exchange_version(uint32_t vers, struct ffa_rxtx *rxtx) 130 { 131 /* 132 * No locking, if the caller does concurrent calls to this it's 133 * only making a mess for itself. We must be able to renegotiate 134 * the FF-A version in order to support differing versions between 135 * the loader and the driver. 136 */ 137 if (vers < FFA_VERSION_1_1) 138 rxtx->ffa_vers = FFA_VERSION_1_0; 139 else 140 rxtx->ffa_vers = FFA_VERSION_1_1; 141 142 return rxtx->ffa_vers; 143 } 144 145 #if defined(CFG_CORE_SEL1_SPMC) 146 static void handle_features(struct thread_smc_args *args) 147 { 148 uint32_t ret_fid = 0; 149 uint32_t ret_w2 = FFA_PARAM_MBZ; 150 151 switch (args->a1) { 152 #ifdef ARM64 153 case FFA_RXTX_MAP_64: 154 #endif 155 case FFA_RXTX_MAP_32: 156 ret_fid = FFA_SUCCESS_32; 157 ret_w2 = 0; /* 4kB Minimum buffer size and alignment boundary */ 158 break; 159 #ifdef ARM64 160 case FFA_MEM_SHARE_64: 161 #endif 162 case FFA_MEM_SHARE_32: 163 ret_fid = FFA_SUCCESS_32; 164 /* 165 * Partition manager supports transmission of a memory 166 * transaction descriptor in a buffer dynamically allocated 167 * by the endpoint. 
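		 *
		 * This support is reported back by setting bit 0 in w2 of
		 * the FFA_SUCCESS response below.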
168 */ 169 ret_w2 = BIT(0); 170 break; 171 172 case FFA_ERROR: 173 case FFA_VERSION: 174 case FFA_SUCCESS_32: 175 #ifdef ARM64 176 case FFA_SUCCESS_64: 177 #endif 178 case FFA_FEATURES: 179 case FFA_SPM_ID_GET: 180 case FFA_MEM_FRAG_TX: 181 case FFA_MEM_RECLAIM: 182 case FFA_MSG_SEND_DIRECT_REQ_32: 183 case FFA_INTERRUPT: 184 case FFA_PARTITION_INFO_GET: 185 case FFA_RXTX_UNMAP: 186 case FFA_RX_RELEASE: 187 case FFA_FEATURE_MANAGED_EXIT_INTR: 188 ret_fid = FFA_SUCCESS_32; 189 break; 190 default: 191 ret_fid = FFA_ERROR; 192 ret_w2 = FFA_NOT_SUPPORTED; 193 break; 194 } 195 196 spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ, 197 FFA_PARAM_MBZ, FFA_PARAM_MBZ); 198 } 199 200 static int map_buf(paddr_t pa, unsigned int sz, void **va_ret) 201 { 202 tee_mm_entry_t *mm = NULL; 203 204 if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz)) 205 return FFA_INVALID_PARAMETERS; 206 207 mm = tee_mm_alloc(&tee_mm_shm, sz); 208 if (!mm) 209 return FFA_NO_MEMORY; 210 211 if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa, 212 sz / SMALL_PAGE_SIZE, 213 MEM_AREA_NSEC_SHM)) { 214 tee_mm_free(mm); 215 return FFA_INVALID_PARAMETERS; 216 } 217 218 *va_ret = (void *)tee_mm_get_smem(mm); 219 return 0; 220 } 221 222 static void handle_spm_id_get(struct thread_smc_args *args) 223 { 224 spmc_set_args(args, FFA_SUCCESS_32, FFA_PARAM_MBZ, my_endpoint_id, 225 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ); 226 } 227 228 static void unmap_buf(void *va, size_t sz) 229 { 230 tee_mm_entry_t *mm = tee_mm_find(&tee_mm_shm, (vaddr_t)va); 231 232 assert(mm); 233 core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE); 234 tee_mm_free(mm); 235 } 236 237 void spmc_handle_rxtx_map(struct thread_smc_args *args, struct ffa_rxtx *rxtx) 238 { 239 int rc = 0; 240 uint32_t ret_fid = FFA_ERROR; 241 unsigned int sz = 0; 242 paddr_t rx_pa = 0; 243 paddr_t tx_pa = 0; 244 void *rx = NULL; 245 void *tx = NULL; 246 247 cpu_spin_lock(&rxtx->spinlock); 248 249 if (args->a3 & GENMASK_64(63, 6)) { 250 rc = FFA_INVALID_PARAMETERS; 251 goto out; 252 } 253 254 sz = args->a3 * SMALL_PAGE_SIZE; 255 if (!sz) { 256 rc = FFA_INVALID_PARAMETERS; 257 goto out; 258 } 259 /* TX/RX are swapped compared to the caller */ 260 tx_pa = args->a2; 261 rx_pa = args->a1; 262 263 if (rxtx->size) { 264 rc = FFA_DENIED; 265 goto out; 266 } 267 268 /* 269 * If the buffer comes from a SP the address is virtual and already 270 * mapped. 271 */ 272 if (is_nw_buf(rxtx)) { 273 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 274 enum teecore_memtypes mt = MEM_AREA_NEX_NSEC_SHM; 275 bool tx_alloced = false; 276 277 /* 278 * With virtualization we establish this mapping in 279 * the nexus mapping which then is replicated to 280 * each partition. 281 * 282 * This means that this mapping must be done before 283 * any partition is created and then must not be 284 * changed. 285 */ 286 287 /* 288 * core_mmu_add_mapping() may reuse previous 289 * mappings. First check if there's any mappings to 290 * reuse so we know how to clean up in case of 291 * failure. 
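			 *
			 * If phys_to_virt() already resolves an address the
			 * existing mapping is reused and must not be removed
			 * again on the error path; only a mapping added here
			 * (tx_alloced) is removed on failure.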
292 */ 293 tx = phys_to_virt(tx_pa, mt, sz); 294 rx = phys_to_virt(rx_pa, mt, sz); 295 if (!tx) { 296 tx = core_mmu_add_mapping(mt, tx_pa, sz); 297 if (!tx) { 298 rc = FFA_NO_MEMORY; 299 goto out; 300 } 301 tx_alloced = true; 302 } 303 if (!rx) 304 rx = core_mmu_add_mapping(mt, rx_pa, sz); 305 306 if (!rx) { 307 if (tx_alloced && tx) 308 core_mmu_remove_mapping(mt, tx, sz); 309 rc = FFA_NO_MEMORY; 310 goto out; 311 } 312 } else { 313 rc = map_buf(tx_pa, sz, &tx); 314 if (rc) 315 goto out; 316 rc = map_buf(rx_pa, sz, &rx); 317 if (rc) { 318 unmap_buf(tx, sz); 319 goto out; 320 } 321 } 322 rxtx->tx = tx; 323 rxtx->rx = rx; 324 } else { 325 if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) { 326 rc = FFA_INVALID_PARAMETERS; 327 goto out; 328 } 329 330 if (!virt_to_phys((void *)tx_pa) || 331 !virt_to_phys((void *)rx_pa)) { 332 rc = FFA_INVALID_PARAMETERS; 333 goto out; 334 } 335 336 rxtx->tx = (void *)tx_pa; 337 rxtx->rx = (void *)rx_pa; 338 } 339 340 rxtx->size = sz; 341 rxtx->tx_is_mine = true; 342 ret_fid = FFA_SUCCESS_32; 343 DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx); 344 DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx); 345 out: 346 cpu_spin_unlock(&rxtx->spinlock); 347 spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ, 348 FFA_PARAM_MBZ, FFA_PARAM_MBZ); 349 } 350 351 void spmc_handle_rxtx_unmap(struct thread_smc_args *args, struct ffa_rxtx *rxtx) 352 { 353 uint32_t ret_fid = FFA_ERROR; 354 int rc = FFA_INVALID_PARAMETERS; 355 356 cpu_spin_lock(&rxtx->spinlock); 357 358 if (!rxtx->size) 359 goto out; 360 361 /* We don't unmap the SP memory as the SP might still use it */ 362 if (is_nw_buf(rxtx)) { 363 unmap_buf(rxtx->rx, rxtx->size); 364 unmap_buf(rxtx->tx, rxtx->size); 365 } 366 rxtx->size = 0; 367 rxtx->rx = NULL; 368 rxtx->tx = NULL; 369 ret_fid = FFA_SUCCESS_32; 370 rc = 0; 371 out: 372 cpu_spin_unlock(&rxtx->spinlock); 373 spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ, 374 FFA_PARAM_MBZ, FFA_PARAM_MBZ); 375 } 376 377 void spmc_handle_rx_release(struct thread_smc_args *args, struct ffa_rxtx *rxtx) 378 { 379 uint32_t ret_fid = 0; 380 int rc = 0; 381 382 cpu_spin_lock(&rxtx->spinlock); 383 /* The senders RX is our TX */ 384 if (!rxtx->size || rxtx->tx_is_mine) { 385 ret_fid = FFA_ERROR; 386 rc = FFA_DENIED; 387 } else { 388 ret_fid = FFA_SUCCESS_32; 389 rc = 0; 390 rxtx->tx_is_mine = true; 391 } 392 cpu_spin_unlock(&rxtx->spinlock); 393 394 spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ, 395 FFA_PARAM_MBZ, FFA_PARAM_MBZ); 396 } 397 398 static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3) 399 { 400 return !w0 && !w1 && !w2 && !w3; 401 } 402 403 static bool is_my_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3) 404 { 405 /* 406 * This depends on which UUID we have been assigned. 407 * TODO add a generic mechanism to obtain our UUID. 
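	 * The four words arrive in w1-w4 of FFA_PARTITION_INFO_GET, which
	 * is how spmc_handle_partition_info_get() ends up calling this
	 * helper.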
408 * 409 * The test below is for the hard coded UUID 410 * 486178e0-e7f8-11e3-bc5e-0002a5d5c51b 411 */ 412 return w0 == my_uuid_words[0] && w1 == my_uuid_words[1] && 413 w2 == my_uuid_words[2] && w3 == my_uuid_words[3]; 414 } 415 416 TEE_Result spmc_fill_partition_entry(uint32_t ffa_vers, void *buf, size_t blen, 417 size_t idx, uint16_t endpoint_id, 418 uint16_t execution_context, 419 uint32_t part_props, 420 const uint32_t uuid_words[4]) 421 { 422 struct ffa_partition_info_x *fpi = NULL; 423 size_t fpi_size = sizeof(*fpi); 424 425 if (ffa_vers >= FFA_VERSION_1_1) 426 fpi_size += FFA_UUID_SIZE; 427 428 if ((idx + 1) * fpi_size > blen) 429 return TEE_ERROR_OUT_OF_MEMORY; 430 431 fpi = (void *)((vaddr_t)buf + idx * fpi_size); 432 fpi->id = endpoint_id; 433 /* Number of execution contexts implemented by this partition */ 434 fpi->execution_context = execution_context; 435 436 fpi->partition_properties = part_props; 437 438 if (ffa_vers >= FFA_VERSION_1_1) { 439 if (uuid_words) 440 memcpy(fpi->uuid, uuid_words, FFA_UUID_SIZE); 441 else 442 memset(fpi->uuid, 0, FFA_UUID_SIZE); 443 } 444 445 return TEE_SUCCESS; 446 } 447 448 static int handle_partition_info_get_all(size_t *elem_count, 449 struct ffa_rxtx *rxtx, bool count_only) 450 { 451 if (!count_only) { 452 /* Add OP-TEE SP */ 453 if (spmc_fill_partition_entry(rxtx->ffa_vers, rxtx->tx, 454 rxtx->size, 0, my_endpoint_id, 455 CFG_TEE_CORE_NB_CORE, 456 my_part_props, my_uuid_words)) 457 return FFA_NO_MEMORY; 458 } 459 *elem_count = 1; 460 461 if (IS_ENABLED(CFG_SECURE_PARTITION)) { 462 if (sp_partition_info_get(rxtx->ffa_vers, rxtx->tx, rxtx->size, 463 NULL, elem_count, count_only)) 464 return FFA_NO_MEMORY; 465 } 466 467 return FFA_OK; 468 } 469 470 void spmc_handle_partition_info_get(struct thread_smc_args *args, 471 struct ffa_rxtx *rxtx) 472 { 473 TEE_Result res = TEE_SUCCESS; 474 uint32_t ret_fid = FFA_ERROR; 475 uint32_t rc = 0; 476 bool count_only = args->a5 & FFA_PARTITION_INFO_GET_COUNT_FLAG; 477 478 if (!count_only) { 479 cpu_spin_lock(&rxtx->spinlock); 480 481 if (!rxtx->size || !rxtx->tx_is_mine) { 482 rc = FFA_BUSY; 483 goto out; 484 } 485 } 486 487 if (is_nil_uuid(args->a1, args->a2, args->a3, args->a4)) { 488 size_t elem_count = 0; 489 490 ret_fid = handle_partition_info_get_all(&elem_count, rxtx, 491 count_only); 492 493 if (ret_fid) { 494 rc = ret_fid; 495 ret_fid = FFA_ERROR; 496 } else { 497 ret_fid = FFA_SUCCESS_32; 498 rc = elem_count; 499 } 500 501 goto out; 502 } 503 504 if (is_my_uuid(args->a1, args->a2, args->a3, args->a4)) { 505 if (!count_only) { 506 res = spmc_fill_partition_entry(rxtx->ffa_vers, 507 rxtx->tx, rxtx->size, 0, 508 my_endpoint_id, 509 CFG_TEE_CORE_NB_CORE, 510 my_part_props, 511 my_uuid_words); 512 if (res) { 513 ret_fid = FFA_ERROR; 514 rc = FFA_INVALID_PARAMETERS; 515 goto out; 516 } 517 } 518 rc = 1; 519 } else if (IS_ENABLED(CFG_SECURE_PARTITION)) { 520 uint32_t uuid_array[4] = { 0 }; 521 TEE_UUID uuid = { }; 522 size_t count = 0; 523 524 uuid_array[0] = args->a1; 525 uuid_array[1] = args->a2; 526 uuid_array[2] = args->a3; 527 uuid_array[3] = args->a4; 528 tee_uuid_from_octets(&uuid, (uint8_t *)uuid_array); 529 530 res = sp_partition_info_get(rxtx->ffa_vers, rxtx->tx, 531 rxtx->size, &uuid, &count, 532 count_only); 533 if (res != TEE_SUCCESS) { 534 ret_fid = FFA_ERROR; 535 rc = FFA_INVALID_PARAMETERS; 536 goto out; 537 } 538 rc = count; 539 } else { 540 ret_fid = FFA_ERROR; 541 rc = FFA_INVALID_PARAMETERS; 542 goto out; 543 } 544 545 ret_fid = FFA_SUCCESS_32; 546 547 out: 548 spmc_set_args(args, 
ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ, 549 FFA_PARAM_MBZ, FFA_PARAM_MBZ); 550 if (!count_only) { 551 rxtx->tx_is_mine = false; 552 cpu_spin_unlock(&rxtx->spinlock); 553 } 554 } 555 #endif /*CFG_CORE_SEL1_SPMC*/ 556 557 static void handle_yielding_call(struct thread_smc_args *args) 558 { 559 TEE_Result res = 0; 560 561 thread_check_canaries(); 562 563 if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) { 564 /* Note connection to struct thread_rpc_arg::ret */ 565 thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6, 566 0); 567 res = TEE_ERROR_BAD_PARAMETERS; 568 } else { 569 thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5, 570 args->a6, args->a7); 571 res = TEE_ERROR_BUSY; 572 } 573 spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32, 574 swap_src_dst(args->a1), 0, res, 0, 0); 575 } 576 577 static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5) 578 { 579 uint64_t cookie = reg_pair_to_64(a5, a4); 580 uint32_t res = 0; 581 582 res = mobj_ffa_unregister_by_cookie(cookie); 583 switch (res) { 584 case TEE_SUCCESS: 585 case TEE_ERROR_ITEM_NOT_FOUND: 586 return 0; 587 case TEE_ERROR_BUSY: 588 EMSG("res %#"PRIx32, res); 589 return FFA_BUSY; 590 default: 591 EMSG("res %#"PRIx32, res); 592 return FFA_INVALID_PARAMETERS; 593 } 594 } 595 596 static void handle_blocking_call(struct thread_smc_args *args) 597 { 598 switch (args->a3) { 599 case OPTEE_FFA_GET_API_VERSION: 600 spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32, 601 swap_src_dst(args->a1), 0, 602 OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR, 603 0); 604 break; 605 case OPTEE_FFA_GET_OS_VERSION: 606 spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32, 607 swap_src_dst(args->a1), 0, 608 CFG_OPTEE_REVISION_MAJOR, 609 CFG_OPTEE_REVISION_MINOR, TEE_IMPL_GIT_SHA1); 610 break; 611 case OPTEE_FFA_EXCHANGE_CAPABILITIES: 612 spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32, 613 swap_src_dst(args->a1), 0, 0, 614 THREAD_RPC_MAX_NUM_PARAMS, 615 OPTEE_FFA_SEC_CAP_ARG_OFFSET); 616 break; 617 case OPTEE_FFA_UNREGISTER_SHM: 618 spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32, 619 swap_src_dst(args->a1), 0, 620 handle_unregister_shm(args->a4, args->a5), 0, 0); 621 break; 622 default: 623 EMSG("Unhandled blocking service ID %#"PRIx32, 624 (uint32_t)args->a3); 625 panic(); 626 } 627 } 628 629 static void handle_framework_direct_request(struct thread_smc_args *args, 630 struct ffa_rxtx *rxtx) 631 { 632 uint32_t w0 = FFA_ERROR; 633 uint32_t w1 = FFA_PARAM_MBZ; 634 uint32_t w2 = FFA_NOT_SUPPORTED; 635 uint32_t w3 = FFA_PARAM_MBZ; 636 637 switch (args->a2 & FFA_MSG_TYPE_MASK) { 638 case FFA_MSG_SEND_VM_CREATED: 639 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 640 uint16_t guest_id = args->a5; 641 TEE_Result res = virt_guest_created(guest_id); 642 643 w0 = FFA_MSG_SEND_DIRECT_RESP_32; 644 w1 = swap_src_dst(args->a1); 645 w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_CREATED; 646 if (res == TEE_SUCCESS) 647 w3 = FFA_OK; 648 else if (res == TEE_ERROR_OUT_OF_MEMORY) 649 w3 = FFA_DENIED; 650 else 651 w3 = FFA_INVALID_PARAMETERS; 652 } 653 break; 654 case FFA_MSG_VERSION_REQ: 655 w0 = FFA_MSG_SEND_DIRECT_RESP_32; 656 w1 = swap_src_dst(args->a1); 657 w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_VERSION_RESP; 658 w3 = spmc_exchange_version(args->a3, rxtx); 659 break; 660 default: 661 break; 662 } 663 spmc_set_args(args, w0, w1, w2, w3, FFA_PARAM_MBZ, FFA_PARAM_MBZ); 664 } 665 666 static void handle_direct_request(struct thread_smc_args *args, 667 struct ffa_rxtx *rxtx) 668 { 669 if (IS_ENABLED(CFG_SECURE_PARTITION) && 670 FFA_DST(args->a1) != my_endpoint_id) { 
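		/* The request is addressed to an SP, not to OP-TEE itself */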
		spmc_sp_start_thread(args);
		return;
	}

	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
		handle_framework_direct_request(args, rxtx);
		return;
	}

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
	    virt_set_guest(get_sender_id(args->a1))) {
		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
			      swap_src_dst(args->a1), 0,
			      TEE_ERROR_ITEM_NOT_FOUND, 0, 0);
		return;
	}

	if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
		handle_yielding_call(args);
	else
		handle_blocking_call(args);

	/*
	 * Note that handle_yielding_call() typically only returns if a
	 * thread cannot be allocated or found. virt_unset_guest() is also
	 * called from thread_state_suspend() and thread_state_free().
	 */
	virt_unset_guest();
}

int spmc_read_mem_transaction(uint32_t ffa_vers, void *buf, size_t blen,
			      struct ffa_mem_transaction_x *trans)
{
	uint16_t mem_reg_attr = 0;
	uint32_t flags = 0;
	uint32_t count = 0;
	uint32_t offs = 0;
	uint32_t size = 0;
	size_t n = 0;

	if (!IS_ALIGNED_WITH_TYPE(buf, uint64_t))
		return FFA_INVALID_PARAMETERS;

	if (ffa_vers >= FFA_VERSION_1_1) {
		struct ffa_mem_transaction_1_1 *descr = NULL;

		if (blen < sizeof(*descr))
			return FFA_INVALID_PARAMETERS;

		descr = buf;
		trans->sender_id = READ_ONCE(descr->sender_id);
		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
		flags = READ_ONCE(descr->flags);
		trans->global_handle = READ_ONCE(descr->global_handle);
		trans->tag = READ_ONCE(descr->tag);

		count = READ_ONCE(descr->mem_access_count);
		size = READ_ONCE(descr->mem_access_size);
		offs = READ_ONCE(descr->mem_access_offs);
	} else {
		struct ffa_mem_transaction_1_0 *descr = NULL;

		if (blen < sizeof(*descr))
			return FFA_INVALID_PARAMETERS;

		descr = buf;
		trans->sender_id = READ_ONCE(descr->sender_id);
		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
		flags = READ_ONCE(descr->flags);
		trans->global_handle = READ_ONCE(descr->global_handle);
		trans->tag = READ_ONCE(descr->tag);

		count = READ_ONCE(descr->mem_access_count);
		size = sizeof(struct ffa_mem_access);
		offs = offsetof(struct ffa_mem_transaction_1_0,
				mem_access_array);
	}

	if (mem_reg_attr > UINT8_MAX || flags > UINT8_MAX ||
	    size > UINT8_MAX || count > UINT8_MAX || offs > UINT16_MAX)
		return FFA_INVALID_PARAMETERS;

	/* Check that the endpoint memory access descriptor array fits */
	if (MUL_OVERFLOW(size, count, &n) || ADD_OVERFLOW(offs, n, &n) ||
	    n > blen)
		return FFA_INVALID_PARAMETERS;

	trans->mem_reg_attr = mem_reg_attr;
	trans->flags = flags;
	trans->mem_access_size = size;
	trans->mem_access_count = count;
	trans->mem_access_offs = offs;
	return 0;
}

#if defined(CFG_CORE_SEL1_SPMC)
static int get_acc_perms(vaddr_t mem_acc_base, unsigned int mem_access_size,
			 unsigned int mem_access_count, uint8_t *acc_perms,
			 unsigned int *region_offs)
{
	struct ffa_mem_access_perm *descr = NULL;
	struct ffa_mem_access *mem_acc = NULL;
	unsigned int n = 0;

	for (n = 0; n < mem_access_count; n++) {
		mem_acc = (void *)(mem_acc_base + mem_access_size * n);
		descr = &mem_acc->access_perm;
		if (READ_ONCE(descr->endpoint_id) == my_endpoint_id) {
			*acc_perms = READ_ONCE(descr->perm);
			*region_offs = READ_ONCE(mem_acc->region_offs);
			return 0;
		}
	}

	return FFA_INVALID_PARAMETERS;
}

static int
mem_share_init(struct ffa_mem_transaction_x *mem_trans, void *buf,
	       size_t blen, unsigned int *page_count,
	       unsigned int *region_count, size_t *addr_range_offs)
{
	const uint16_t exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
	const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
	struct ffa_mem_region *region_descr = NULL;
	unsigned int region_descr_offs = 0;
	uint8_t mem_acc_perm = 0;
	size_t n = 0;

	if (mem_trans->mem_reg_attr != exp_mem_reg_attr)
		return FFA_INVALID_PARAMETERS;

	/* Check that the access permissions match what's expected */
	if (get_acc_perms((vaddr_t)buf + mem_trans->mem_access_offs,
			  mem_trans->mem_access_size,
			  mem_trans->mem_access_count,
			  &mem_acc_perm, &region_descr_offs) ||
	    mem_acc_perm != exp_mem_acc_perm)
		return FFA_INVALID_PARAMETERS;

	/* Check that the Composite memory region descriptor fits */
	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
	    n > blen)
		return FFA_INVALID_PARAMETERS;

	if (!IS_ALIGNED_WITH_TYPE((vaddr_t)buf + region_descr_offs,
				  struct ffa_mem_region))
		return FFA_INVALID_PARAMETERS;

	region_descr = (struct ffa_mem_region *)((vaddr_t)buf +
						 region_descr_offs);
	*page_count = READ_ONCE(region_descr->total_page_count);
	*region_count = READ_ONCE(region_descr->address_range_count);
	*addr_range_offs = n;
	return 0;
}

static int add_mem_share_helper(struct mem_share_state *s, void *buf,
				size_t flen)
{
	unsigned int region_count = flen / sizeof(struct ffa_address_range);
	struct ffa_address_range *arange = NULL;
	unsigned int n = 0;

	if (region_count > s->region_count)
		region_count = s->region_count;

	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range))
		return FFA_INVALID_PARAMETERS;
	arange = buf;

	for (n = 0; n < region_count; n++) {
		unsigned int page_count = READ_ONCE(arange[n].page_count);
		uint64_t addr = READ_ONCE(arange[n].address);

		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
					  addr, page_count))
			return FFA_INVALID_PARAMETERS;
	}

	s->region_count -= region_count;
	if (s->region_count)
		return region_count * sizeof(*arange);

	if (s->current_page_idx != s->page_count)
		return FFA_INVALID_PARAMETERS;

	return 0;
}

static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
{
	int rc = 0;

	rc = add_mem_share_helper(&s->share, buf, flen);
	if (rc >= 0) {
		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
			/* We're not at the end of the descriptor yet */
			if (s->share.region_count)
				return s->frag_offset;

			/* We're done */
			rc = 0;
		} else {
			rc = FFA_INVALID_PARAMETERS;
		}
	}

	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
	if (rc < 0)
		mobj_ffa_sel1_spmc_delete(s->share.mf);
	else
		mobj_ffa_push_to_inactive(s->share.mf);
	free(s);

	return rc;
}

static bool is_sp_share(struct ffa_mem_transaction_x *mem_trans,
			void *buf)
{
	struct ffa_mem_access_perm *perm = NULL;
	struct ffa_mem_access *mem_acc = NULL;

	if (!IS_ENABLED(CFG_SECURE_PARTITION))
		return false;

	if (mem_trans->mem_access_count < 1)
		return false;

	mem_acc = (void *)((vaddr_t)buf + mem_trans->mem_access_offs);
	perm = &mem_acc->access_perm;

	/*
	 * perm->endpoint_id is read here only to check if the endpoint is
	 * OP-TEE.
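	 * If it is some other endpoint the share is meant for an SP and
	 * handle_mem_share_rxbuf() forwards it to spmc_sp_add_share().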
We do read it later on again, but there are some additional 906 * checks there to make sure that the data is correct. 907 */ 908 return READ_ONCE(perm->endpoint_id) != my_endpoint_id; 909 } 910 911 static int add_mem_share(struct ffa_mem_transaction_x *mem_trans, 912 tee_mm_entry_t *mm, void *buf, size_t blen, 913 size_t flen, uint64_t *global_handle) 914 { 915 int rc = 0; 916 struct mem_share_state share = { }; 917 size_t addr_range_offs = 0; 918 uint64_t cookie = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID; 919 size_t n = 0; 920 921 rc = mem_share_init(mem_trans, buf, flen, &share.page_count, 922 &share.region_count, &addr_range_offs); 923 if (rc) 924 return rc; 925 926 if (MUL_OVERFLOW(share.region_count, 927 sizeof(struct ffa_address_range), &n) || 928 ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen) 929 return FFA_INVALID_PARAMETERS; 930 931 if (mem_trans->global_handle) 932 cookie = mem_trans->global_handle; 933 share.mf = mobj_ffa_sel1_spmc_new(cookie, share.page_count); 934 if (!share.mf) 935 return FFA_NO_MEMORY; 936 937 if (flen != blen) { 938 struct mem_frag_state *s = calloc(sizeof(*s), 1); 939 940 if (!s) { 941 rc = FFA_NO_MEMORY; 942 goto err; 943 } 944 s->share = share; 945 s->mm = mm; 946 s->frag_offset = addr_range_offs; 947 948 SLIST_INSERT_HEAD(&frag_state_head, s, link); 949 rc = add_mem_share_frag(s, (char *)buf + addr_range_offs, 950 flen - addr_range_offs); 951 952 if (rc >= 0) 953 *global_handle = mobj_ffa_get_cookie(share.mf); 954 955 return rc; 956 } 957 958 rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs, 959 flen - addr_range_offs); 960 if (rc) { 961 /* 962 * Number of consumed bytes may be returned instead of 0 for 963 * done. 964 */ 965 rc = FFA_INVALID_PARAMETERS; 966 goto err; 967 } 968 969 *global_handle = mobj_ffa_push_to_inactive(share.mf); 970 971 return 0; 972 err: 973 mobj_ffa_sel1_spmc_delete(share.mf); 974 return rc; 975 } 976 977 static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen, 978 unsigned int page_count, 979 uint64_t *global_handle, struct ffa_rxtx *rxtx) 980 { 981 struct ffa_mem_transaction_x mem_trans = { }; 982 int rc = 0; 983 size_t len = 0; 984 void *buf = NULL; 985 tee_mm_entry_t *mm = NULL; 986 vaddr_t offs = pbuf & SMALL_PAGE_MASK; 987 988 if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len)) 989 return FFA_INVALID_PARAMETERS; 990 if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len)) 991 return FFA_INVALID_PARAMETERS; 992 993 /* 994 * Check that the length reported in flen is covered by len even 995 * if the offset is taken into account. 
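	 *
	 * For example, sharing two 4 kB pages (len == 8 kB) starting 1 kB
	 * into the first page leaves at most 7 kB for the descriptor, so
	 * flen must not exceed len - offs.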
996 */ 997 if (len < flen || len - offs < flen) 998 return FFA_INVALID_PARAMETERS; 999 1000 mm = tee_mm_alloc(&tee_mm_shm, len); 1001 if (!mm) 1002 return FFA_NO_MEMORY; 1003 1004 if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf, 1005 page_count, MEM_AREA_NSEC_SHM)) { 1006 rc = FFA_INVALID_PARAMETERS; 1007 goto out; 1008 } 1009 buf = (void *)(tee_mm_get_smem(mm) + offs); 1010 1011 cpu_spin_lock(&rxtx->spinlock); 1012 rc = spmc_read_mem_transaction(rxtx->ffa_vers, buf, flen, &mem_trans); 1013 if (!rc && IS_ENABLED(CFG_NS_VIRTUALIZATION) && 1014 virt_set_guest(mem_trans.sender_id)) 1015 rc = FFA_DENIED; 1016 if (!rc) 1017 rc = add_mem_share(&mem_trans, mm, buf, blen, flen, 1018 global_handle); 1019 virt_unset_guest(); 1020 cpu_spin_unlock(&rxtx->spinlock); 1021 if (rc > 0) 1022 return rc; 1023 1024 core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count); 1025 out: 1026 tee_mm_free(mm); 1027 return rc; 1028 } 1029 1030 static int handle_mem_share_rxbuf(size_t blen, size_t flen, 1031 uint64_t *global_handle, 1032 struct ffa_rxtx *rxtx) 1033 { 1034 struct ffa_mem_transaction_x mem_trans = { }; 1035 int rc = FFA_DENIED; 1036 1037 cpu_spin_lock(&rxtx->spinlock); 1038 1039 if (!rxtx->rx || flen > rxtx->size) 1040 goto out; 1041 1042 rc = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, flen, 1043 &mem_trans); 1044 if (rc) 1045 goto out; 1046 if (is_sp_share(&mem_trans, rxtx->rx)) { 1047 rc = spmc_sp_add_share(&mem_trans, rxtx, blen, 1048 global_handle, NULL); 1049 goto out; 1050 } 1051 1052 if (IS_ENABLED(CFG_NS_VIRTUALIZATION) && 1053 virt_set_guest(mem_trans.sender_id)) 1054 goto out; 1055 1056 rc = add_mem_share(&mem_trans, NULL, rxtx->rx, blen, flen, 1057 global_handle); 1058 1059 virt_unset_guest(); 1060 1061 out: 1062 cpu_spin_unlock(&rxtx->spinlock); 1063 1064 return rc; 1065 } 1066 1067 static void handle_mem_share(struct thread_smc_args *args, 1068 struct ffa_rxtx *rxtx) 1069 { 1070 uint32_t tot_len = args->a1; 1071 uint32_t frag_len = args->a2; 1072 uint64_t addr = args->a3; 1073 uint32_t page_count = args->a4; 1074 uint32_t ret_w1 = 0; 1075 uint32_t ret_w2 = FFA_INVALID_PARAMETERS; 1076 uint32_t ret_w3 = 0; 1077 uint32_t ret_fid = FFA_ERROR; 1078 uint64_t global_handle = 0; 1079 int rc = 0; 1080 1081 /* Check that the MBZs are indeed 0 */ 1082 if (args->a5 || args->a6 || args->a7) 1083 goto out; 1084 1085 /* Check that fragment length doesn't exceed total length */ 1086 if (frag_len > tot_len) 1087 goto out; 1088 1089 /* Check for 32-bit calling convention */ 1090 if (args->a0 == FFA_MEM_SHARE_32) 1091 addr &= UINT32_MAX; 1092 1093 if (!addr) { 1094 /* 1095 * The memory transaction descriptor is passed via our rx 1096 * buffer. 
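		 *
		 * In that case no pages are passed directly, so the page
		 * count must be zero.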
1097 */ 1098 if (page_count) 1099 goto out; 1100 rc = handle_mem_share_rxbuf(tot_len, frag_len, &global_handle, 1101 rxtx); 1102 } else { 1103 rc = handle_mem_share_tmem(addr, tot_len, frag_len, page_count, 1104 &global_handle, rxtx); 1105 } 1106 if (rc < 0) { 1107 ret_w2 = rc; 1108 } else if (rc > 0) { 1109 ret_fid = FFA_MEM_FRAG_RX; 1110 ret_w3 = rc; 1111 reg_pair_from_64(global_handle, &ret_w2, &ret_w1); 1112 } else { 1113 ret_fid = FFA_SUCCESS_32; 1114 reg_pair_from_64(global_handle, &ret_w3, &ret_w2); 1115 } 1116 out: 1117 spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0); 1118 } 1119 1120 static struct mem_frag_state *get_frag_state(uint64_t global_handle) 1121 { 1122 struct mem_frag_state *s = NULL; 1123 1124 SLIST_FOREACH(s, &frag_state_head, link) 1125 if (mobj_ffa_get_cookie(s->share.mf) == global_handle) 1126 return s; 1127 1128 return NULL; 1129 } 1130 1131 static void handle_mem_frag_tx(struct thread_smc_args *args, 1132 struct ffa_rxtx *rxtx) 1133 { 1134 uint64_t global_handle = reg_pair_to_64(args->a2, args->a1); 1135 size_t flen = args->a3; 1136 uint32_t endpoint_id = args->a4; 1137 struct mem_frag_state *s = NULL; 1138 tee_mm_entry_t *mm = NULL; 1139 unsigned int page_count = 0; 1140 void *buf = NULL; 1141 uint32_t ret_w1 = 0; 1142 uint32_t ret_w2 = 0; 1143 uint32_t ret_w3 = 0; 1144 uint32_t ret_fid = 0; 1145 int rc = 0; 1146 1147 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 1148 uint16_t guest_id = endpoint_id >> 16; 1149 1150 if (!guest_id || virt_set_guest(guest_id)) { 1151 rc = FFA_INVALID_PARAMETERS; 1152 goto out_set_rc; 1153 } 1154 } 1155 1156 /* 1157 * Currently we're only doing this for fragmented FFA_MEM_SHARE_* 1158 * requests. 1159 */ 1160 1161 cpu_spin_lock(&rxtx->spinlock); 1162 1163 s = get_frag_state(global_handle); 1164 if (!s) { 1165 rc = FFA_INVALID_PARAMETERS; 1166 goto out; 1167 } 1168 1169 mm = s->mm; 1170 if (mm) { 1171 if (flen > tee_mm_get_bytes(mm)) { 1172 rc = FFA_INVALID_PARAMETERS; 1173 goto out; 1174 } 1175 page_count = s->share.page_count; 1176 buf = (void *)tee_mm_get_smem(mm); 1177 } else { 1178 if (flen > rxtx->size) { 1179 rc = FFA_INVALID_PARAMETERS; 1180 goto out; 1181 } 1182 buf = rxtx->rx; 1183 } 1184 1185 rc = add_mem_share_frag(s, buf, flen); 1186 out: 1187 virt_unset_guest(); 1188 cpu_spin_unlock(&rxtx->spinlock); 1189 1190 if (rc <= 0 && mm) { 1191 core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count); 1192 tee_mm_free(mm); 1193 } 1194 1195 out_set_rc: 1196 if (rc < 0) { 1197 ret_fid = FFA_ERROR; 1198 ret_w2 = rc; 1199 } else if (rc > 0) { 1200 ret_fid = FFA_MEM_FRAG_RX; 1201 ret_w3 = rc; 1202 reg_pair_from_64(global_handle, &ret_w2, &ret_w1); 1203 } else { 1204 ret_fid = FFA_SUCCESS_32; 1205 reg_pair_from_64(global_handle, &ret_w3, &ret_w2); 1206 } 1207 1208 spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0); 1209 } 1210 1211 static void handle_mem_reclaim(struct thread_smc_args *args) 1212 { 1213 uint32_t ret_val = FFA_INVALID_PARAMETERS; 1214 uint32_t ret_fid = FFA_ERROR; 1215 uint64_t cookie = 0; 1216 1217 if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7) 1218 goto out; 1219 1220 cookie = reg_pair_to_64(args->a2, args->a1); 1221 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 1222 uint16_t guest_id = 0; 1223 1224 if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) { 1225 guest_id = virt_find_guest_by_cookie(cookie); 1226 } else { 1227 guest_id = (cookie >> FFA_MEMORY_HANDLE_PRTN_SHIFT) & 1228 FFA_MEMORY_HANDLE_PRTN_MASK; 1229 } 1230 if (!guest_id || virt_set_guest(guest_id)) 1231 goto out; 1232 } 1233 1234 switch 
(mobj_ffa_sel1_spmc_reclaim(cookie)) { 1235 case TEE_SUCCESS: 1236 ret_fid = FFA_SUCCESS_32; 1237 ret_val = 0; 1238 break; 1239 case TEE_ERROR_ITEM_NOT_FOUND: 1240 DMSG("cookie %#"PRIx64" not found", cookie); 1241 ret_val = FFA_INVALID_PARAMETERS; 1242 break; 1243 default: 1244 DMSG("cookie %#"PRIx64" busy", cookie); 1245 ret_val = FFA_DENIED; 1246 break; 1247 } 1248 1249 virt_unset_guest(); 1250 1251 out: 1252 spmc_set_args(args, ret_fid, ret_val, 0, 0, 0, 0); 1253 } 1254 #endif 1255 1256 /* Only called from assembly */ 1257 void thread_spmc_msg_recv(struct thread_smc_args *args); 1258 void thread_spmc_msg_recv(struct thread_smc_args *args) 1259 { 1260 assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL); 1261 switch (args->a0) { 1262 #if defined(CFG_CORE_SEL1_SPMC) 1263 case FFA_FEATURES: 1264 handle_features(args); 1265 break; 1266 case FFA_SPM_ID_GET: 1267 handle_spm_id_get(args); 1268 break; 1269 #ifdef ARM64 1270 case FFA_RXTX_MAP_64: 1271 #endif 1272 case FFA_RXTX_MAP_32: 1273 spmc_handle_rxtx_map(args, &my_rxtx); 1274 break; 1275 case FFA_RXTX_UNMAP: 1276 spmc_handle_rxtx_unmap(args, &my_rxtx); 1277 break; 1278 case FFA_RX_RELEASE: 1279 spmc_handle_rx_release(args, &my_rxtx); 1280 break; 1281 case FFA_PARTITION_INFO_GET: 1282 spmc_handle_partition_info_get(args, &my_rxtx); 1283 break; 1284 #endif /*CFG_CORE_SEL1_SPMC*/ 1285 case FFA_INTERRUPT: 1286 interrupt_main_handler(); 1287 spmc_set_args(args, FFA_MSG_WAIT, 0, 0, 0, 0, 0); 1288 break; 1289 #ifdef ARM64 1290 case FFA_MSG_SEND_DIRECT_REQ_64: 1291 #endif 1292 case FFA_MSG_SEND_DIRECT_REQ_32: 1293 handle_direct_request(args, &my_rxtx); 1294 break; 1295 #if defined(CFG_CORE_SEL1_SPMC) 1296 #ifdef ARM64 1297 case FFA_MEM_SHARE_64: 1298 #endif 1299 case FFA_MEM_SHARE_32: 1300 handle_mem_share(args, &my_rxtx); 1301 break; 1302 case FFA_MEM_RECLAIM: 1303 if (!IS_ENABLED(CFG_SECURE_PARTITION) || 1304 !ffa_mem_reclaim(args, NULL)) 1305 handle_mem_reclaim(args); 1306 break; 1307 case FFA_MEM_FRAG_TX: 1308 handle_mem_frag_tx(args, &my_rxtx); 1309 break; 1310 #endif /*CFG_CORE_SEL1_SPMC*/ 1311 default: 1312 EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0); 1313 spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED, 1314 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ); 1315 } 1316 } 1317 1318 static TEE_Result yielding_call_with_arg(uint64_t cookie, uint32_t offset) 1319 { 1320 size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS); 1321 struct thread_ctx *thr = threads + thread_get_id(); 1322 TEE_Result res = TEE_ERROR_BAD_PARAMETERS; 1323 struct optee_msg_arg *arg = NULL; 1324 struct mobj *mobj = NULL; 1325 uint32_t num_params = 0; 1326 size_t sz = 0; 1327 1328 mobj = mobj_ffa_get_by_cookie(cookie, 0); 1329 if (!mobj) { 1330 EMSG("Can't find cookie %#"PRIx64, cookie); 1331 return TEE_ERROR_BAD_PARAMETERS; 1332 } 1333 1334 res = mobj_inc_map(mobj); 1335 if (res) 1336 goto out_put_mobj; 1337 1338 res = TEE_ERROR_BAD_PARAMETERS; 1339 arg = mobj_get_va(mobj, offset, sizeof(*arg)); 1340 if (!arg) 1341 goto out_dec_map; 1342 1343 num_params = READ_ONCE(arg->num_params); 1344 if (num_params > OPTEE_MSG_MAX_NUM_PARAMS) 1345 goto out_dec_map; 1346 1347 sz = OPTEE_MSG_GET_ARG_SIZE(num_params); 1348 1349 thr->rpc_arg = mobj_get_va(mobj, offset + sz, sz_rpc); 1350 if (!thr->rpc_arg) 1351 goto out_dec_map; 1352 1353 virt_on_stdcall(); 1354 res = tee_entry_std(arg, num_params); 1355 1356 thread_rpc_shm_cache_clear(&thr->shm_cache); 1357 thr->rpc_arg = NULL; 1358 1359 out_dec_map: 1360 mobj_dec_map(mobj); 
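	/* Drop the reference taken by mobj_ffa_get_by_cookie() above */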
1361 out_put_mobj: 1362 mobj_put(mobj); 1363 return res; 1364 } 1365 1366 /* 1367 * Helper routine for the assembly function thread_std_smc_entry() 1368 * 1369 * Note: this function is weak just to make link_dummies_paged.c happy. 1370 */ 1371 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1, 1372 uint32_t a2, uint32_t a3, 1373 uint32_t a4, uint32_t a5 __unused) 1374 { 1375 /* 1376 * Arguments are supplied from handle_yielding_call() as: 1377 * a0 <- w1 1378 * a1 <- w3 1379 * a2 <- w4 1380 * a3 <- w5 1381 * a4 <- w6 1382 * a5 <- w7 1383 */ 1384 thread_get_tsd()->rpc_target_info = swap_src_dst(a0); 1385 if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG) 1386 return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4); 1387 return FFA_DENIED; 1388 } 1389 1390 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm) 1391 { 1392 uint64_t offs = tpm->u.memref.offs; 1393 1394 param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN + 1395 OPTEE_MSG_ATTR_TYPE_FMEM_INPUT; 1396 1397 param->u.fmem.offs_low = offs; 1398 param->u.fmem.offs_high = offs >> 32; 1399 if (param->u.fmem.offs_high != offs >> 32) 1400 return false; 1401 1402 param->u.fmem.size = tpm->u.memref.size; 1403 if (tpm->u.memref.mobj) { 1404 uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj); 1405 1406 /* If a mobj is passed it better be one with a valid cookie. */ 1407 if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID) 1408 return false; 1409 param->u.fmem.global_id = cookie; 1410 } else { 1411 param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID; 1412 } 1413 1414 return true; 1415 } 1416 1417 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params, 1418 struct thread_param *params, 1419 struct optee_msg_arg **arg_ret) 1420 { 1421 size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS); 1422 struct thread_ctx *thr = threads + thread_get_id(); 1423 struct optee_msg_arg *arg = thr->rpc_arg; 1424 1425 if (num_params > THREAD_RPC_MAX_NUM_PARAMS) 1426 return TEE_ERROR_BAD_PARAMETERS; 1427 1428 if (!arg) { 1429 EMSG("rpc_arg not set"); 1430 return TEE_ERROR_GENERIC; 1431 } 1432 1433 memset(arg, 0, sz); 1434 arg->cmd = cmd; 1435 arg->num_params = num_params; 1436 arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */ 1437 1438 for (size_t n = 0; n < num_params; n++) { 1439 switch (params[n].attr) { 1440 case THREAD_PARAM_ATTR_NONE: 1441 arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE; 1442 break; 1443 case THREAD_PARAM_ATTR_VALUE_IN: 1444 case THREAD_PARAM_ATTR_VALUE_OUT: 1445 case THREAD_PARAM_ATTR_VALUE_INOUT: 1446 arg->params[n].attr = params[n].attr - 1447 THREAD_PARAM_ATTR_VALUE_IN + 1448 OPTEE_MSG_ATTR_TYPE_VALUE_INPUT; 1449 arg->params[n].u.value.a = params[n].u.value.a; 1450 arg->params[n].u.value.b = params[n].u.value.b; 1451 arg->params[n].u.value.c = params[n].u.value.c; 1452 break; 1453 case THREAD_PARAM_ATTR_MEMREF_IN: 1454 case THREAD_PARAM_ATTR_MEMREF_OUT: 1455 case THREAD_PARAM_ATTR_MEMREF_INOUT: 1456 if (!set_fmem(arg->params + n, params + n)) 1457 return TEE_ERROR_BAD_PARAMETERS; 1458 break; 1459 default: 1460 return TEE_ERROR_BAD_PARAMETERS; 1461 } 1462 } 1463 1464 if (arg_ret) 1465 *arg_ret = arg; 1466 1467 return TEE_SUCCESS; 1468 } 1469 1470 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params, 1471 struct thread_param *params) 1472 { 1473 for (size_t n = 0; n < num_params; n++) { 1474 switch (params[n].attr) { 1475 case THREAD_PARAM_ATTR_VALUE_OUT: 1476 case THREAD_PARAM_ATTR_VALUE_INOUT: 1477 params[n].u.value.a = arg->params[n].u.value.a; 1478 
			params[n].u.value.b = arg->params[n].u.value.b;
			params[n].u.value.c = arg->params[n].u.value.c;
			break;
		case THREAD_PARAM_ATTR_MEMREF_OUT:
		case THREAD_PARAM_ATTR_MEMREF_INOUT:
			params[n].u.memref.size = arg->params[n].u.fmem.size;
			break;
		default:
			break;
		}
	}

	return arg->ret;
}

uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
			struct thread_param *params)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	struct optee_msg_arg *arg = NULL;
	uint32_t ret = 0;

	ret = get_rpc_arg(cmd, num_params, params, &arg);
	if (ret)
		return ret;

	thread_rpc(&rpc_arg);

	return get_rpc_arg_res(arg, num_params, params);
}

static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
	uint32_t res2 = 0;
	uint32_t res = 0;

	DMSG("freeing cookie %#"PRIx64, cookie);

	res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);

	mobj_put(mobj);
	res2 = mobj_ffa_unregister_by_cookie(cookie);
	if (res2)
		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
		     cookie, res2);
	if (!res)
		thread_rpc(&rpc_arg);
}

static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
	struct optee_msg_arg *arg = NULL;
	unsigned int internal_offset = 0;
	struct mobj *mobj = NULL;
	uint64_t cookie = 0;

	if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
		return NULL;

	thread_rpc(&rpc_arg);

	if (arg->num_params != 1 ||
	    arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
		return NULL;

	internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs);
	cookie = READ_ONCE(arg->params->u.fmem.global_id);
	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
	if (!mobj) {
		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
		     cookie, internal_offset);
		return NULL;
	}

	assert(mobj_is_nonsec(mobj));

	if (mobj->size < size) {
		DMSG("Mobj %#"PRIx64": wrong size", cookie);
		mobj_put(mobj);
		return NULL;
	}

	if (mobj_inc_map(mobj)) {
		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
		mobj_put(mobj);
		return NULL;
	}

	return mobj;
}

struct mobj *thread_rpc_alloc_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
}

struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
}

void thread_rpc_free_kernel_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL, mobj_get_cookie(mobj), mobj);
}

void thread_rpc_free_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
			mobj);
}

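/*
 * Illustrative sketch only (not called from anywhere in this file): how a
 * caller typically pairs the RPC payload helpers above. The buffer is
 * allocated from normal world shared memory, mapped, used and then
 * released again. The size of 256 bytes is an arbitrary example value.
 */
static void __maybe_unused thread_rpc_payload_usage_example(void)
{
	const size_t sz = 256;
	struct mobj *mobj = thread_rpc_alloc_payload(sz);
	void *va = NULL;

	if (!mobj)
		return;

	va = mobj_get_va(mobj, 0, sz);
	if (va)
		memset(va, 0, sz);

	/* Releases the buffer again via OPTEE_RPC_CMD_SHM_FREE */
	thread_rpc_free_payload(mobj);
}
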
struct mobj *thread_rpc_alloc_global_payload(size_t size) 1607 { 1608 return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL); 1609 } 1610 1611 void thread_rpc_free_global_payload(struct mobj *mobj) 1612 { 1613 thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL, mobj_get_cookie(mobj), 1614 mobj); 1615 } 1616 1617 void thread_spmc_register_secondary_ep(vaddr_t ep) 1618 { 1619 unsigned long ret = 0; 1620 1621 /* Let the SPM know the entry point for secondary CPUs */ 1622 ret = thread_smc(FFA_SECONDARY_EP_REGISTER_64, ep, 0, 0); 1623 1624 if (ret != FFA_SUCCESS_32 && ret != FFA_SUCCESS_64) 1625 EMSG("FFA_SECONDARY_EP_REGISTER_64 ret %#lx", ret); 1626 } 1627 1628 #if defined(CFG_CORE_SEL1_SPMC) 1629 static TEE_Result spmc_init(void) 1630 { 1631 my_endpoint_id = SPMC_ENDPOINT_ID; 1632 DMSG("My endpoint ID %#x", my_endpoint_id); 1633 1634 /* 1635 * If SPMD think we are version 1.0 it will report version 1.0 to 1636 * normal world regardless of what version we query the SPM with. 1637 * However, if SPMD think we are version 1.1 it will forward 1638 * queries from normal world to let us negotiate version. So by 1639 * setting version 1.0 here we should be compatible. 1640 * 1641 * Note that disagreement on negotiated version means that we'll 1642 * have communication problems with normal world. 1643 */ 1644 my_rxtx.ffa_vers = FFA_VERSION_1_0; 1645 1646 return TEE_SUCCESS; 1647 } 1648 #else /* !defined(CFG_CORE_SEL1_SPMC) */ 1649 static bool is_ffa_success(uint32_t fid) 1650 { 1651 #ifdef ARM64 1652 if (fid == FFA_SUCCESS_64) 1653 return true; 1654 #endif 1655 return fid == FFA_SUCCESS_32; 1656 } 1657 1658 static void spmc_rxtx_map(struct ffa_rxtx *rxtx) 1659 { 1660 struct thread_smc_args args = { 1661 #ifdef ARM64 1662 .a0 = FFA_RXTX_MAP_64, 1663 #else 1664 .a0 = FFA_RXTX_MAP_32, 1665 #endif 1666 .a1 = virt_to_phys(rxtx->tx), 1667 .a2 = virt_to_phys(rxtx->rx), 1668 .a3 = 1, 1669 }; 1670 1671 thread_smccc(&args); 1672 if (!is_ffa_success(args.a0)) { 1673 if (args.a0 == FFA_ERROR) 1674 EMSG("rxtx map failed with error %ld", args.a2); 1675 else 1676 EMSG("rxtx map failed"); 1677 panic(); 1678 } 1679 } 1680 1681 static uint16_t get_my_id(void) 1682 { 1683 struct thread_smc_args args = { 1684 .a0 = FFA_ID_GET, 1685 }; 1686 1687 thread_smccc(&args); 1688 if (!is_ffa_success(args.a0)) { 1689 if (args.a0 == FFA_ERROR) 1690 EMSG("Get id failed with error %ld", args.a2); 1691 else 1692 EMSG("Get id failed"); 1693 panic(); 1694 } 1695 1696 return args.a2; 1697 } 1698 1699 static uint32_t get_ffa_version(uint32_t my_version) 1700 { 1701 struct thread_smc_args args = { 1702 .a0 = FFA_VERSION, 1703 .a1 = my_version, 1704 }; 1705 1706 thread_smccc(&args); 1707 if (args.a0 & BIT(31)) { 1708 EMSG("FF-A version failed with error %ld", args.a0); 1709 panic(); 1710 } 1711 1712 return args.a0; 1713 } 1714 1715 static void *spmc_retrieve_req(uint64_t cookie, 1716 struct ffa_mem_transaction_x *trans) 1717 { 1718 struct ffa_mem_access *acc_descr_array = NULL; 1719 struct ffa_mem_access_perm *perm_descr = NULL; 1720 struct thread_smc_args args = { 1721 .a0 = FFA_MEM_RETRIEVE_REQ_32, 1722 .a3 = 0, /* Address, Using TX -> MBZ */ 1723 .a4 = 0, /* Using TX -> MBZ */ 1724 }; 1725 size_t size = 0; 1726 int rc = 0; 1727 1728 if (my_rxtx.ffa_vers == FFA_VERSION_1_0) { 1729 struct ffa_mem_transaction_1_0 *trans_descr = my_rxtx.tx; 1730 1731 size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access); 1732 memset(trans_descr, 0, size); 1733 trans_descr->sender_id = thread_get_tsd()->rpc_target_info; 1734 trans_descr->mem_reg_attr = 
FFA_NORMAL_MEM_REG_ATTR; 1735 trans_descr->global_handle = cookie; 1736 trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE | 1737 FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT; 1738 trans_descr->mem_access_count = 1; 1739 acc_descr_array = trans_descr->mem_access_array; 1740 } else { 1741 struct ffa_mem_transaction_1_1 *trans_descr = my_rxtx.tx; 1742 1743 size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access); 1744 memset(trans_descr, 0, size); 1745 trans_descr->sender_id = thread_get_tsd()->rpc_target_info; 1746 trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR; 1747 trans_descr->global_handle = cookie; 1748 trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE | 1749 FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT; 1750 trans_descr->mem_access_count = 1; 1751 trans_descr->mem_access_offs = sizeof(*trans_descr); 1752 trans_descr->mem_access_size = sizeof(struct ffa_mem_access); 1753 acc_descr_array = (void *)((vaddr_t)my_rxtx.tx + 1754 sizeof(*trans_descr)); 1755 } 1756 acc_descr_array->region_offs = 0; 1757 acc_descr_array->reserved = 0; 1758 perm_descr = &acc_descr_array->access_perm; 1759 perm_descr->endpoint_id = my_endpoint_id; 1760 perm_descr->perm = FFA_MEM_ACC_RW; 1761 perm_descr->flags = 0; 1762 1763 args.a1 = size; /* Total Length */ 1764 args.a2 = size; /* Frag Length == Total length */ 1765 thread_smccc(&args); 1766 if (args.a0 != FFA_MEM_RETRIEVE_RESP) { 1767 if (args.a0 == FFA_ERROR) 1768 EMSG("Failed to fetch cookie %#"PRIx64" error code %d", 1769 cookie, (int)args.a2); 1770 else 1771 EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64, 1772 cookie, args.a0); 1773 return NULL; 1774 } 1775 rc = spmc_read_mem_transaction(my_rxtx.ffa_vers, my_rxtx.tx, 1776 my_rxtx.size, trans); 1777 if (rc) { 1778 EMSG("Memory transaction failure for cookie %#"PRIx64" rc %d", 1779 cookie, rc); 1780 return NULL; 1781 } 1782 1783 return my_rxtx.rx; 1784 } 1785 1786 void thread_spmc_relinquish(uint64_t cookie) 1787 { 1788 struct ffa_mem_relinquish *relinquish_desc = my_rxtx.tx; 1789 struct thread_smc_args args = { 1790 .a0 = FFA_MEM_RELINQUISH, 1791 }; 1792 1793 memset(relinquish_desc, 0, sizeof(*relinquish_desc)); 1794 relinquish_desc->handle = cookie; 1795 relinquish_desc->flags = 0; 1796 relinquish_desc->endpoint_count = 1; 1797 relinquish_desc->endpoint_id_array[0] = my_endpoint_id; 1798 thread_smccc(&args); 1799 if (!is_ffa_success(args.a0)) 1800 EMSG("Failed to relinquish cookie %#"PRIx64, cookie); 1801 } 1802 1803 static int set_pages(struct ffa_address_range *regions, 1804 unsigned int num_regions, unsigned int num_pages, 1805 struct mobj_ffa *mf) 1806 { 1807 unsigned int n = 0; 1808 unsigned int idx = 0; 1809 1810 for (n = 0; n < num_regions; n++) { 1811 unsigned int page_count = READ_ONCE(regions[n].page_count); 1812 uint64_t addr = READ_ONCE(regions[n].address); 1813 1814 if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count)) 1815 return FFA_INVALID_PARAMETERS; 1816 } 1817 1818 if (idx != num_pages) 1819 return FFA_INVALID_PARAMETERS; 1820 1821 return 0; 1822 } 1823 1824 struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie) 1825 { 1826 struct mobj_ffa *ret = NULL; 1827 struct ffa_mem_transaction_x retrieve_desc = { }; 1828 struct ffa_mem_access *descr_array = NULL; 1829 struct ffa_mem_region *descr = NULL; 1830 struct mobj_ffa *mf = NULL; 1831 unsigned int num_pages = 0; 1832 unsigned int offs = 0; 1833 void *buf = NULL; 1834 struct thread_smc_args ffa_rx_release_args = { 1835 .a0 = FFA_RX_RELEASE 1836 }; 1837 1838 /* 1839 * OP-TEE is only supporting a single 
mem_region while the
	 * specification allows for more than one.
	 */
	buf = spmc_retrieve_req(cookie, &retrieve_desc);
	if (!buf) {
		EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64,
		     cookie);
		return NULL;
	}

	descr_array = (void *)((vaddr_t)buf + retrieve_desc.mem_access_offs);
	offs = READ_ONCE(descr_array->region_offs);
	descr = (struct ffa_mem_region *)((vaddr_t)buf + offs);

	num_pages = READ_ONCE(descr->total_page_count);
	mf = mobj_ffa_spmc_new(cookie, num_pages);
	if (!mf)
		goto out;

	if (set_pages(descr->address_range_array,
		      READ_ONCE(descr->address_range_count), num_pages, mf)) {
		mobj_ffa_spmc_delete(mf);
		goto out;
	}

	ret = mf;

out:
	/* Release RX buffer after the mem retrieve request. */
	thread_smccc(&ffa_rx_release_args);

	return ret;
}

static TEE_Result spmc_init(void)
{
	unsigned int major = 0;
	unsigned int minor __maybe_unused = 0;
	uint32_t my_vers = 0;
	uint32_t vers = 0;

	my_vers = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR);
	vers = get_ffa_version(my_vers);
	major = (vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK;
	minor = (vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK;
	DMSG("SPMC reported version %u.%u", major, minor);
	if (major != FFA_VERSION_MAJOR) {
		EMSG("Incompatible major version %u, expected %u",
		     major, FFA_VERSION_MAJOR);
		panic();
	}
	if (vers < my_vers)
		my_vers = vers;
	DMSG("Using version %u.%u",
	     (my_vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK,
	     (my_vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK);
	my_rxtx.ffa_vers = my_vers;

	spmc_rxtx_map(&my_rxtx);
	my_endpoint_id = get_my_id();
	DMSG("My endpoint ID %#x", my_endpoint_id);

	return TEE_SUCCESS;
}
#endif /* !defined(CFG_CORE_SEL1_SPMC) */

/*
 * boot_final() is always done before exiting at the end of boot
 * initialization. With virtualization the init-calls are only done once an
 * OP-TEE partition has been created, so in that case we have to initialize
 * via boot_final() to make sure we have a value assigned before it's used
 * the first time.
 */
#ifdef CFG_NS_VIRTUALIZATION
boot_final(spmc_init);
#else
service_init(spmc_init);
#endif