/*
 * Copyright (c) 2022-2023, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <assert.h>
#include <errno.h>
#include <inttypes.h>

#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/object_pool.h>
#include <lib/spinlock.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <services/ffa_svc.h>
#include "spmc.h"
#include "spmc_shared_mem.h"

#include <platform_def.h>

/**
 * struct spmc_shmem_obj - Shared memory object.
 * @desc_size:   Size of @desc.
 * @desc_filled: Size of @desc already received.
 * @in_use:      Number of clients that have called ffa_mem_retrieve_req
 *               without a matching ffa_mem_relinquish call.
 * @desc:        FF-A memory region descriptor passed in ffa_mem_share.
 */
struct spmc_shmem_obj {
        size_t desc_size;
        size_t desc_filled;
        size_t in_use;
        struct ffa_mtd desc;
};

/*
 * Declare our data structure to store the metadata of memory share requests.
 * The main datastore is allocated on a per-platform basis to ensure enough
 * storage can be made available.
 * The address of the data store will be populated by the SPMC during its
 * initialization.
 */

struct spmc_shmem_obj_state spmc_shmem_obj_state = {
        /* Set start value for handle so top 32 bits are needed quickly. */
        .next_handle = 0xffffffc0U,
};

/**
 * spmc_shmem_obj_size - Convert from descriptor size to object size.
 * @desc_size: Size of struct ffa_memory_region_descriptor object.
 *
 * Return: Size of struct spmc_shmem_obj object.
 */
static size_t spmc_shmem_obj_size(size_t desc_size)
{
        return desc_size + offsetof(struct spmc_shmem_obj, desc);
}

/**
 * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
 * @state:     Global state.
 * @desc_size: Size of struct ffa_memory_region_descriptor object that
 *             allocated object will hold.
 *
 * Return: Pointer to newly allocated object, or %NULL if there is not enough
 *         space left. The returned pointer is only valid while @state is
 *         locked; to use it again after unlocking @state,
 *         spmc_shmem_obj_lookup must be called.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
{
        struct spmc_shmem_obj *obj;
        size_t free = state->data_size - state->allocated;
        size_t obj_size;

        if (state->data == NULL) {
                ERROR("Missing shmem datastore!\n");
                return NULL;
        }

        /* Ensure that descriptor size is aligned. */
        if (!is_aligned(desc_size, 16)) {
                WARN("%s(0x%zx) desc_size not 16-byte aligned\n",
                     __func__, desc_size);
                return NULL;
        }

        obj_size = spmc_shmem_obj_size(desc_size);

        /* Ensure the obj size has not overflowed. */
        if (obj_size < desc_size) {
                WARN("%s(0x%zx) desc_size overflow\n",
                     __func__, desc_size);
                return NULL;
        }

        if (obj_size > free) {
                WARN("%s(0x%zx) failed, free 0x%zx\n",
                     __func__, desc_size, free);
                return NULL;
        }
        obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
        obj->desc = (struct ffa_mtd) {0};
        obj->desc_size = desc_size;
        obj->desc_filled = 0;
        obj->in_use = 0;
        state->allocated += obj_size;
        return obj;
}
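/*
 * Illustrative usage sketch (comment only, not compiled): the allocator is a
 * simple bump allocator over the platform datastore, so callers are expected
 * to hold the datastore lock across alloc/use/free, mirroring what
 * spmc_ffa_mem_send() does later in this file:
 *
 *	spin_lock(&spmc_shmem_obj_state.lock);
 *	obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
 *	if (obj == NULL) {
 *		... report FFA_ERROR_NO_MEMORY ...
 *	}
 *	spin_unlock(&spmc_shmem_obj_state.lock);
 */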
/**
 * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
 * @state: Global state.
 * @obj:   Object to free.
 *
 * Release memory used by @obj. Other objects may move, so on return all
 * pointers to struct spmc_shmem_obj object should be considered invalid, not
 * just @obj.
 *
 * The current implementation always compacts the remaining objects to simplify
 * the allocator and to avoid fragmentation.
 */

static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
                                struct spmc_shmem_obj *obj)
{
        size_t free_size = spmc_shmem_obj_size(obj->desc_size);
        uint8_t *shift_dest = (uint8_t *)obj;
        uint8_t *shift_src = shift_dest + free_size;
        size_t shift_size = state->allocated - (shift_src - state->data);

        if (shift_size != 0U) {
                memmove(shift_dest, shift_src, shift_size);
        }
        state->allocated -= free_size;
}

/**
 * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
 * @state:  Global state.
 * @handle: Unique handle of object to return.
 *
 * Return: struct spmc_shmem_obj object with handle matching @handle.
 *         %NULL if no object in @state->data has a matching handle.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
{
        uint8_t *curr = state->data;

        while (curr - state->data < state->allocated) {
                struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

                if (obj->desc.handle == handle) {
                        return obj;
                }
                curr += spmc_shmem_obj_size(obj->desc_size);
        }
        return NULL;
}

/**
 * spmc_shmem_obj_get_next - Get the next memory object from an offset.
 * @offset: Offset used to track which objects have previously been
 *          returned.
 *
 * Return: the next struct spmc_shmem_obj object from the provided offset.
 *         %NULL, if there are no more objects.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
{
        uint8_t *curr = state->data + *offset;

        if (curr - state->data < state->allocated) {
                struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

                *offset += spmc_shmem_obj_size(obj->desc_size);

                return obj;
        }
        return NULL;
}
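/*
 * Illustrative iteration sketch (comment only): spmc_shmem_obj_get_next() is
 * a cursor-style iterator; the caller owns the offset, which is advanced past
 * each returned object. spmc_shmem_check_state_obj() below walks the whole
 * datastore this way:
 *
 *	size_t offset = 0;
 *	struct spmc_shmem_obj *cur;
 *
 *	while ((cur = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
 *					      &offset)) != NULL) {
 *		... inspect cur ...
 *	}
 */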
/*******************************************************************************
 * FF-A memory descriptor helper functions.
 ******************************************************************************/
/**
 * spmc_shmem_obj_get_emad - Get the emad from a given index depending on the
 *                           client's FF-A version.
 * @desc:        The memory transaction descriptor.
 * @index:       The index of the emad element to be accessed.
 * @ffa_version: FF-A version of the provided structure.
 * @emad_size:   Will be populated with the size of the returned emad
 *               descriptor.
 * Return: A pointer to the requested emad structure.
 */
static void *
spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
                        uint32_t ffa_version, size_t *emad_size)
{
        uint8_t *emad;

        assert(index < desc->emad_count);

        /*
         * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
         * format, otherwise assume it is a v1.1 format.
         */
        if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
                emad = (uint8_t *)desc + offsetof(struct ffa_mtd_v1_0, emad);
                *emad_size = sizeof(struct ffa_emad_v1_0);
        } else {
                assert(is_aligned(desc->emad_offset, 16));
                emad = ((uint8_t *) desc + desc->emad_offset);
                *emad_size = desc->emad_size;
        }

        assert(((uint64_t)index * (uint64_t)*emad_size) <= UINT32_MAX);
        return (emad + (*emad_size * index));
}

/**
 * spmc_shmem_obj_get_comp_mrd - Get comp_mrd from a mtd struct based on the
 *                               FF-A version of the descriptor.
 * @obj: Object containing ffa_memory_region_descriptor.
 *
 * Return: struct ffa_comp_mrd object corresponding to the composite memory
 *         region descriptor.
 */
static struct ffa_comp_mrd *
spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
{
        size_t emad_size;
        /*
         * The comp_mrd_offset field of the emad descriptor remains consistent
         * between FF-A versions therefore we can use the v1.0 descriptor here
         * in all cases.
         */
        struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
                                                             ffa_version,
                                                             &emad_size);

        /* Ensure the composite descriptor offset is aligned. */
        if (!is_aligned(emad->comp_mrd_offset, 8)) {
                WARN("Unaligned composite memory region descriptor offset.\n");
                return NULL;
        }

        return (struct ffa_comp_mrd *)
               ((uint8_t *)(&obj->desc) + emad->comp_mrd_offset);
}

/**
 * spmc_shmem_obj_validate_id - Validate a partition ID is participating in
 *                              a given memory transaction.
 * @sp_id: Partition ID to validate.
 * @obj:   The shared memory object containing the descriptor
 *         of the memory transaction.
 * Return: true if ID is valid, else false.
 */
bool spmc_shmem_obj_validate_id(struct spmc_shmem_obj *obj, uint16_t sp_id)
{
        bool found = false;
        struct ffa_mtd *desc = &obj->desc;
        size_t desc_size = obj->desc_size;

        /* Validate the partition is a valid participant. */
        for (unsigned int i = 0U; i < desc->emad_count; i++) {
                size_t emad_size;
                struct ffa_emad_v1_0 *emad;

                emad = spmc_shmem_obj_get_emad(desc, i,
                                               MAKE_FFA_VERSION(1, 1),
                                               &emad_size);
                /*
                 * Validate the calculated emad address resides within the
                 * descriptor.
                 */
                if ((emad == NULL) || (uintptr_t) emad >=
                    (uintptr_t)((uint8_t *) desc + desc_size)) {
                        VERBOSE("Invalid emad.\n");
                        break;
                }
                if (sp_id == emad->mapd.endpoint_id) {
                        found = true;
                        break;
                }
        }
        return found;
}
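/*
 * Worked example (comment only) for the interval test used below: ranges are
 * half-open, [start, end), and two ranges overlap iff
 * !((end2 <= start1) || (end1 <= start2)). With 4K pages,
 * [0x1000, 0x3000) and [0x2000, 0x4000) overlap, whereas [0x1000, 0x3000)
 * and [0x3000, 0x5000) merely touch and are treated as disjoint.
 */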
/*
 * Compare two memory regions to determine if any range overlaps with another
 * ongoing memory transaction.
 */
static bool
overlapping_memory_regions(struct ffa_comp_mrd *region1,
                           struct ffa_comp_mrd *region2)
{
        uint64_t region1_start;
        uint64_t region1_size;
        uint64_t region1_end;
        uint64_t region2_start;
        uint64_t region2_size;
        uint64_t region2_end;

        assert(region1 != NULL);
        assert(region2 != NULL);

        if (region1 == region2) {
                return true;
        }

        /*
         * Check each memory region in the request against existing
         * transactions.
         */
        for (size_t i = 0; i < region1->address_range_count; i++) {

                region1_start = region1->address_range_array[i].address;
                region1_size =
                        region1->address_range_array[i].page_count *
                        PAGE_SIZE_4KB;
                region1_end = region1_start + region1_size;

                for (size_t j = 0; j < region2->address_range_count; j++) {

                        region2_start = region2->address_range_array[j].address;
                        region2_size =
                                region2->address_range_array[j].page_count *
                                PAGE_SIZE_4KB;
                        region2_end = region2_start + region2_size;

                        /* Check if regions are not overlapping. */
                        if (!((region2_end <= region1_start) ||
                              (region1_end <= region2_start))) {
                                WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
                                     region1_start, region1_end,
                                     region2_start, region2_end);
                                return true;
                        }
                }
        }
        return false;
}

/*******************************************************************************
 * FF-A v1.0 Memory Descriptor Conversion Helpers.
 ******************************************************************************/
/**
 * spmc_shm_get_v1_1_descriptor_size - Calculate the required size for a v1.1
 *                                     converted descriptor.
 * @orig:      The original v1.0 memory transaction descriptor.
 * @desc_size: The size of the original v1.0 memory transaction descriptor.
 *
 * Return: the size required to store the descriptor in the v1.1 format.
 */
static uint64_t
spmc_shm_get_v1_1_descriptor_size(struct ffa_mtd_v1_0 *orig, size_t desc_size)
{
        uint64_t size = 0;
        struct ffa_comp_mrd *mrd;
        struct ffa_emad_v1_0 *emad_array = orig->emad;

        /* Get the size of the v1.1 descriptor. */
        size += sizeof(struct ffa_mtd);

        /* Add the size of the emad descriptors. */
        size += orig->emad_count * sizeof(struct ffa_emad_v1_0);

        /* Add the size of the composite mrds. */
        size += sizeof(struct ffa_comp_mrd);

        /* Add the size of the constituent mrds. */
        mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
              emad_array[0].comp_mrd_offset);

        /* Add the size of the memory region descriptors. */
        size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);

        return size;
}
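/*
 * Worked example (comment only), assuming two endpoints and three constituent
 * ranges in the source v1.0 descriptor:
 *
 *	size = sizeof(struct ffa_mtd)
 *	     + 2 * sizeof(struct ffa_emad_v1_0)
 *	     + sizeof(struct ffa_comp_mrd)
 *	     + 3 * sizeof(struct ffa_cons_mrd);
 *
 * i.e. one header, one emad per endpoint, one composite descriptor and one
 * constituent descriptor per address range.
 */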
/**
 * spmc_shm_get_v1_0_descriptor_size - Calculate the required size for a v1.0
 *                                     converted descriptor.
 * @orig:      The original v1.1 memory transaction descriptor.
 * @desc_size: The size of the original v1.1 memory transaction descriptor.
 *
 * Return: the size required to store the descriptor in the v1.0 format.
 */
static size_t
spmc_shm_get_v1_0_descriptor_size(struct ffa_mtd *orig, size_t desc_size)
{
        size_t size = 0;
        struct ffa_comp_mrd *mrd;
        struct ffa_emad_v1_0 *emad_array = (struct ffa_emad_v1_0 *)
                                           ((uint8_t *) orig +
                                            orig->emad_offset);

        /* Get the size of the v1.0 descriptor. */
        size += sizeof(struct ffa_mtd_v1_0);

        /* Add the size of the v1.0 emad descriptors. */
        size += orig->emad_count * sizeof(struct ffa_emad_v1_0);

        /* Add the size of the composite mrds. */
        size += sizeof(struct ffa_comp_mrd);

        /* Add the size of the constituent mrds. */
        mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
              emad_array[0].comp_mrd_offset);

        /* Check the calculated address is within the memory descriptor. */
        if (((uintptr_t) mrd + sizeof(struct ffa_comp_mrd)) >
            (uintptr_t)((uint8_t *) orig + desc_size)) {
                return 0;
        }
        size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);

        return size;
}
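/*
 * Layout note (comment only): a v1.0 transaction descriptor carries its emad
 * array inline, directly after the struct ffa_mtd_v1_0 header, whereas a v1.1
 * descriptor locates the array via the emad_offset/emad_size fields of
 * struct ffa_mtd. The two size helpers above differ only in which header and
 * emad stride they account for; the composite and constituent descriptors are
 * laid out identically in both versions.
 */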
/**
 * spmc_shm_convert_shmem_obj_from_v1_0 - Converts a given v1.0 memory object.
 * @out_obj: The shared memory object to populate the converted descriptor.
 * @orig:    The shared memory object containing the v1.0 descriptor.
 *
 * Return: true if the conversion is successful else false.
 */
static bool
spmc_shm_convert_shmem_obj_from_v1_0(struct spmc_shmem_obj *out_obj,
                                     struct spmc_shmem_obj *orig)
{
        struct ffa_mtd_v1_0 *mtd_orig = (struct ffa_mtd_v1_0 *) &orig->desc;
        struct ffa_mtd *out = &out_obj->desc;
        struct ffa_emad_v1_0 *emad_array_in;
        struct ffa_emad_v1_0 *emad_array_out;
        struct ffa_comp_mrd *mrd_in;
        struct ffa_comp_mrd *mrd_out;

        size_t mrd_in_offset;
        size_t mrd_out_offset;
        size_t mrd_size = 0;

        /* Populate the new descriptor format from the v1.0 struct. */
        out->sender_id = mtd_orig->sender_id;
        out->memory_region_attributes = mtd_orig->memory_region_attributes;
        out->flags = mtd_orig->flags;
        out->handle = mtd_orig->handle;
        out->tag = mtd_orig->tag;
        out->emad_count = mtd_orig->emad_count;
        out->emad_size = sizeof(struct ffa_emad_v1_0);

        /*
         * We will locate the emad descriptors directly after the ffa_mtd
         * struct. This will be 8-byte aligned.
         */
        out->emad_offset = sizeof(struct ffa_mtd);

        emad_array_in = mtd_orig->emad;
        emad_array_out = (struct ffa_emad_v1_0 *)
                         ((uint8_t *) out + out->emad_offset);

        /* Copy across the emad structs. */
        for (unsigned int i = 0U; i < out->emad_count; i++) {
                /* Bound check each element of the emad array. */
                if (((uint8_t *)&emad_array_in[i] + sizeof(struct ffa_emad_v1_0)) >
                    ((uint8_t *) mtd_orig + orig->desc_size)) {
                        VERBOSE("%s: Invalid mtd structure.\n", __func__);
                        return false;
                }
                memcpy(&emad_array_out[i], &emad_array_in[i],
                       sizeof(struct ffa_emad_v1_0));
        }

        /* Place the mrd descriptors after the end of the emad descriptors. */
        mrd_in_offset = emad_array_in->comp_mrd_offset;
        mrd_out_offset = out->emad_offset + (out->emad_size * out->emad_count);
        mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);

        /* Add the size of the composite memory region descriptor. */
        mrd_size += sizeof(struct ffa_comp_mrd);

        /* Find the mrd descriptor. */
        mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);

        /* Add the size of the constituent memory region descriptors. */
        mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);

        /*
         * Update the offset in the emads by the delta between the input and
         * output addresses.
         */
        for (unsigned int i = 0U; i < out->emad_count; i++) {
                emad_array_out[i].comp_mrd_offset =
                        emad_array_in[i].comp_mrd_offset +
                        (mrd_out_offset - mrd_in_offset);
        }

        /* Verify that we stay within bound of the memory descriptors. */
        if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
            (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
            ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
             (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
                ERROR("%s: Invalid mrd structure.\n", __func__);
                return false;
        }

        /* Copy the mrd descriptors directly. */
        memcpy(mrd_out, mrd_in, mrd_size);

        return true;
}
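/*
 * Rebasing example (comment only): if the v1.0 descriptor held its composite
 * at comp_mrd_offset 0x40 and the converted v1.1 layout places it at 0x70,
 * every emad's comp_mrd_offset is increased by the delta
 * (mrd_out_offset - mrd_in_offset) = 0x30, exactly as the loop above does.
 * The offsets 0x40/0x70 are hypothetical values chosen for illustration.
 */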
/**
 * spmc_shm_convert_mtd_to_v1_0 - Converts a given v1.1 memory object to
 *                                a v1.0 memory object.
 * @out_obj: The shared memory object to populate the v1.0 descriptor.
 * @orig:    The shared memory object containing the v1.1 descriptor.
 *
 * Return: true if the conversion is successful else false.
 */
static bool
spmc_shm_convert_mtd_to_v1_0(struct spmc_shmem_obj *out_obj,
                             struct spmc_shmem_obj *orig)
{
        struct ffa_mtd *mtd_orig = &orig->desc;
        struct ffa_mtd_v1_0 *out = (struct ffa_mtd_v1_0 *) &out_obj->desc;
        struct ffa_emad_v1_0 *emad_in;
        struct ffa_emad_v1_0 *emad_array_in;
        struct ffa_emad_v1_0 *emad_array_out;
        struct ffa_comp_mrd *mrd_in;
        struct ffa_comp_mrd *mrd_out;

        size_t mrd_in_offset;
        size_t mrd_out_offset;
        size_t emad_out_array_size;
        size_t mrd_size = 0;
        size_t orig_desc_size = orig->desc_size;

        /* Populate the v1.0 descriptor format from the v1.1 struct. */
        out->sender_id = mtd_orig->sender_id;
        out->memory_region_attributes = mtd_orig->memory_region_attributes;
        out->flags = mtd_orig->flags;
        out->handle = mtd_orig->handle;
        out->tag = mtd_orig->tag;
        out->emad_count = mtd_orig->emad_count;

        /* Determine the location of the emad array in both descriptors. */
        emad_array_in = (struct ffa_emad_v1_0 *)
                        ((uint8_t *) mtd_orig + mtd_orig->emad_offset);
        emad_array_out = out->emad;

        /* Copy across the emad structs. */
        emad_in = emad_array_in;
        for (unsigned int i = 0U; i < out->emad_count; i++) {
                /* Bound check for emad array. */
                if (((uint8_t *)emad_in + sizeof(struct ffa_emad_v1_0)) >
                    ((uint8_t *) mtd_orig + orig_desc_size)) {
                        VERBOSE("%s: Invalid mtd structure.\n", __func__);
                        return false;
                }
                memcpy(&emad_array_out[i], emad_in,
                       sizeof(struct ffa_emad_v1_0));

                /* emad_size is a byte count, so advance the pointer in bytes. */
                emad_in = (struct ffa_emad_v1_0 *)
                          ((uint8_t *)emad_in + mtd_orig->emad_size);
        }

        /* Place the mrd descriptors after the end of the emad descriptors. */
        emad_out_array_size = sizeof(struct ffa_emad_v1_0) * out->emad_count;

        mrd_out_offset = (uint8_t *) out->emad - (uint8_t *) out +
                         emad_out_array_size;

        mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);

        mrd_in_offset = mtd_orig->emad_offset +
                        (mtd_orig->emad_size * mtd_orig->emad_count);

        /* Add the size of the composite memory region descriptor. */
        mrd_size += sizeof(struct ffa_comp_mrd);

        /* Find the mrd descriptor. */
        mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);

        /* Add the size of the constituent memory region descriptors. */
        mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);

        /*
         * Update the offset in the emads by the delta between the input and
         * output addresses.
         */
        emad_in = emad_array_in;

        for (unsigned int i = 0U; i < out->emad_count; i++) {
                emad_array_out[i].comp_mrd_offset = emad_in->comp_mrd_offset +
                                                    (mrd_out_offset -
                                                     mrd_in_offset);
                /* As above, advance by emad_size bytes. */
                emad_in = (struct ffa_emad_v1_0 *)
                          ((uint8_t *)emad_in + mtd_orig->emad_size);
        }

        /* Verify that we stay within bound of the memory descriptors. */
        if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
            (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
            ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
             (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
                ERROR("%s: Invalid mrd structure.\n", __func__);
                return false;
        }

        /* Copy the mrd descriptors directly. */
        memcpy(mrd_out, mrd_in, mrd_size);

        return true;
}

/**
 * spmc_populate_ffa_v1_0_descriptor - Converts a given v1.1 memory object to
 *                                     the v1.0 format and populates the
 *                                     provided buffer.
 * @dst:            Buffer to populate v1.0 ffa_memory_region_descriptor.
 * @orig_obj:       Object containing v1.1 ffa_memory_region_descriptor.
 * @buf_size:       Size of the buffer to populate.
 * @offset:         The offset of the converted descriptor to copy.
 * @copy_size:      Will be populated with the number of bytes copied.
 * @v1_0_desc_size: Will be populated with the total size of the v1.0
 *                  descriptor.
 *
 * Return: 0 if conversion and population succeeded.
 * Note: This function invalidates the reference to @orig_obj therefore
 *       `spmc_shmem_obj_lookup` must be called if further usage is required.
 */
static uint32_t
spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
                                  size_t buf_size, size_t offset,
                                  size_t *copy_size, size_t *v1_0_desc_size)
{
        struct spmc_shmem_obj *v1_0_obj;

        /* Calculate the size that the v1.0 descriptor will require. */
        *v1_0_desc_size = spmc_shm_get_v1_0_descriptor_size(
                          &orig_obj->desc, orig_obj->desc_size);

        if (*v1_0_desc_size == 0) {
                ERROR("%s: cannot determine size of descriptor.\n",
                      __func__);
                return FFA_ERROR_INVALID_PARAMETER;
        }

        /* Get a new obj to store the v1.0 descriptor. */
        v1_0_obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
                                        *v1_0_desc_size);

        if (!v1_0_obj) {
                return FFA_ERROR_NO_MEMORY;
        }

        /* Perform the conversion from v1.1 to v1.0. */
        if (!spmc_shm_convert_mtd_to_v1_0(v1_0_obj, orig_obj)) {
                spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
                return FFA_ERROR_INVALID_PARAMETER;
        }

        *copy_size = MIN(v1_0_obj->desc_size - offset, buf_size);
        memcpy(dst, (uint8_t *) &v1_0_obj->desc + offset, *copy_size);

        /*
         * We're finished with the v1.0 descriptor for now so free it.
         * Note that this will invalidate any references to the v1.1
         * descriptor.
         */
        spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);

        return 0;
}

static bool compatible_version(uint32_t ffa_version, uint16_t major,
                               uint16_t minor)
{
        bool bit31_set = ffa_version & FFA_VERSION_BIT31_MASK;
        uint16_t majv = (ffa_version >> FFA_VERSION_MAJOR_SHIFT) &
                        FFA_VERSION_MAJOR_MASK;
        uint16_t minv = (ffa_version >> FFA_VERSION_MINOR_SHIFT) &
                        FFA_VERSION_MINOR_MASK;

        return !bit31_set && majv == major && minv >= minor;
}
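/*
 * Examples (comment only) for compatible_version(): a caller reporting
 * FF-A v1.2, i.e. MAKE_FFA_VERSION(1, 2), satisfies a (major=1, minor=1)
 * requirement since the major versions match and 2 >= 1, while
 * MAKE_FFA_VERSION(1, 0) does not (0 < 1), and any value with bit 31 set is
 * rejected outright as malformed.
 */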
static int
spmc_validate_mtd_start(struct ffa_mtd *desc, uint32_t ffa_version,
                        size_t fragment_length, size_t total_length)
{
        unsigned long long emad_end;
        unsigned long long emad_size;
        unsigned long long emad_offset;
        unsigned int min_desc_size;

        /* Determine the appropriate minimum descriptor size. */
        if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
                min_desc_size = sizeof(struct ffa_mtd_v1_0);
        } else if (compatible_version(ffa_version, 1, 1)) {
                min_desc_size = sizeof(struct ffa_mtd);
        } else {
                return FFA_ERROR_INVALID_PARAMETER;
        }
        if (fragment_length < min_desc_size) {
                WARN("%s: invalid length %zu < %u\n", __func__, fragment_length,
                     min_desc_size);
                return FFA_ERROR_INVALID_PARAMETER;
        }

        if (desc->emad_count == 0U) {
                WARN("%s: unsupported attribute desc count %u.\n",
                     __func__, desc->emad_count);
                return FFA_ERROR_INVALID_PARAMETER;
        }

        /*
         * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
         * format, otherwise assume it is a v1.1 format.
         */
        if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
                emad_offset = emad_size = sizeof(struct ffa_emad_v1_0);
        } else {
                if (!is_aligned(desc->emad_offset, 16)) {
                        WARN("%s: Emad offset %" PRIx32 " is not 16-byte aligned.\n",
                             __func__, desc->emad_offset);
                        return FFA_ERROR_INVALID_PARAMETER;
                }
                if (desc->emad_offset < sizeof(struct ffa_mtd)) {
                        WARN("%s: Emad offset too small: 0x%" PRIx32 " < 0x%zx.\n",
                             __func__, desc->emad_offset,
                             sizeof(struct ffa_mtd));
                        return FFA_ERROR_INVALID_PARAMETER;
                }
                emad_offset = desc->emad_offset;
                if (desc->emad_size < sizeof(struct ffa_emad_v1_0)) {
                        WARN("%s: Bad emad size (%" PRIu32 " < %zu).\n", __func__,
                             desc->emad_size, sizeof(struct ffa_emad_v1_0));
                        return FFA_ERROR_INVALID_PARAMETER;
                }
                if (!is_aligned(desc->emad_size, 16)) {
                        WARN("%s: Emad size 0x%" PRIx32 " is not 16-byte aligned.\n",
                             __func__, desc->emad_size);
                        return FFA_ERROR_INVALID_PARAMETER;
                }
                emad_size = desc->emad_size;
        }

        /*
         * Overflow is impossible: the arithmetic happens in at least 64-bit
         * precision, but all of the operands are bounded by UINT32_MAX, and
         *   ((2^32 - 1) * (2^32 - 1) + (2^32 - 1) + (2^32 - 1))
         * = ((2^32 - 1) * ((2^32 - 1) + 1 + 1))
         * = ((2^32 - 1) * (2^32 + 1))
         * = (2^64 - 1).
         */
        CASSERT(sizeof(desc->emad_count) == 4, assert_emad_count_max_too_large);
        emad_end = (desc->emad_count * (unsigned long long)emad_size) +
                   (unsigned long long)sizeof(struct ffa_comp_mrd) +
                   (unsigned long long)emad_offset;

        if (emad_end > total_length) {
                WARN("%s: Composite memory region extends beyond descriptor: 0x%llx > 0x%zx\n",
                     __func__, emad_end, total_length);
                return FFA_ERROR_INVALID_PARAMETER;
        }

        return 0;
}
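/*
 * Worked bound example (comment only), assuming a v1.1 header with
 * emad_offset = 0x30, emad_size = 0x10 and emad_count = 2 (hypothetical
 * values):
 *
 *	emad_end = 2 * 0x10 + sizeof(struct ffa_comp_mrd) + 0x30;
 *
 * which must not exceed total_length, guaranteeing that the emad array and at
 * least a composite descriptor header fit inside the transmitted descriptor.
 */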
static inline const struct ffa_emad_v1_0 *
emad_advance(const struct ffa_emad_v1_0 *emad, size_t offset)
{
        return (const struct ffa_emad_v1_0 *)((const uint8_t *)emad + offset);
}

/**
 * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
 * @obj:         Object containing ffa_memory_region_descriptor.
 * @ffa_version: FF-A version of the provided descriptor.
 *
 * Return: 0 if object is valid, FFA_ERROR_INVALID_PARAMETER if
 *         constituent_memory_region_descriptor offset or count is invalid.
 */
static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
                                uint32_t ffa_version)
{
        unsigned long long total_page_count;
        const struct ffa_emad_v1_0 *first_emad;
        const struct ffa_emad_v1_0 *end_emad;
        size_t emad_size;
        uint32_t comp_mrd_offset;
        size_t header_emad_size;
        size_t size;
        size_t count;
        size_t expected_size;
        const struct ffa_comp_mrd *comp;

        if (obj->desc_filled != obj->desc_size) {
                ERROR("BUG: %s called on incomplete object (%zu != %zu)\n",
                      __func__, obj->desc_filled, obj->desc_size);
                panic();
        }

        if (spmc_validate_mtd_start(&obj->desc, ffa_version,
                                    obj->desc_filled, obj->desc_size)) {
                ERROR("BUG: %s called on object with corrupt memory region descriptor\n",
                      __func__);
                panic();
        }

        first_emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
                                             ffa_version, &emad_size);
        end_emad = emad_advance(first_emad, obj->desc.emad_count * emad_size);
        comp_mrd_offset = first_emad->comp_mrd_offset;

        /* Loop through the endpoint descriptors, validating each of them. */
        for (const struct ffa_emad_v1_0 *emad = first_emad; emad < end_emad;) {
                ffa_endpoint_id16_t ep_id;

                /*
                 * If a partition ID resides in the secure world validate that
                 * the partition ID is for a known partition. Ignore any
                 * partition ID belonging to the normal world as it is assumed
                 * the Hypervisor will have validated these.
                 */
                ep_id = emad->mapd.endpoint_id;
                if (ffa_is_secure_world_id(ep_id)) {
                        if (spmc_get_sp_ctx(ep_id) == NULL) {
                                WARN("%s: Invalid receiver id 0x%x\n",
                                     __func__, ep_id);
                                return FFA_ERROR_INVALID_PARAMETER;
                        }
                }

                /*
                 * The offset provided to the composite memory region descriptor
                 * should be consistent across endpoint descriptors.
                 */
                if (comp_mrd_offset != emad->comp_mrd_offset) {
                        ERROR("%s: mismatching offsets provided, %u != %u\n",
                              __func__, emad->comp_mrd_offset, comp_mrd_offset);
                        return FFA_ERROR_INVALID_PARAMETER;
                }

                /* Advance to the next endpoint descriptor. */
                emad = emad_advance(emad, emad_size);

                /*
                 * Ensure neither this emad nor any subsequent emads have
                 * the same partition ID as the previous emad.
                 */
                for (const struct ffa_emad_v1_0 *other_emad = emad;
                     other_emad < end_emad;
                     other_emad = emad_advance(other_emad, emad_size)) {
                        if (ep_id == other_emad->mapd.endpoint_id) {
                                WARN("%s: Duplicated endpoint id 0x%x\n",
                                     __func__, other_emad->mapd.endpoint_id);
                                return FFA_ERROR_INVALID_PARAMETER;
                        }
                }
        }

        header_emad_size = (size_t)((const uint8_t *)end_emad -
                                    (const uint8_t *)&obj->desc);

        /*
         * Check that the composite descriptor
         * is after the endpoint descriptors.
         */
        if (comp_mrd_offset < header_emad_size) {
                WARN("%s: invalid object, offset %u < header + emad %zu\n",
                     __func__, comp_mrd_offset, header_emad_size);
                return FFA_ERROR_INVALID_PARAMETER;
        }

        /* Ensure the composite descriptor offset is aligned. */
        if (!is_aligned(comp_mrd_offset, 16)) {
                WARN("%s: invalid object, unaligned composite memory "
                     "region descriptor offset %u.\n",
                     __func__, comp_mrd_offset);
                return FFA_ERROR_INVALID_PARAMETER;
        }

        size = obj->desc_size;

        /* Check that the composite descriptor is in bounds. */
        if (comp_mrd_offset > size) {
                WARN("%s: invalid object, offset %u > total size %zu\n",
                     __func__, comp_mrd_offset, obj->desc_size);
                return FFA_ERROR_INVALID_PARAMETER;
        }
        size -= comp_mrd_offset;

        /* Check that there is enough space for the composite descriptor. */
        if (size < sizeof(struct ffa_comp_mrd)) {
                WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
                     __func__, comp_mrd_offset, obj->desc_size);
                return FFA_ERROR_INVALID_PARAMETER;
        }
        size -= sizeof(*comp);

        count = size / sizeof(struct ffa_cons_mrd);

        comp = (const struct ffa_comp_mrd *)
               ((const uint8_t *)(&obj->desc) + comp_mrd_offset);

        if (comp->address_range_count != count) {
                WARN("%s: invalid object, desc count %u != %zu\n",
                     __func__, comp->address_range_count, count);
                return FFA_ERROR_INVALID_PARAMETER;
        }

        /* Ensure that the expected and actual sizes are equal. */
        expected_size = comp_mrd_offset + sizeof(*comp) +
                        count * sizeof(struct ffa_cons_mrd);

        if (expected_size != obj->desc_size) {
                WARN("%s: invalid object, computed size %zu != size %zu\n",
                     __func__, expected_size, obj->desc_size);
                return FFA_ERROR_INVALID_PARAMETER;
        }

        total_page_count = 0;

        /*
         * comp->address_range_count is 32-bit, so 'count' must fit in a
         * uint32_t at this point.
         */
        for (size_t i = 0; i < count; i++) {
                const struct ffa_cons_mrd *mrd = comp->address_range_array + i;

                if (!is_aligned(mrd->address, PAGE_SIZE)) {
                        WARN("%s: invalid object, address in region descriptor "
                             "%zu not 4K aligned (got 0x%016llx)\n",
                             __func__, i, (unsigned long long)mrd->address);
                }

                /*
                 * No overflow possible: total_page_count can hold at
                 * least 2^64 - 1, but will have at most 2^32 - 1
                 * values added to it, each of which cannot exceed 2^32 - 1.
                 */
                total_page_count += mrd->page_count;
        }

        if (comp->total_page_count != total_page_count) {
                WARN("%s: invalid object, desc total_page_count %u != %llu\n",
                     __func__, comp->total_page_count, total_page_count);
                return FFA_ERROR_INVALID_PARAMETER;
        }

        return 0;
}
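/*
 * Size cross-check example (comment only): for a complete object the layout
 * validated above must satisfy, exactly,
 *
 *	desc_size == comp_mrd_offset + sizeof(struct ffa_comp_mrd)
 *		   + address_range_count * sizeof(struct ffa_cons_mrd);
 *
 * so a descriptor with trailing bytes, or one whose composite claims more
 * ranges than the buffer holds, is rejected.
 */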
/**
 * spmc_shmem_check_state_obj - Check if the descriptor describes memory
 *                              regions that are currently involved with an
 *                              existing memory transaction. This implies that
 *                              the memory is not in a valid state for lending.
 * @obj: Object containing ffa_memory_region_descriptor.
 *
 * Return: 0 if object is valid, FFA_ERROR_INVALID_PARAMETER if invalid memory
 *         state.
 */
static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj,
                                      uint32_t ffa_version)
{
        size_t obj_offset = 0;
        struct spmc_shmem_obj *inflight_obj;

        struct ffa_comp_mrd *other_mrd;
        struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj,
                                                                         ffa_version);

        if (requested_mrd == NULL) {
                return FFA_ERROR_INVALID_PARAMETER;
        }

        inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
                                               &obj_offset);

        while (inflight_obj != NULL) {
                /*
                 * Don't compare the transaction to itself or to partially
                 * transmitted descriptors.
                 */
                if ((obj->desc.handle != inflight_obj->desc.handle) &&
                    (obj->desc_size == obj->desc_filled)) {
                        other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj,
                                                                FFA_VERSION_COMPILED);
                        if (other_mrd == NULL) {
                                return FFA_ERROR_INVALID_PARAMETER;
                        }
                        if (overlapping_memory_regions(requested_mrd,
                                                       other_mrd)) {
                                return FFA_ERROR_INVALID_PARAMETER;
                        }
                }

                inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
                                                       &obj_offset);
        }
        return 0;
}
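/*
 * Note (comment only): in-flight objects are compared using
 * FFA_VERSION_COMPILED because completed descriptors are stored internally in
 * the v1.1 format, regardless of the FF-A version of the sender that created
 * them (see the v1.0-to-v1.1 conversion in spmc_ffa_fill_desc() below).
 */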
static long spmc_ffa_fill_desc(struct mailbox *mbox,
                               struct spmc_shmem_obj *obj,
                               uint32_t fragment_length,
                               ffa_mtd_flag32_t mtd_flag,
                               uint32_t ffa_version,
                               void *smc_handle)
{
        int ret;
        uint32_t handle_low;
        uint32_t handle_high;

        if (mbox->rxtx_page_count == 0U) {
                WARN("%s: buffer pair not registered.\n", __func__);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_arg;
        }

        CASSERT(sizeof(mbox->rxtx_page_count) == 4, assert_bogus_page_count);
        if (fragment_length > (uint64_t)mbox->rxtx_page_count * PAGE_SIZE_4KB) {
                WARN("%s: bad fragment size %u > %" PRIu64 " buffer size\n", __func__,
                     fragment_length, (uint64_t)mbox->rxtx_page_count * PAGE_SIZE_4KB);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_arg;
        }

        if (fragment_length > obj->desc_size - obj->desc_filled) {
                WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
                     fragment_length, obj->desc_size - obj->desc_filled);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_arg;
        }

        memcpy((uint8_t *)&obj->desc + obj->desc_filled,
               (uint8_t *) mbox->tx_buffer, fragment_length);

        /* Ensure that the sender ID resides in the normal world. */
        if (ffa_is_secure_world_id(obj->desc.sender_id)) {
                WARN("%s: Invalid sender ID 0x%x.\n",
                     __func__, obj->desc.sender_id);
                ret = FFA_ERROR_DENIED;
                goto err_arg;
        }

        /*
         * Ensure the NS bit is set to 0. Only perform this check
         * for the first fragment, because the bit will be set for
         * all the later fragments.
         */
        if (obj->desc_filled == 0U &&
            (obj->desc.memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
                WARN("%s: NS mem attributes flags MBZ.\n", __func__);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_arg;
        }
        /*
         * Ensure the NS bit is set to 1 since we only allow non-secure senders.
         * The specification requires that the NS bit is MBZ for
         * FFA_MEM_{DONATE,LEND,SHARE,RETRIEVE_REQ}, but we set the bit here
         * for internal bookkeeping to mark that the transaction did come
         * from the normal world.
         */
        obj->desc.memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;

        /*
         * We don't currently support any optional flags so ensure none are
         * requested.
         */
        if (obj->desc.flags != 0U && mtd_flag != 0U &&
            (obj->desc.flags != mtd_flag)) {
                WARN("%s: invalid memory transaction flags %u != %u\n",
                     __func__, obj->desc.flags, mtd_flag);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_arg;
        }

        if (obj->desc_filled == 0U) {
                /* First fragment, descriptor header has been copied. */
                ret = spmc_validate_mtd_start(&obj->desc, ffa_version,
                                              fragment_length, obj->desc_size);
                if (ret != 0) {
                        goto err_bad_desc;
                }

                obj->desc.handle = spmc_shmem_obj_state.next_handle++;
                obj->desc.flags |= mtd_flag;
        }

        obj->desc_filled += fragment_length;

        handle_low = (uint32_t)obj->desc.handle;
        handle_high = obj->desc.handle >> 32;

        if (obj->desc_filled != obj->desc_size) {
                SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
                         handle_high, obj->desc_filled,
                         (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
        }

        /* The full descriptor has been received, perform any final checks. */

        ret = spmc_shmem_check_obj(obj, ffa_version);
        if (ret != 0) {
                goto err_bad_desc;
        }

        ret = spmc_shmem_check_state_obj(obj, ffa_version);
        if (ret) {
                ERROR("%s: invalid memory region descriptor.\n", __func__);
                goto err_bad_desc;
        }

        /*
         * Everything checks out, if the sender was using FF-A v1.0, convert
         * the descriptor format to use the v1.1 structures.
         */
        if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
                struct spmc_shmem_obj *v1_1_obj;
                uint64_t mem_handle;

                /* Calculate the size that the v1.1 descriptor will require. */
                uint64_t v1_1_desc_size =
                    spmc_shm_get_v1_1_descriptor_size((void *) &obj->desc,
                                                      obj->desc_size);

                if (v1_1_desc_size > UINT32_MAX) {
                        ret = FFA_ERROR_NO_MEMORY;
                        goto err_arg;
                }

                /* Get a new obj to store the v1.1 descriptor. */
                v1_1_obj =
                    spmc_shmem_obj_alloc(&spmc_shmem_obj_state, (size_t)v1_1_desc_size);

                if (!v1_1_obj) {
                        ret = FFA_ERROR_NO_MEMORY;
                        goto err_arg;
                }

                /* Perform the conversion from v1.0 to v1.1. */
                v1_1_obj->desc_size = (uint32_t)v1_1_desc_size;
                v1_1_obj->desc_filled = (uint32_t)v1_1_desc_size;
                if (!spmc_shm_convert_shmem_obj_from_v1_0(v1_1_obj, obj)) {
                        ERROR("%s: Could not convert mtd!\n", __func__);
                        spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_1_obj);
                        /* Ensure an error code is reported to the caller. */
                        ret = FFA_ERROR_INVALID_PARAMETER;
                        goto err_arg;
                }

                /*
                 * We're finished with the v1.0 descriptor so free it
                 * and continue our checks with the new v1.1 descriptor.
                 */
                mem_handle = obj->desc.handle;
                spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
                obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
                if (obj == NULL) {
                        ERROR("%s: Failed to find converted descriptor.\n",
                              __func__);
                        ret = FFA_ERROR_INVALID_PARAMETER;
                        return spmc_ffa_error_return(smc_handle, ret);
                }
        }

        /* Allow for platform specific operations to be performed. */
        ret = plat_spmc_shmem_begin(&obj->desc);
        if (ret != 0) {
                goto err_arg;
        }

        SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
                 0, 0, 0);

err_bad_desc:
err_arg:
        spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
        return spmc_ffa_error_return(smc_handle, ret);
}
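/*
 * Fragmentation flow sketch (comment only): when a descriptor does not fit in
 * one call, spmc_ffa_fill_desc() answers FFA_MEM_FRAG_RX with the number of
 * bytes received so far, and the sender resumes with FFA_MEM_FRAG_TX, which
 * lands in spmc_ffa_mem_frag_tx() below:
 *
 *	FFA_MEM_SHARE(total, frag0)  -> FFA_MEM_FRAG_RX(handle, filled)
 *	FFA_MEM_FRAG_TX(handle, frag1) -> ... -> FFA_SUCCESS(handle)
 */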
/**
 * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
 * @client:          Client state.
 * @total_length:    Total length of shared memory descriptor.
 * @fragment_length: Length of fragment of shared memory descriptor passed in
 *                   this call.
 * @address:         Not supported, must be 0.
 * @page_count:      Not supported, must be 0.
 * @smc_handle:      Handle passed to smc call. Used to return
 *                   FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
 *
 * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
 * to share or lend memory from non-secure os to secure os (with no stream
 * endpoints).
 *
 * Return: 0 on success, error code on failure.
 */
long spmc_ffa_mem_send(uint32_t smc_fid,
                       bool secure_origin,
                       uint64_t total_length,
                       uint32_t fragment_length,
                       uint64_t address,
                       uint32_t page_count,
                       void *cookie,
                       void *handle,
                       uint64_t flags)

{
        long ret;
        struct spmc_shmem_obj *obj;
        struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
        ffa_mtd_flag32_t mtd_flag;
        uint32_t ffa_version = get_partition_ffa_version(secure_origin);
        size_t min_desc_size;

        if (address != 0U || page_count != 0U) {
                WARN("%s: custom memory region for message not supported.\n",
                     __func__);
                return spmc_ffa_error_return(handle,
                                             FFA_ERROR_INVALID_PARAMETER);
        }

        if (secure_origin) {
                WARN("%s: unsupported share direction.\n", __func__);
                return spmc_ffa_error_return(handle,
                                             FFA_ERROR_INVALID_PARAMETER);
        }

        if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
                min_desc_size = sizeof(struct ffa_mtd_v1_0);
        } else if (compatible_version(ffa_version, 1, 1)) {
                min_desc_size = sizeof(struct ffa_mtd);
        } else {
                WARN("%s: bad FF-A version.\n", __func__);
                return spmc_ffa_error_return(handle,
                                             FFA_ERROR_INVALID_PARAMETER);
        }

        /* Check if the descriptor is too small for the FF-A version. */
        if (fragment_length < min_desc_size) {
                WARN("%s: bad first fragment size %u < %zu\n",
                     __func__, fragment_length, min_desc_size);
                return spmc_ffa_error_return(handle,
                                             FFA_ERROR_INVALID_PARAMETER);
        }

        if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
                mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
        } else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
                mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
        } else {
                WARN("%s: invalid memory management operation.\n", __func__);
                return spmc_ffa_error_return(handle,
                                             FFA_ERROR_INVALID_PARAMETER);
        }

        spin_lock(&spmc_shmem_obj_state.lock);
        obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
        if (obj == NULL) {
                ret = FFA_ERROR_NO_MEMORY;
                goto err_unlock;
        }

        spin_lock(&mbox->lock);
        ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag,
                                 ffa_version, handle);
        spin_unlock(&mbox->lock);

        spin_unlock(&spmc_shmem_obj_state.lock);
        return ret;

err_unlock:
        spin_unlock(&spmc_shmem_obj_state.lock);
        return spmc_ffa_error_return(handle, ret);
}
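/*
 * Handle encoding example (comment only): the 64-bit memory handle travels
 * through the SMC interface as two 32-bit halves and is reassembled as
 *
 *	mem_handle = handle_low | ((uint64_t)handle_high << 32);
 *
 * as done at the top of spmc_ffa_mem_frag_tx() below.
 */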
/**
 * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
 * @client:          Client state.
 * @handle_low:      Handle_low value returned from FFA_MEM_FRAG_RX.
 * @handle_high:     Handle_high value returned from FFA_MEM_FRAG_RX.
 * @fragment_length: Length of fragments transmitted.
 * @sender_id:       Vmid of sender in bits [31:16].
 * @smc_handle:      Handle passed to smc call. Used to return
 *                   FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
 *
 * Return: @smc_handle on success, error code on failure.
 */
long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
                          bool secure_origin,
                          uint64_t handle_low,
                          uint64_t handle_high,
                          uint32_t fragment_length,
                          uint32_t sender_id,
                          void *cookie,
                          void *handle,
                          uint64_t flags)
{
        long ret;
        uint32_t desc_sender_id;
        uint32_t ffa_version = get_partition_ffa_version(secure_origin);
        struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);

        struct spmc_shmem_obj *obj;
        uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);

        spin_lock(&spmc_shmem_obj_state.lock);

        obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
        if (obj == NULL) {
                WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
                     __func__, mem_handle);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock;
        }

        desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
        if (sender_id != desc_sender_id) {
                WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
                     sender_id, desc_sender_id);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock;
        }

        if (obj->desc_filled == obj->desc_size) {
                WARN("%s: object desc already filled, %zu\n", __func__,
                     obj->desc_filled);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock;
        }

        spin_lock(&mbox->lock);
        ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, ffa_version,
                                 handle);
        spin_unlock(&mbox->lock);

        spin_unlock(&spmc_shmem_obj_state.lock);
        return ret;

err_unlock:
        spin_unlock(&spmc_shmem_obj_state.lock);
        return spmc_ffa_error_return(handle, ret);
}
/**
 * spmc_ffa_mem_retrieve_update_ns_bit - Update the NS bit in the response
 *                                       descriptor if the caller implements a
 *                                       version smaller than FF-A 1.1 and if
 *                                       they have not requested the
 *                                       functionality.
 * @resp:   Descriptor populated in caller's RX buffer.
 * @sp_ctx: Context of the calling SP.
 */
void spmc_ffa_mem_retrieve_update_ns_bit(struct ffa_mtd *resp,
                                         struct secure_partition_desc *sp_ctx,
                                         bool secure_origin)
{
        if (secure_origin &&
            sp_ctx->ffa_version < MAKE_FFA_VERSION(1, 1) &&
            !sp_ctx->ns_bit_requested) {
                resp->memory_region_attributes &= ~FFA_MEM_ATTR_NS_BIT;
        }
}

/**
 * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
 * @smc_fid:         FID of SMC.
 * @total_length:    Total length of retrieve request descriptor if this is
 *                   the first call. Otherwise (unsupported) must be 0.
 * @fragment_length: Length of fragment of retrieve request descriptor passed
 *                   in this call. Only @fragment_length == @total_length is
 *                   supported by this implementation.
 * @address:         Not supported, must be 0.
 * @page_count:      Not supported, must be 0.
 * @smc_handle:      Handle passed to smc call. Used to return
 *                   FFA_MEM_RETRIEVE_RESP.
 *
 * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
 * Used by secure os to retrieve memory already shared by non-secure os.
 * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
 * the client must call FFA_MEM_FRAG_RX until the full response has been
 * received.
 *
 * Return: @handle on success, error code on failure.
 */
long
spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
                          bool secure_origin,
                          uint32_t total_length,
                          uint32_t fragment_length,
                          uint64_t address,
                          uint32_t page_count,
                          void *cookie,
                          void *handle,
                          uint64_t flags)
{
        int ret;
        size_t buf_size;
        size_t copy_size = 0;
        size_t min_desc_size;
        size_t out_desc_size = 0;

        /*
         * Currently we are only accessing fields that are the same in both the
         * v1.0 and v1.1 mtd struct therefore we can use a v1.1 struct directly
         * here. We only need validate against the appropriate struct size.
         */
        struct ffa_mtd *resp;
        const struct ffa_mtd *req;
        struct spmc_shmem_obj *obj = NULL;
        struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
        uint32_t ffa_version = get_partition_ffa_version(secure_origin);
        struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();

        if (!secure_origin) {
                WARN("%s: unsupported retrieve req direction.\n", __func__);
                return spmc_ffa_error_return(handle,
                                             FFA_ERROR_INVALID_PARAMETER);
        }

        if (address != 0U || page_count != 0U) {
                WARN("%s: custom memory region not supported.\n", __func__);
                return spmc_ffa_error_return(handle,
                                             FFA_ERROR_INVALID_PARAMETER);
        }

        spin_lock(&mbox->lock);

        req = mbox->tx_buffer;
        resp = mbox->rx_buffer;
        buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;

        if (mbox->rxtx_page_count == 0U) {
                WARN("%s: buffer pair not registered.\n", __func__);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_mailbox;
        }

        if (mbox->state != MAILBOX_STATE_EMPTY) {
                WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
                ret = FFA_ERROR_DENIED;
                goto err_unlock_mailbox;
        }

        if (fragment_length != total_length) {
                WARN("%s: fragmented retrieve request not supported.\n",
                     __func__);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_mailbox;
        }

        if (req->emad_count == 0U) {
                WARN("%s: unsupported attribute desc count %u.\n",
                     __func__, req->emad_count);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_mailbox;
        }

        /* Determine the appropriate minimum descriptor size. */
        if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
                min_desc_size = sizeof(struct ffa_mtd_v1_0);
        } else {
                min_desc_size = sizeof(struct ffa_mtd);
        }
        if (total_length < min_desc_size) {
                WARN("%s: invalid length %u < %zu\n", __func__, total_length,
                     min_desc_size);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_mailbox;
        }
        spin_lock(&spmc_shmem_obj_state.lock);

        obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
        if (obj == NULL) {
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        if (obj->desc_filled != obj->desc_size) {
                WARN("%s: incomplete object desc filled %zu < size %zu\n",
                     __func__, obj->desc_filled, obj->desc_size);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
                WARN("%s: wrong sender id 0x%x != 0x%x\n",
                     __func__, req->sender_id, obj->desc.sender_id);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        if (req->emad_count != 0U && req->tag != obj->desc.tag) {
                WARN("%s: wrong tag 0x%lx != 0x%lx\n",
                     __func__, req->tag, obj->desc.tag);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
                WARN("%s: mismatch of endpoint counts %u != %u\n",
                     __func__, req->emad_count, obj->desc.emad_count);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        /* Ensure the NS bit is set to 0 in the request. */
        if ((req->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
                WARN("%s: NS mem attributes flags MBZ.\n", __func__);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        if (req->flags != 0U) {
                if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
                    (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
                        /*
                         * If the retrieve request specifies the memory
                         * transaction ensure it matches what we expect.
                         */
                        WARN("%s: wrong mem transaction flags %x != %x\n",
                             __func__, req->flags, obj->desc.flags);
                        ret = FFA_ERROR_INVALID_PARAMETER;
                        goto err_unlock_all;
                }

                if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
                    req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
                        /*
                         * Current implementation does not support donate and
                         * it supports no other flags.
                         */
                        WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
                        ret = FFA_ERROR_INVALID_PARAMETER;
                        goto err_unlock_all;
                }
        }
        /* Validate the caller is a valid participant. */
        if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
                WARN("%s: Invalid endpoint ID (0x%x).\n",
                     __func__, sp_ctx->sp_id);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        /* Validate that the provided emad offset and structure are valid. */
        for (size_t i = 0; i < req->emad_count; i++) {
                size_t emad_size;
                struct ffa_emad_v1_0 *emad;

                emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
                                               &emad_size);

                if ((uintptr_t) emad >= (uintptr_t)
                    ((uint8_t *) req + total_length)) {
                        WARN("Invalid emad access.\n");
                        ret = FFA_ERROR_INVALID_PARAMETER;
                        goto err_unlock_all;
                }
        }

        /*
         * Validate all the endpoints match in the case of multiple
         * borrowers. We don't mandate that the order of the borrowers
         * must match in the descriptors therefore check to see if the
         * endpoints match in any order.
         */
        for (size_t i = 0; i < req->emad_count; i++) {
                bool found = false;
                size_t emad_size;
                struct ffa_emad_v1_0 *emad;
                struct ffa_emad_v1_0 *other_emad;

                emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
                                               &emad_size);

                for (size_t j = 0; j < obj->desc.emad_count; j++) {
                        other_emad = spmc_shmem_obj_get_emad(
                                        &obj->desc, j, MAKE_FFA_VERSION(1, 1),
                                        &emad_size);

                        if (req->emad_count &&
                            emad->mapd.endpoint_id ==
                            other_emad->mapd.endpoint_id) {
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        WARN("%s: invalid receiver id (0x%x).\n",
                             __func__, emad->mapd.endpoint_id);
                        ret = FFA_ERROR_INVALID_PARAMETER;
                        goto err_unlock_all;
                }
        }
        mbox->state = MAILBOX_STATE_FULL;

        if (req->emad_count != 0U) {
                obj->in_use++;
        }

        /*
         * If the caller is v1.0 convert the descriptor, otherwise copy
         * directly.
         */
        if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
                ret = spmc_populate_ffa_v1_0_descriptor(resp, obj, buf_size, 0,
                                                        &copy_size,
                                                        &out_desc_size);
                if (ret != 0U) {
                        ERROR("%s: Failed to process descriptor.\n", __func__);
                        goto err_unlock_all;
                }
        } else {
                copy_size = MIN(obj->desc_size, buf_size);
                out_desc_size = obj->desc_size;

                memcpy(resp, &obj->desc, copy_size);
        }

        /* Update the NS bit in the response if applicable. */
        spmc_ffa_mem_retrieve_update_ns_bit(resp, sp_ctx, secure_origin);

        spin_unlock(&spmc_shmem_obj_state.lock);
        spin_unlock(&mbox->lock);

        SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, out_desc_size,
                 copy_size, 0, 0, 0, 0, 0);

err_unlock_all:
        spin_unlock(&spmc_shmem_obj_state.lock);
err_unlock_mailbox:
        spin_unlock(&mbox->lock);
        return spmc_ffa_error_return(handle, ret);
}
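/*
 * Receiver-side sketch (comment only): if FFA_MEM_RETRIEVE_RESP reports a
 * descriptor larger than the copied fragment, the SP keeps calling
 * FFA_MEM_FRAG_RX with a growing fragment_offset until the whole descriptor
 * has been read:
 *
 *	FFA_MEM_RETRIEVE_REQ -> FFA_MEM_RETRIEVE_RESP(total, frag_len)
 *	FFA_MEM_FRAG_RX(handle, frag_len) -> FFA_MEM_FRAG_TX(...)
 *	... until total bytes have been received.
 */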
/**
 * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
 * @client:          Client state.
 * @handle_low:      Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
 * @handle_high:     Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
 * @fragment_offset: Byte offset in descriptor to resume at.
 * @sender_id:       Bit[31:16]: Endpoint id of sender if client is a
 *                   hypervisor. 0 otherwise.
 * @smc_handle:      Handle passed to smc call. Used to return
 *                   FFA_MEM_FRAG_TX.
 *
 * Return: @smc_handle on success, error code on failure.
 */
long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
                          bool secure_origin,
                          uint32_t handle_low,
                          uint32_t handle_high,
                          uint32_t fragment_offset,
                          uint32_t sender_id,
                          void *cookie,
                          void *handle,
                          uint64_t flags)
{
        int ret;
        void *src;
        size_t buf_size;
        size_t copy_size;
        size_t full_copy_size;
        uint32_t desc_sender_id;
        struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
        uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
        struct spmc_shmem_obj *obj;
        uint32_t ffa_version = get_partition_ffa_version(secure_origin);

        if (!secure_origin) {
                WARN("%s: can only be called from swld.\n",
                     __func__);
                return spmc_ffa_error_return(handle,
                                             FFA_ERROR_INVALID_PARAMETER);
        }

        spin_lock(&spmc_shmem_obj_state.lock);

        obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
        if (obj == NULL) {
                WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
                     __func__, mem_handle);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_shmem;
        }

        desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
        if (sender_id != 0U && sender_id != desc_sender_id) {
                WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
                     sender_id, desc_sender_id);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_shmem;
        }

        if (fragment_offset >= obj->desc_size) {
                WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
                     __func__, fragment_offset, obj->desc_size);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_shmem;
        }

        spin_lock(&mbox->lock);

        if (mbox->rxtx_page_count == 0U) {
                WARN("%s: buffer pair not registered.\n", __func__);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        if (mbox->state != MAILBOX_STATE_EMPTY) {
                WARN("%s: RX Buffer is full!\n", __func__);
                ret = FFA_ERROR_DENIED;
                goto err_unlock_all;
        }

        buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;

        mbox->state = MAILBOX_STATE_FULL;

        /*
         * If the caller is v1.0 convert the descriptor, otherwise copy
         * directly.
         */
        if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
                size_t out_desc_size;

                ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
                                                        buf_size,
                                                        fragment_offset,
                                                        &copy_size,
                                                        &out_desc_size);
                if (ret != 0U) {
                        ERROR("%s: Failed to process descriptor.\n", __func__);
                        goto err_unlock_all;
                }
        } else {
                full_copy_size = obj->desc_size - fragment_offset;
                copy_size = MIN(full_copy_size, buf_size);

                src = &obj->desc;

                memcpy(mbox->rx_buffer, src + fragment_offset, copy_size);
        }

        spin_unlock(&mbox->lock);
        spin_unlock(&spmc_shmem_obj_state.lock);

        SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
                 copy_size, sender_id, 0, 0, 0);

err_unlock_all:
        spin_unlock(&mbox->lock);
err_unlock_shmem:
        spin_unlock(&spmc_shmem_obj_state.lock);
        return spmc_ffa_error_return(handle, ret);
}
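/*
 * Relinquish request sketch (comment only): the SP places a
 * struct ffa_mem_relinquish_descriptor in its TX buffer before the call,
 * e.g. (hypothetical values):
 *
 *	req->handle = mem_handle;
 *	req->flags = 0;
 *	req->endpoint_count = 1;
 *	req->endpoint_array[0] = own_sp_id;
 *
 * which is what spmc_ffa_mem_relinquish() below validates.
 */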
/**
 * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
 * @client: Client state.
 *
 * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
 * Used by secure os to release previously shared memory to non-secure os.
 *
 * The handle to release must be in the client's (secure os's) transmit buffer.
 *
 * Return: 0 on success, error code on failure.
 */
int spmc_ffa_mem_relinquish(uint32_t smc_fid,
                            bool secure_origin,
                            uint32_t handle_low,
                            uint32_t handle_high,
                            uint32_t fragment_offset,
                            uint32_t sender_id,
                            void *cookie,
                            void *handle,
                            uint64_t flags)
{
        int ret;
        struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
        struct spmc_shmem_obj *obj;
        const struct ffa_mem_relinquish_descriptor *req;
        struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();

        if (!secure_origin) {
                WARN("%s: unsupported relinquish direction.\n", __func__);
                return spmc_ffa_error_return(handle,
                                             FFA_ERROR_INVALID_PARAMETER);
        }

        spin_lock(&mbox->lock);

        if (mbox->rxtx_page_count == 0U) {
                WARN("%s: buffer pair not registered.\n", __func__);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_mailbox;
        }

        req = mbox->tx_buffer;

        if (req->flags != 0U) {
                WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_mailbox;
        }

        if (req->endpoint_count == 0) {
                WARN("%s: endpoint count cannot be 0.\n", __func__);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_mailbox;
        }

        spin_lock(&spmc_shmem_obj_state.lock);

        obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
        if (obj == NULL) {
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        /*
         * Validate the endpoint ID was populated correctly. We don't currently
         * support proxy endpoints so the endpoint count should always be 1.
         */
        if (req->endpoint_count != 1U) {
                WARN("%s: unsupported endpoint count %u != 1\n", __func__,
                     req->endpoint_count);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        /* Validate provided endpoint ID matches the partition ID. */
        if (req->endpoint_array[0] != sp_ctx->sp_id) {
                WARN("%s: invalid endpoint ID %u != %u\n", __func__,
                     req->endpoint_array[0], sp_ctx->sp_id);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        /* Validate the caller is a valid participant. */
        if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
                WARN("%s: Invalid endpoint ID (0x%x).\n",
                     __func__, req->endpoint_array[0]);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        if (obj->in_use == 0U) {
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }
        obj->in_use--;

        spin_unlock(&spmc_shmem_obj_state.lock);
        spin_unlock(&mbox->lock);

        SMC_RET1(handle, FFA_SUCCESS_SMC32);

err_unlock_all:
        spin_unlock(&spmc_shmem_obj_state.lock);
err_unlock_mailbox:
        spin_unlock(&mbox->lock);
        return spmc_ffa_error_return(handle, ret);
}
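/*
 * Lifecycle note (comment only): obj->in_use counts successful
 * FFA_MEM_RETRIEVE_REQ calls and is decremented above on relinquish;
 * FFA_MEM_RECLAIM below refuses to free the object until it drops back to
 * zero, so memory cannot be reclaimed while a borrower still holds it.
 */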
/**
 * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
 * @client:      Client state.
 * @handle_low:  Unique handle of shared memory object to reclaim. Bit[31:0].
 * @handle_high: Unique handle of shared memory object to reclaim.
 *               Bit[63:32].
 * @flags:       Unsupported, ignored.
 *
 * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
 * Used by non-secure os to reclaim memory previously shared with secure os.
 *
 * Return: 0 on success, error code on failure.
 */
int spmc_ffa_mem_reclaim(uint32_t smc_fid,
                         bool secure_origin,
                         uint32_t handle_low,
                         uint32_t handle_high,
                         uint32_t mem_flags,
                         uint64_t x4,
                         void *cookie,
                         void *handle,
                         uint64_t flags)
{
        int ret;
        struct spmc_shmem_obj *obj;
        uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);

        if (secure_origin) {
                WARN("%s: unsupported reclaim direction.\n", __func__);
                return spmc_ffa_error_return(handle,
                                             FFA_ERROR_INVALID_PARAMETER);
        }

        if (mem_flags != 0U) {
                WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
                return spmc_ffa_error_return(handle,
                                             FFA_ERROR_INVALID_PARAMETER);
        }

        spin_lock(&spmc_shmem_obj_state.lock);

        obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
        if (obj == NULL) {
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock;
        }
        if (obj->in_use != 0U) {
                ret = FFA_ERROR_DENIED;
                goto err_unlock;
        }

        if (obj->desc_filled != obj->desc_size) {
                WARN("%s: incomplete object desc filled %zu < size %zu\n",
                     __func__, obj->desc_filled, obj->desc_size);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock;
        }

        /* Allow for platform specific operations to be performed. */
        ret = plat_spmc_shmem_reclaim(&obj->desc);
        if (ret != 0) {
                goto err_unlock;
        }

        spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
        spin_unlock(&spmc_shmem_obj_state.lock);

        SMC_RET1(handle, FFA_SUCCESS_SMC32);

err_unlock:
        spin_unlock(&spmc_shmem_obj_state.lock);
        return spmc_ffa_error_return(handle, ret);
}