xref: /rk3399_ARM-atf/services/std_svc/spm/el3_spmc/spmc_shared_mem.c (revision cbbb8a03d627b9734e8ad605c0bd1565effa7097)
1 /*
2  * Copyright (c) 2022-2023, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 #include <assert.h>
7 #include <errno.h>
8 #include <inttypes.h>
9 
10 #include <common/debug.h>
11 #include <common/runtime_svc.h>
12 #include <lib/object_pool.h>
13 #include <lib/spinlock.h>
14 #include <lib/xlat_tables/xlat_tables_v2.h>
15 #include <services/ffa_svc.h>
16 #include "spmc.h"
17 #include "spmc_shared_mem.h"
18 
19 #include <platform_def.h>
20 
21 /**
22  * struct spmc_shmem_obj - Shared memory object.
23  * @desc_size:      Size of @desc.
24  * @desc_filled:    Size of @desc already received.
25  * @in_use:         Number of clients that have called ffa_mem_retrieve_req
26  *                  without a matching ffa_mem_relinquish call.
27  * @desc:           FF-A memory region descriptor passed in ffa_mem_share.
28  */
29 struct spmc_shmem_obj {
30 	size_t desc_size;
31 	size_t desc_filled;
32 	size_t in_use;
33 	struct ffa_mtd desc;
34 };
35 
36 /*
37  * Declare our data structure to store the metadata of memory share requests.
38  * The main datastore is allocated on a per-platform basis to ensure enough
39  * storage can be made available.
40  * The address of the datastore will be populated by the SPMC during its
41  * initialization.
42  */
43 
44 struct spmc_shmem_obj_state spmc_shmem_obj_state = {
45 	/* Set the start value for handles so the top 32 bits are needed quickly. */
46 	.next_handle = 0xffffffc0U,
47 };
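
/*
 * For illustration, the datastore itself comes from the platform: during
 * boot the SPMC calls the EL3 SPMC platform hook
 * plat_spmc_shmem_datastore_get() and fills in .data/.data_size above.
 * A minimal sketch of such a hook (the buffer name and size macro are
 * assumptions, not part of this file):
 *
 *   static uint8_t plat_shmem_datastore[PLAT_SPMC_SHMEM_DATASTORE_SIZE];
 *
 *   int plat_spmc_shmem_datastore_get(uint8_t **datastore, size_t *size)
 *   {
 *           *datastore = plat_shmem_datastore;
 *           *size = sizeof(plat_shmem_datastore);
 *           return 0;
 *   }
 */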
48 
49 /**
50  * spmc_shmem_obj_size - Convert from descriptor size to object size.
51  * @desc_size:  Size of struct ffa_memory_region_descriptor object.
52  *
53  * Return: Size of struct spmc_shmem_obj object.
54  */
55 static size_t spmc_shmem_obj_size(size_t desc_size)
56 {
57 	return desc_size + offsetof(struct spmc_shmem_obj, desc);
58 }
59 
60 /**
61  * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
62  * @state:      Global state.
63  * @desc_size:  Size of struct ffa_memory_region_descriptor object that
64  *              allocated object will hold.
65  *
66  * Return: Pointer to newly allocated object, or %NULL if there is not enough
67  *         space left. The returned pointer is only valid while @state is
68  *         locked; to use it again after unlocking @state,
69  *         spmc_shmem_obj_lookup must be called.
70  */
71 static struct spmc_shmem_obj *
72 spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
73 {
74 	struct spmc_shmem_obj *obj;
75 	size_t free = state->data_size - state->allocated;
76 	size_t obj_size;
77 
78 	if (state->data == NULL) {
79 		ERROR("Missing shmem datastore!\n");
80 		return NULL;
81 	}
82 
83 	obj_size = spmc_shmem_obj_size(desc_size);
84 
85 	/* Ensure the obj size has not overflowed. */
86 	if (obj_size < desc_size) {
87 		WARN("%s(0x%zx) desc_size overflow\n",
88 		     __func__, desc_size);
89 		return NULL;
90 	}
91 
92 	if (obj_size > free) {
93 		WARN("%s(0x%zx) failed, free 0x%zx\n",
94 		     __func__, desc_size, free);
95 		return NULL;
96 	}
97 	obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
98 	obj->desc = (struct ffa_mtd) {0};
99 	obj->desc_size = desc_size;
100 	obj->desc_filled = 0;
101 	obj->in_use = 0;
102 	state->allocated += obj_size;
103 	return obj;
104 }
105 
106 /**
107  * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
108  * @state:      Global state.
109  * @obj:        Object to free.
110  *
111  * Release memory used by @obj. Other objects may move, so on return all
112  * pointers to struct spmc_shmem_obj object should be considered invalid, not
113  * just @obj.
114  *
115  * The current implementation always compacts the remaining objects to simplify
116  * the allocator and to avoid fragmentation.
117  */
118 
119 static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
120 				  struct spmc_shmem_obj *obj)
121 {
122 	size_t free_size = spmc_shmem_obj_size(obj->desc_size);
123 	uint8_t *shift_dest = (uint8_t *)obj;
124 	uint8_t *shift_src = shift_dest + free_size;
125 	size_t shift_size = state->allocated - (shift_src - state->data);
126 
127 	if (shift_size != 0U) {
128 		memmove(shift_dest, shift_src, shift_size);
129 	}
130 	state->allocated -= free_size;
131 }
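
/*
 * Compaction example: with three objects A, B and C packed contiguously in
 * the datastore, freeing B moves C down over B and shrinks the allocation:
 *
 *   before: | A | B | C | free ... |
 *   after:  | A | C | free ...     |
 *
 * Because objects move, callers must re-resolve any handle with
 * spmc_shmem_obj_lookup() after a free.
 */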
132 
133 /**
134  * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
135  * @state:      Global state.
136  * @handle:     Unique handle of object to return.
137  *
138  * Return: struct spmc_shmem_obj object with handle matching @handle.
139  *         %NULL, if no object in @state->data has a matching handle.
140  */
141 static struct spmc_shmem_obj *
142 spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
143 {
144 	uint8_t *curr = state->data;
145 
146 	while (curr - state->data < state->allocated) {
147 		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
148 
149 		if (obj->desc.handle == handle) {
150 			return obj;
151 		}
152 		curr += spmc_shmem_obj_size(obj->desc_size);
153 	}
154 	return NULL;
155 }
156 
157 /**
158  * spmc_shmem_obj_get_next - Get the next memory object from an offset.
159  * @offset:     Offset used to track which objects have previously been
160  *              returned.
161  *
162  * Return: the next struct spmc_shmem_obj object from the provided
163  *	   offset.
164  *	   %NULL, if there are no more objects.
165  */
166 static struct spmc_shmem_obj *
167 spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
168 {
169 	uint8_t *curr = state->data + *offset;
170 
171 	if (curr - state->data < state->allocated) {
172 		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
173 
174 		*offset += spmc_shmem_obj_size(obj->desc_size);
175 
176 		return obj;
177 	}
178 	return NULL;
179 }
180 
181 /*******************************************************************************
182  * FF-A memory descriptor helper functions.
183  ******************************************************************************/
184 /**
185  * spmc_shmem_obj_get_emad - Get the emad at a given index depending on the
186  *                           client's FF-A version.
187  * @desc:         The memory transaction descriptor.
188  * @index:        The index of the emad element to be accessed.
189  * @ffa_version:  FF-A version of the provided structure.
190  * @emad_size:    Will be populated with the size of the returned emad
191  *                descriptor.
192  * Return: A pointer to the requested emad structure.
193  */
194 static void *
195 spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
196 			uint32_t ffa_version, size_t *emad_size)
197 {
198 	uint8_t *emad;
199 	/*
200 	 * If the caller is using FF-A v1.0, interpret the descriptor as the
201 	 * v1.0 format; otherwise assume it is the v1.1 format.
202 	 */
203 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
204 		emad = (uint8_t *)desc + offsetof(struct ffa_mtd_v1_0, emad);
205 		*emad_size = sizeof(struct ffa_emad_v1_0);
206 	} else {
207 		assert(is_aligned(desc->emad_offset, 16));
208 		emad = ((uint8_t *) desc + desc->emad_offset);
209 		*emad_size = desc->emad_size;
210 	}
211 
212 	assert(((uint64_t)index * (uint64_t)*emad_size) <= UINT32_MAX);
213 	return (emad + (*emad_size * index));
214 }
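
/*
 * For reference, the two layouts the accessor above abstracts over:
 *
 *   v1.0: | struct ffa_mtd_v1_0 | emad[0] | emad[1] | ...
 *         emad array at a fixed offset with a fixed
 *         sizeof(struct ffa_emad_v1_0) stride.
 *
 *   v1.1: | struct ffa_mtd | ... | emad[0] | emad[1] | ...
 *         emad array located via desc->emad_offset with a stride of
 *         desc->emad_size, both 16-byte aligned.
 */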
215 
216 /**
217  * spmc_shmem_obj_get_comp_mrd - Get comp_mrd from a mtd struct based on the
218  *				 FF-A version of the descriptor.
219  * @obj:    Object containing ffa_memory_region_descriptor.
220  *
221  * Return: struct ffa_comp_mrd object corresponding to the composite memory
222  *	   region descriptor.
223  */
224 static struct ffa_comp_mrd *
225 spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
226 {
227 	size_t emad_size;
228 	/*
229 	 * The comp_mrd_offset field of the emad descriptor remains consistent
230 	 * between FF-A versions, so we can use the v1.0 descriptor here
231 	 * in all cases.
232 	 */
233 	struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
234 							     ffa_version,
235 							     &emad_size);
236 
237 	/* Ensure the composite descriptor offset is aligned. */
238 	if (!is_aligned(emad->comp_mrd_offset, 8)) {
239 		WARN("Unaligned composite memory region descriptor offset.\n");
240 		return NULL;
241 	}
242 
243 	return (struct ffa_comp_mrd *)
244 	       ((uint8_t *)(&obj->desc) + emad->comp_mrd_offset);
245 }
246 
247 /**
248  * spmc_shmem_obj_ffa_constituent_size - Calculate variable size part of obj.
249  * @obj:    Object containing ffa_memory_region_descriptor.
250  *
251  * Return: Size of ffa_constituent_memory_region_descriptors in @obj.
252  */
253 static size_t
254 spmc_shmem_obj_ffa_constituent_size(struct spmc_shmem_obj *obj,
255 				    uint32_t ffa_version)
256 {
257 	struct ffa_comp_mrd *comp_mrd;
258 
259 	comp_mrd = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);
260 	if (comp_mrd == NULL) {
261 		return 0;
262 	}
263 	return comp_mrd->address_range_count * sizeof(struct ffa_cons_mrd);
264 }
265 
266 /**
267  * spmc_shmem_obj_validate_id - Validate a partition ID is participating in
268  *				a given memory transaction.
269  * @sp_id:      Partition ID to validate.
270  * @obj:        The shared memory object containing the descriptor
271  *              of the memory transaction.
272  * Return: true if ID is valid, else false.
273  */
274 bool spmc_shmem_obj_validate_id(struct spmc_shmem_obj *obj, uint16_t sp_id)
275 {
276 	bool found = false;
277 	struct ffa_mtd *desc = &obj->desc;
278 	size_t desc_size = obj->desc_size;
279 
280 	/* Validate the partition is a valid participant. */
281 	for (unsigned int i = 0U; i < desc->emad_count; i++) {
282 		size_t emad_size;
283 		struct ffa_emad_v1_0 *emad;
284 
285 		emad = spmc_shmem_obj_get_emad(desc, i,
286 					       MAKE_FFA_VERSION(1, 1),
287 					       &emad_size);
288 		/*
289 		 * Validate the calculated emad address resides within the
290 		 * descriptor.
291 		 */
292 		if ((emad == NULL) || (uintptr_t) emad >=
293 		    (uintptr_t)((uint8_t *) desc + desc_size)) {
294 			VERBOSE("Invalid emad.\n");
295 			break;
296 		}
297 		if (sp_id == emad->mapd.endpoint_id) {
298 			found = true;
299 			break;
300 		}
301 	}
302 	return found;
303 }
304 
305 /*
306  * Compare two memory regions to determine if any range overlaps with another
307  * ongoing memory transaction.
308  */
309 static bool
310 overlapping_memory_regions(struct ffa_comp_mrd *region1,
311 			   struct ffa_comp_mrd *region2)
312 {
313 	uint64_t region1_start;
314 	uint64_t region1_size;
315 	uint64_t region1_end;
316 	uint64_t region2_start;
317 	uint64_t region2_size;
318 	uint64_t region2_end;
319 
320 	assert(region1 != NULL);
321 	assert(region2 != NULL);
322 
323 	if (region1 == region2) {
324 		return true;
325 	}
326 
327 	/*
328 	 * Check each memory region in the request against existing
329 	 * transactions.
330 	 */
331 	for (size_t i = 0; i < region1->address_range_count; i++) {
332 
333 		region1_start = region1->address_range_array[i].address;
334 		region1_size =
335 			region1->address_range_array[i].page_count *
336 			PAGE_SIZE_4KB;
337 		region1_end = region1_start + region1_size;
338 
339 		for (size_t j = 0; j < region2->address_range_count; j++) {
340 
341 			region2_start = region2->address_range_array[j].address;
342 			region2_size =
343 				region2->address_range_array[j].page_count *
344 				PAGE_SIZE_4KB;
345 			region2_end = region2_start + region2_size;
346 
347 			/* Check if regions are not overlapping. */
348 			if (!((region2_end <= region1_start) ||
349 			      (region1_end <= region2_start))) {
350 				WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
351 				     region1_start, region1_end,
352 				     region2_start, region2_end);
353 				return true;
354 			}
355 		}
356 	}
357 	return false;
358 }
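
/*
 * Worked example of the test above: ranges [0x8000, 0x9000) and
 * [0x8800, 0xa000) overlap, since neither 0xa000 <= 0x8000 nor
 * 0x9000 <= 0x8800 holds, so the negated disjointness check reports a
 * conflict.
 */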
359 
360 /*******************************************************************************
361  * FF-A v1.0 Memory Descriptor Conversion Helpers.
362  ******************************************************************************/
363 /**
364  * spmc_shm_get_v1_1_descriptor_size - Calculate the required size for a v1.1
365  *                                     converted descriptor.
366  * @orig:       The original v1.0 memory transaction descriptor.
367  * @desc_size:  The size of the original v1.0 memory transaction descriptor.
368  *
369  * Return: the size required to store the descriptor in the v1.1 format.
370  */
371 static size_t
372 spmc_shm_get_v1_1_descriptor_size(struct ffa_mtd_v1_0 *orig, size_t desc_size)
373 {
374 	size_t size = 0;
375 	struct ffa_comp_mrd *mrd;
376 	struct ffa_emad_v1_0 *emad_array = orig->emad;
377 
378 	/* Get the size of the v1.1 descriptor. */
379 	size += sizeof(struct ffa_mtd);
380 
381 	/* Add the size of the emad descriptors. */
382 	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);
383 
384 	/* Add the size of the composite mrds. */
385 	size += sizeof(struct ffa_comp_mrd);
386 
387 	/* Add the size of the constituent mrds. */
388 	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
389 	      emad_array[0].comp_mrd_offset);
390 
391 	/* Check the calculated address is within the memory descriptor. */
392 	if (((uintptr_t) mrd + sizeof(struct ffa_comp_mrd)) >
393 	    (uintptr_t)((uint8_t *) orig + desc_size)) {
394 		return 0;
395 	}
396 	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);
397 
398 	return size;
399 }
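
/*
 * For example, a v1.0 descriptor with two endpoints and three constituent
 * address ranges converts to a v1.1 descriptor of size:
 *
 *   sizeof(struct ffa_mtd) + 2 * sizeof(struct ffa_emad_v1_0) +
 *   sizeof(struct ffa_comp_mrd) + 3 * sizeof(struct ffa_cons_mrd)
 */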
400 
401 /**
402  * spmc_shm_get_v1_0_descriptor_size - Calculate the required size for a v1.0
403  *                                     converted descriptor.
404  * @orig:       The original v1.1 memory transaction descriptor.
405  * @desc_size:  The size of the original v1.1 memory transaction descriptor.
406  *
407  * Return: the size required to store the descriptor in the v1.0 format.
408  */
409 static size_t
410 spmc_shm_get_v1_0_descriptor_size(struct ffa_mtd *orig, size_t desc_size)
411 {
412 	size_t size = 0;
413 	struct ffa_comp_mrd *mrd;
414 	struct ffa_emad_v1_0 *emad_array = (struct ffa_emad_v1_0 *)
415 					   ((uint8_t *) orig +
416 					    orig->emad_offset);
417 
418 	/* Get the size of the v1.0 descriptor. */
419 	size += sizeof(struct ffa_mtd_v1_0);
420 
421 	/* Add the size of the v1.0 emad descriptors. */
422 	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);
423 
424 	/* Add the size of the composite mrds. */
425 	size += sizeof(struct ffa_comp_mrd);
426 
427 	/* Add the size of the constituent mrds. */
428 	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
429 	      emad_array[0].comp_mrd_offset);
430 
431 	/* Check the calculated address is within the memory descriptor. */
432 	if (((uintptr_t) mrd + sizeof(struct ffa_comp_mrd)) >
433 	    (uintptr_t)((uint8_t *) orig + desc_size)) {
434 		return 0;
435 	}
436 	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);
437 
438 	return size;
439 }
440 
441 /**
442  * spmc_shm_convert_shmem_obj_from_v1_0 - Converts a given v1.0 memory object.
443  * @out_obj:	The shared memory object to populate the converted descriptor.
444  * @orig:	The shared memory object containing the v1.0 descriptor.
445  *
446  * Return: true if the conversion is successful else false.
447  */
448 static bool
449 spmc_shm_convert_shmem_obj_from_v1_0(struct spmc_shmem_obj *out_obj,
450 				     struct spmc_shmem_obj *orig)
451 {
452 	struct ffa_mtd_v1_0 *mtd_orig = (struct ffa_mtd_v1_0 *) &orig->desc;
453 	struct ffa_mtd *out = &out_obj->desc;
454 	struct ffa_emad_v1_0 *emad_array_in;
455 	struct ffa_emad_v1_0 *emad_array_out;
456 	struct ffa_comp_mrd *mrd_in;
457 	struct ffa_comp_mrd *mrd_out;
458 
459 	size_t mrd_in_offset;
460 	size_t mrd_out_offset;
461 	size_t mrd_size = 0;
462 
463 	/* Populate the new descriptor format from the v1.0 struct. */
464 	out->sender_id = mtd_orig->sender_id;
465 	out->memory_region_attributes = mtd_orig->memory_region_attributes;
466 	out->flags = mtd_orig->flags;
467 	out->handle = mtd_orig->handle;
468 	out->tag = mtd_orig->tag;
469 	out->emad_count = mtd_orig->emad_count;
470 	out->emad_size = sizeof(struct ffa_emad_v1_0);
471 
472 	/*
473 	 * We will locate the emad descriptors directly after the ffa_mtd
474 	 * struct. This offset is 16-byte aligned, as required for v1.1.
475 	 */
476 	out->emad_offset = sizeof(struct ffa_mtd);
477 
478 	emad_array_in = mtd_orig->emad;
479 	emad_array_out = (struct ffa_emad_v1_0 *)
480 			 ((uint8_t *) out + out->emad_offset);
481 
482 	/* Copy across the emad structs. */
483 	for (unsigned int i = 0U; i < out->emad_count; i++) {
484 		/* Bound check for emad array. */
485 		if (((uint8_t *)emad_array_in + sizeof(struct ffa_emad_v1_0)) >
486 		    ((uint8_t *) mtd_orig + orig->desc_size)) {
487 			VERBOSE("%s: Invalid mtd structure.\n", __func__);
488 			return false;
489 		}
490 		memcpy(&emad_array_out[i], &emad_array_in[i],
491 		       sizeof(struct ffa_emad_v1_0));
492 	}
493 
494 	/* Place the mrd descriptors after the end of the emad descriptors. */
495 	mrd_in_offset = emad_array_in->comp_mrd_offset;
496 	mrd_out_offset = out->emad_offset + (out->emad_size * out->emad_count);
497 	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);
498 
499 	/* Add the size of the composite memory region descriptor. */
500 	mrd_size += sizeof(struct ffa_comp_mrd);
501 
502 	/* Find the mrd descriptor. */
503 	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);
504 
505 	/* Add the size of the constituent memory region descriptors. */
506 	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);
507 
508 	/*
509 	 * Update the offset in the emads by the delta between the input and
510 	 * output addresses.
511 	 */
512 	for (unsigned int i = 0U; i < out->emad_count; i++) {
513 		emad_array_out[i].comp_mrd_offset =
514 			emad_array_in[i].comp_mrd_offset +
515 			(mrd_out_offset - mrd_in_offset);
516 	}
517 
518 	/* Verify that we stay within bound of the memory descriptors. */
519 	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
520 	     (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
521 	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
522 	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
523 		ERROR("%s: Invalid mrd structure.\n", __func__);
524 		return false;
525 	}
526 
527 	/* Copy the mrd descriptors directly. */
528 	memcpy(mrd_out, mrd_in, mrd_size);
529 
530 	return true;
531 }
532 
533 /**
534  * spmc_shm_convert_mtd_to_v1_0 - Converts a given v1.1 memory object to
535  *                                v1.0 memory object.
536  * @out_obj:    The shared memory object to populate the v1.0 descriptor.
537  * @orig:       The shared memory object containing the v1.1 descriptor.
538  *
539  * Return: true if the conversion is successful else false.
540  */
541 static bool
542 spmc_shm_convert_mtd_to_v1_0(struct spmc_shmem_obj *out_obj,
543 			     struct spmc_shmem_obj *orig)
544 {
545 	struct ffa_mtd *mtd_orig = &orig->desc;
546 	struct ffa_mtd_v1_0 *out = (struct ffa_mtd_v1_0 *) &out_obj->desc;
547 	struct ffa_emad_v1_0 *emad_in;
548 	struct ffa_emad_v1_0 *emad_array_in;
549 	struct ffa_emad_v1_0 *emad_array_out;
550 	struct ffa_comp_mrd *mrd_in;
551 	struct ffa_comp_mrd *mrd_out;
552 
553 	size_t mrd_in_offset;
554 	size_t mrd_out_offset;
555 	size_t emad_out_array_size;
556 	size_t mrd_size = 0;
557 	size_t orig_desc_size = orig->desc_size;
558 
559 	/* Populate the v1.0 descriptor format from the v1.1 struct. */
560 	out->sender_id = mtd_orig->sender_id;
561 	out->memory_region_attributes = mtd_orig->memory_region_attributes;
562 	out->flags = mtd_orig->flags;
563 	out->handle = mtd_orig->handle;
564 	out->tag = mtd_orig->tag;
565 	out->emad_count = mtd_orig->emad_count;
566 
567 	/* Determine the location of the emad array in both descriptors. */
568 	emad_array_in = (struct ffa_emad_v1_0 *)
569 			((uint8_t *) mtd_orig + mtd_orig->emad_offset);
570 	emad_array_out = out->emad;
571 
572 	/* Copy across the emad structs. */
573 	emad_in = emad_array_in;
574 	for (unsigned int i = 0U; i < out->emad_count; i++) {
575 		/* Bound check for emad array. */
576 		if (((uint8_t *)emad_in + sizeof(struct ffa_emad_v1_0)) >
577 				((uint8_t *) mtd_orig + orig_desc_size)) {
578 			VERBOSE("%s: Invalid mtd structure.\n", __func__);
579 			return false;
580 		}
581 		memcpy(&emad_array_out[i], emad_in,
582 		       sizeof(struct ffa_emad_v1_0));
583 
584 		/* desc->emad_size is a stride in bytes, not in elements. */
		emad_in = (struct ffa_emad_v1_0 *)
			  ((uint8_t *)emad_in + mtd_orig->emad_size);
585 	}
586 
587 	/* Place the mrd descriptors after the end of the emad descriptors. */
588 	emad_out_array_size = sizeof(struct ffa_emad_v1_0) * out->emad_count;
589 
590 	mrd_out_offset =  (uint8_t *) out->emad - (uint8_t *) out +
591 			  emad_out_array_size;
592 
593 	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);
594 
595 	mrd_in_offset = mtd_orig->emad_offset +
596 			(mtd_orig->emad_size * mtd_orig->emad_count);
597 
598 	/* Add the size of the composite memory region descriptor. */
599 	mrd_size += sizeof(struct ffa_comp_mrd);
600 
601 	/* Find the mrd descriptor. */
602 	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);
603 
604 	/* Add the size of the constituent memory region descriptors. */
605 	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);
606 
607 	/*
608 	 * Update the offset in the emads by the delta between the input and
609 	 * output addresses.
610 	 */
611 	emad_in = emad_array_in;
612 
613 	for (unsigned int i = 0U; i < out->emad_count; i++) {
614 		emad_array_out[i].comp_mrd_offset = emad_in->comp_mrd_offset +
615 						    (mrd_out_offset -
616 						     mrd_in_offset);
617 		emad_in = (struct ffa_emad_v1_0 *)
			  ((uint8_t *)emad_in + mtd_orig->emad_size);
618 	}
619 
620 	/* Verify that we stay within bound of the memory descriptors. */
621 	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
622 	     (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
623 	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
624 	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
625 		ERROR("%s: Invalid mrd structure.\n", __func__);
626 		return false;
627 	}
628 
629 	/* Copy the mrd descriptors directly. */
630 	memcpy(mrd_out, mrd_in, mrd_size);
631 
632 	return true;
633 }
634 
635 /**
636  * spmc_populate_ffa_v1_0_descriptor - Converts a given v1.1 memory object to
637  *                                     the v1.0 format and populates the
638  *                                     provided buffer.
639  * @dst:	    Buffer to populate v1.0 ffa_memory_region_descriptor.
640  * @orig_obj:	    Object containing v1.1 ffa_memory_region_descriptor.
641  * @buf_size:	    Size of the buffer to populate.
642  * @offset:	    The offset of the converted descriptor to copy.
643  * @copy_size:	    Will be populated with the number of bytes copied.
644  * @v1_0_desc_size: Will be populated with the total size of the v1.0
645  *                  descriptor.
646  *
647  * Return: 0 if conversion and population succeeded.
648  * Note: This function invalidates the reference to @orig_obj, therefore
649  * `spmc_shmem_obj_lookup` must be called if further usage is required.
650  */
651 static uint32_t
652 spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
653 				 size_t buf_size, size_t offset,
654 				 size_t *copy_size, size_t *v1_0_desc_size)
655 {
656 	struct spmc_shmem_obj *v1_0_obj;
657 
658 	/* Calculate the size that the v1.0 descriptor will require. */
659 	*v1_0_desc_size = spmc_shm_get_v1_0_descriptor_size(
660 				&orig_obj->desc, orig_obj->desc_size);
661 
662 	if (*v1_0_desc_size == 0) {
663 		ERROR("%s: cannot determine size of descriptor.\n",
664 		      __func__);
665 		return FFA_ERROR_INVALID_PARAMETER;
666 	}
667 
668 	/* Get a new obj to store the v1.0 descriptor. */
669 	v1_0_obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
670 					*v1_0_desc_size);
671 
672 	if (!v1_0_obj) {
673 		return FFA_ERROR_NO_MEMORY;
674 	}
675 
676 	/* Perform the conversion from v1.1 to v1.0. */
677 	if (!spmc_shm_convert_mtd_to_v1_0(v1_0_obj, orig_obj)) {
678 		spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
679 		return FFA_ERROR_INVALID_PARAMETER;
680 	}
681 
682 	*copy_size = MIN(v1_0_obj->desc_size - offset, buf_size);
683 	memcpy(dst, (uint8_t *) &v1_0_obj->desc + offset, *copy_size);
684 
685 	/*
686 	 * We're finished with the v1.0 descriptor for now so free it.
687 	 * Note that this will invalidate any references to the v1.1
688 	 * descriptor.
689 	 */
690 	spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
691 
692 	return 0;
693 }
694 
695 static int
696 spmc_validate_mtd_start(struct ffa_mtd *desc, uint32_t ffa_version,
697 			size_t fragment_length, size_t total_length)
698 {
699 	unsigned long long emad_end;
700 	unsigned long long emad_size;
701 	unsigned long long emad_offset;
702 	unsigned int min_desc_size;
703 
704 	/* Determine the appropriate minimum descriptor size. */
705 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
706 		min_desc_size = sizeof(struct ffa_mtd_v1_0);
707 	} else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
708 		min_desc_size = sizeof(struct ffa_mtd);
709 	} else {
710 		return FFA_ERROR_INVALID_PARAMETER;
711 	}
712 	if (fragment_length < min_desc_size) {
713 		WARN("%s: invalid length %zu < %u\n", __func__, fragment_length,
714 		     min_desc_size);
715 		return FFA_ERROR_INVALID_PARAMETER;
716 	}
717 
718 	if (desc->emad_count == 0U) {
719 		WARN("%s: unsupported attribute desc count %u.\n",
720 		     __func__, desc->emad_count);
721 		return FFA_ERROR_INVALID_PARAMETER;
722 	}
723 
724 	/*
725 	 * If the caller is using FF-A v1.0, interpret the descriptor as the
726 	 * v1.0 format; otherwise assume it is the v1.1 format.
727 	 */
728 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
729 		emad_offset = emad_size = sizeof(struct ffa_emad_v1_0);
730 	} else {
731 		if (!is_aligned(desc->emad_offset, 16)) {
732 			WARN("%s: Emad offset %" PRIx32 " is not 16-byte aligned.\n",
733 			     __func__, desc->emad_offset);
734 			return FFA_ERROR_INVALID_PARAMETER;
735 		}
736 		if (desc->emad_offset < sizeof(struct ffa_mtd)) {
737 			WARN("%s: Emad offset too small: 0x%" PRIx32 " < 0x%zx.\n",
738 			     __func__, desc->emad_offset,
739 			     sizeof(struct ffa_mtd));
740 			return FFA_ERROR_INVALID_PARAMETER;
741 		}
742 		emad_offset = desc->emad_offset;
743 		if (desc->emad_size < sizeof(struct ffa_emad_v1_0)) {
744 			WARN("%s: Bad emad size (%" PRIu32 " < %zu).\n", __func__,
745 			     desc->emad_size, sizeof(struct ffa_emad_v1_0));
746 			return FFA_ERROR_INVALID_PARAMETER;
747 		}
748 		if (!is_aligned(desc->emad_size, 16)) {
749 			WARN("%s: Emad size 0x%" PRIx32 " is not 16-byte aligned.\n",
750 			     __func__, desc->emad_size);
751 			return FFA_ERROR_INVALID_PARAMETER;
752 		}
753 		emad_size = desc->emad_size;
754 	}
755 
756 	/*
757 	 * Overflow is impossible: the arithmetic happens in at least 64-bit
758 	 * precision, but all of the operands are bounded by UINT32_MAX, and
759 	 * ((2^32 - 1)^2 + (2^32 - 1) + (2^32 - 1)) = ((2^32 - 1) * (2^32 + 1))
760 	 * = (2^64 - 1).
761 	 */
762 	CASSERT(sizeof(desc->emad_count) == 4, assert_emad_count_max_too_large);
763 	emad_end = (desc->emad_count * (unsigned long long)emad_size) +
764 		   (unsigned long long)sizeof(struct ffa_comp_mrd) +
765 		   (unsigned long long)emad_offset;
766 
767 	if (emad_end > total_length) {
768 		WARN("%s: Composite memory region extends beyond descriptor: 0x%llx > 0x%zx\n",
769 		     __func__, emad_end, total_length);
770 		return FFA_ERROR_INVALID_PARAMETER;
771 	}
772 
773 	return 0;
774 }
775 
776 /**
777  * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
778  * @obj:	  Object containing ffa_memory_region_descriptor.
779  * @ffa_version:  FF-A version of the provided descriptor.
780  *
781  * Return: 0 if object is valid, -EINVAL if constituent_memory_region_descriptor
782  * offset or count is invalid.
783  */
784 static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
785 				uint32_t ffa_version)
786 {
787 	uint32_t comp_mrd_offset = 0;
788 
789 	if (obj->desc.emad_count == 0U) {
790 		WARN("%s: unsupported attribute desc count %u.\n",
791 		     __func__, obj->desc.emad_count);
792 		return -EINVAL;
793 	}
794 
795 	for (size_t emad_num = 0; emad_num < obj->desc.emad_count; emad_num++) {
796 		size_t size;
797 		size_t count;
798 		size_t expected_size;
799 		size_t total_page_count;
800 		size_t emad_size;
801 		size_t desc_size;
802 		size_t header_emad_size;
803 		uint32_t offset;
804 		struct ffa_comp_mrd *comp;
805 		struct ffa_emad_v1_0 *emad;
806 
807 		emad = spmc_shmem_obj_get_emad(&obj->desc, emad_num,
808 					       ffa_version, &emad_size);
809 
810 		/*
811 		 * Validate the calculated emad address resides within the
812 		 * descriptor.
813 		 */
814 		if ((uintptr_t) emad >=
815 		    (uintptr_t)((uint8_t *) &obj->desc + obj->desc_size)) {
816 			WARN("Invalid emad access.\n");
817 			return -EINVAL;
818 		}
819 
820 		offset = emad->comp_mrd_offset;
821 
822 		if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
823 			desc_size =  sizeof(struct ffa_mtd_v1_0);
824 		} else {
825 			desc_size =  sizeof(struct ffa_mtd);
826 		}
827 
828 		header_emad_size = desc_size +
829 			(obj->desc.emad_count * emad_size);
830 
831 		if (offset < header_emad_size) {
832 			WARN("%s: invalid object, offset %u < header + emad %zu\n",
833 			     __func__, offset, header_emad_size);
834 			return -EINVAL;
835 		}
836 
837 		size = obj->desc_size;
838 
839 		if (offset > size) {
840 			WARN("%s: invalid object, offset %u > total size %zu\n",
841 			     __func__, offset, obj->desc_size);
842 			return -EINVAL;
843 		}
844 		size -= offset;
845 
846 		if (size < sizeof(struct ffa_comp_mrd)) {
847 			WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
848 			     __func__, offset, obj->desc_size);
849 			return -EINVAL;
850 		}
851 		size -= sizeof(struct ffa_comp_mrd);
852 
853 		count = size / sizeof(struct ffa_cons_mrd);
854 
855 		comp = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);
856 
857 		if (comp == NULL) {
858 			WARN("%s: invalid comp_mrd offset\n", __func__);
859 			return -EINVAL;
860 		}
861 
862 		if (comp->address_range_count != count) {
863 			WARN("%s: invalid object, desc count %u != %zu\n",
864 			     __func__, comp->address_range_count, count);
865 			return -EINVAL;
866 		}
867 
868 		expected_size = offset + sizeof(*comp) +
869 				spmc_shmem_obj_ffa_constituent_size(obj,
870 								    ffa_version);
871 
872 		if (expected_size != obj->desc_size) {
873 			WARN("%s: invalid object, computed size %zu != size %zu\n",
874 			       __func__, expected_size, obj->desc_size);
875 			return -EINVAL;
876 		}
877 
878 		if (obj->desc_filled < obj->desc_size) {
879 			/*
880 			 * The whole descriptor has not yet been received.
881 			 * Skip final checks.
882 			 */
883 			return 0;
884 		}
885 
886 		/*
887 		 * The offset provided to the composite memory region descriptor
888 		 * should be consistent across endpoint descriptors. Store the
889 		 * first entry and compare against subsequent entries.
890 		 */
891 		if (comp_mrd_offset == 0) {
892 			comp_mrd_offset = offset;
893 		} else {
894 			if (comp_mrd_offset != offset) {
895 				ERROR("%s: mismatching offsets provided, %u != %u\n",
896 				       __func__, offset, comp_mrd_offset);
897 				return -EINVAL;
898 			}
899 		}
900 
901 		total_page_count = 0;
902 
903 		for (size_t i = 0; i < count; i++) {
904 			total_page_count +=
905 				comp->address_range_array[i].page_count;
906 		}
907 		if (comp->total_page_count != total_page_count) {
908 			WARN("%s: invalid object, desc total_page_count %u != %zu\n",
909 			     __func__, comp->total_page_count,
910 			     total_page_count);
911 			return -EINVAL;
912 		}
913 	}
914 	return 0;
915 }
916 
917 /**
918  * spmc_shmem_check_state_obj - Check if the descriptor describes memory
919  *				regions that are currently involved in an
920  *				existing memory transaction. This implies that
921  *				the memory is not in a valid state for lending.
922  * @obj:    Object containing ffa_memory_region_descriptor.
923  *
924  * Return: 0 if object is valid, -EINVAL if invalid memory state.
925  */
926 static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj,
927 				      uint32_t ffa_version)
928 {
929 	size_t obj_offset = 0;
930 	struct spmc_shmem_obj *inflight_obj;
931 
932 	struct ffa_comp_mrd *other_mrd;
933 	struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj,
934 								  ffa_version);
935 
936 	if (requested_mrd == NULL) {
937 		return -EINVAL;
938 	}
939 
940 	inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
941 					       &obj_offset);
942 
943 	while (inflight_obj != NULL) {
944 		/*
945 		 * Don't compare the transaction to itself or to partially
946 		 * transmitted descriptors.
947 		 */
948 		if ((obj->desc.handle != inflight_obj->desc.handle) &&
949 		    (obj->desc_size == obj->desc_filled)) {
950 			other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj,
951 							  FFA_VERSION_COMPILED);
952 			if (other_mrd == NULL) {
953 				return -EINVAL;
954 			}
955 			if (overlapping_memory_regions(requested_mrd,
956 						       other_mrd)) {
957 				return -EINVAL;
958 			}
959 		}
960 
961 		inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
962 						       &obj_offset);
963 	}
964 	return 0;
965 }
966 
967 static long spmc_ffa_fill_desc(struct mailbox *mbox,
968 			       struct spmc_shmem_obj *obj,
969 			       uint32_t fragment_length,
970 			       ffa_mtd_flag32_t mtd_flag,
971 			       uint32_t ffa_version,
972 			       void *smc_handle)
973 {
974 	int ret;
975 	size_t emad_size;
976 	uint32_t handle_low;
977 	uint32_t handle_high;
978 	struct ffa_emad_v1_0 *emad;
979 	struct ffa_emad_v1_0 *other_emad;
980 
981 	if (mbox->rxtx_page_count == 0U) {
982 		WARN("%s: buffer pair not registered.\n", __func__);
983 		ret = FFA_ERROR_INVALID_PARAMETER;
984 		goto err_arg;
985 	}
986 
987 	if (fragment_length > mbox->rxtx_page_count * PAGE_SIZE_4KB) {
988 		WARN("%s: bad fragment size %u > %u buffer size\n", __func__,
989 		     fragment_length, mbox->rxtx_page_count * PAGE_SIZE_4KB);
990 		ret = FFA_ERROR_INVALID_PARAMETER;
991 		goto err_arg;
992 	}
993 
994 	if (fragment_length > obj->desc_size - obj->desc_filled) {
995 		WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
996 		     fragment_length, obj->desc_size - obj->desc_filled);
997 		ret = FFA_ERROR_INVALID_PARAMETER;
998 		goto err_arg;
999 	}
1000 
1001 	memcpy((uint8_t *)&obj->desc + obj->desc_filled,
1002 	       (uint8_t *) mbox->tx_buffer, fragment_length);
1003 
1004 	/* Ensure that the sender ID resides in the normal world. */
1005 	if (ffa_is_secure_world_id(obj->desc.sender_id)) {
1006 		WARN("%s: Invalid sender ID 0x%x.\n",
1007 		     __func__, obj->desc.sender_id);
1008 		ret = FFA_ERROR_DENIED;
1009 		goto err_arg;
1010 	}
1011 
1012 	/* Ensure the NS bit is set to 0. */
1013 	if ((obj->desc.memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
1014 		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
1015 		ret = FFA_ERROR_INVALID_PARAMETER;
1016 		goto err_arg;
1017 	}
1018 
1019 	/*
1020 	 * We don't currently support any optional flags so ensure none are
1021 	 * requested.
1022 	 */
1023 	if (obj->desc.flags != 0U && mtd_flag != 0U &&
1024 	    (obj->desc.flags != mtd_flag)) {
1025 		WARN("%s: invalid memory transaction flags %u != %u\n",
1026 		     __func__, obj->desc.flags, mtd_flag);
1027 		ret = FFA_ERROR_INVALID_PARAMETER;
1028 		goto err_arg;
1029 	}
1030 
1031 	if (obj->desc_filled == 0U) {
1032 		/* First fragment, descriptor header has been copied */
1033 		ret = spmc_validate_mtd_start(&obj->desc, ffa_version,
1034 					      fragment_length, obj->desc_size);
1035 		if (ret != 0) {
1036 			goto err_bad_desc;
1037 		}
1038 
1039 		obj->desc.handle = spmc_shmem_obj_state.next_handle++;
1040 		obj->desc.flags |= mtd_flag;
1041 	}
1042 
1043 	obj->desc_filled += fragment_length;
1044 	ret = spmc_shmem_check_obj(obj, ffa_version);
1045 	if (ret != 0) {
1046 		ret = FFA_ERROR_INVALID_PARAMETER;
1047 		goto err_bad_desc;
1048 	}
1049 
1050 	handle_low = (uint32_t)obj->desc.handle;
1051 	handle_high = obj->desc.handle >> 32;
1052 
1053 	if (obj->desc_filled != obj->desc_size) {
1054 		SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
1055 			 handle_high, obj->desc_filled,
1056 			 (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
1057 	}
1058 
1059 	/* The full descriptor has been received, perform any final checks. */
1060 
1061 	/*
1062 	 * If a partition ID resides in the secure world validate that the
1063 	 * partition ID is for a known partition. Ignore any partition ID
1064 	 * belonging to the normal world as it is assumed the Hypervisor will
1065 	 * have validated these.
1066 	 */
1067 	for (size_t i = 0; i < obj->desc.emad_count; i++) {
1068 		emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
1069 					       &emad_size);
1070 
1071 		ffa_endpoint_id16_t ep_id = emad->mapd.endpoint_id;
1072 
1073 		if (ffa_is_secure_world_id(ep_id)) {
1074 			if (spmc_get_sp_ctx(ep_id) == NULL) {
1075 				WARN("%s: Invalid receiver id 0x%x\n",
1076 				     __func__, ep_id);
1077 				ret = FFA_ERROR_INVALID_PARAMETER;
1078 				goto err_bad_desc;
1079 			}
1080 		}
1081 	}
1082 
1083 	/* Ensure partition IDs are not duplicated. */
1084 	for (size_t i = 0; i < obj->desc.emad_count; i++) {
1085 		emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
1086 					       &emad_size);
1087 
1088 		for (size_t j = i + 1; j < obj->desc.emad_count; j++) {
1089 			other_emad = spmc_shmem_obj_get_emad(&obj->desc, j,
1090 							     ffa_version,
1091 							     &emad_size);
1092 
1093 			if (emad->mapd.endpoint_id ==
1094 				other_emad->mapd.endpoint_id) {
1095 				WARN("%s: Duplicated endpoint id 0x%x\n",
1096 				     __func__, emad->mapd.endpoint_id);
1097 				ret = FFA_ERROR_INVALID_PARAMETER;
1098 				goto err_bad_desc;
1099 			}
1100 		}
1101 	}
1102 
1103 	ret = spmc_shmem_check_state_obj(obj, ffa_version);
1104 	if (ret) {
1105 		ERROR("%s: invalid memory region descriptor.\n", __func__);
1106 		ret = FFA_ERROR_INVALID_PARAMETER;
1107 		goto err_bad_desc;
1108 	}
1109 
1110 	/*
1111 	 * Everything checks out. If the sender was using FF-A v1.0, convert
1112 	 * the descriptor format to use the v1.1 structures.
1113 	 */
1114 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1115 		struct spmc_shmem_obj *v1_1_obj;
1116 		uint64_t mem_handle;
1117 
1118 		/* Calculate the size that the v1.1 descriptor will require. */
1119 		size_t v1_1_desc_size =
1120 		    spmc_shm_get_v1_1_descriptor_size((void *) &obj->desc,
1121 						      obj->desc_size);
1122 
1123 		if (v1_1_desc_size == 0U) {
1124 			ERROR("%s: cannot determine size of descriptor.\n",
1125 			      __func__);
			ret = FFA_ERROR_INVALID_PARAMETER;
1126 			goto err_arg;
1127 		}
1128 
1129 		/* Get a new obj to store the v1.1 descriptor. */
1130 		v1_1_obj =
1131 		    spmc_shmem_obj_alloc(&spmc_shmem_obj_state, v1_1_desc_size);
1132 
1133 		if (!v1_1_obj) {
1134 			ret = FFA_ERROR_NO_MEMORY;
1135 			goto err_arg;
1136 		}
1137 
1138 		/* Perform the conversion from v1.0 to v1.1. */
1139 		v1_1_obj->desc_size = v1_1_desc_size;
1140 		v1_1_obj->desc_filled = v1_1_desc_size;
1141 		if (!spmc_shm_convert_shmem_obj_from_v1_0(v1_1_obj, obj)) {
1142 			ERROR("%s: Could not convert mtd!\n", __func__);
1143 			spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_1_obj);
			ret = FFA_ERROR_INVALID_PARAMETER;
1144 			goto err_arg;
1145 		}
1146 
1147 		/*
1148 		 * We're finished with the v1.0 descriptor so free it
1149 		 * and continue our checks with the new v1.1 descriptor.
1150 		 */
1151 		mem_handle = obj->desc.handle;
1152 		spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1153 		obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1154 		if (obj == NULL) {
1155 			ERROR("%s: Failed to find converted descriptor.\n",
1156 			     __func__);
1157 			ret = FFA_ERROR_INVALID_PARAMETER;
1158 			return spmc_ffa_error_return(smc_handle, ret);
1159 		}
1160 	}
1161 
1162 	/* Allow for platform specific operations to be performed. */
1163 	ret = plat_spmc_shmem_begin(&obj->desc);
1164 	if (ret != 0) {
1165 		goto err_arg;
1166 	}
1167 
1168 	SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
1169 		 0, 0, 0);
1170 
1171 err_bad_desc:
1172 err_arg:
1173 	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1174 	return spmc_ffa_error_return(smc_handle, ret);
1175 }
1176 
1177 /**
1178  * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
1179  * @client:             Client state.
1180  * @total_length:       Total length of shared memory descriptor.
1181  * @fragment_length:    Length of fragment of shared memory descriptor passed in
1182  *                      this call.
1183  * @address:            Not supported, must be 0.
1184  * @page_count:         Not supported, must be 0.
1185  * @smc_handle:         Handle passed to smc call. Used to return
1186  *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
1187  *
1188  * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
1189  * to share or lend memory from non-secure os to secure os (with no stream
1190  * endpoints).
1191  *
1192  * Return: 0 on success, error code on failure.
1193  */
1194 long spmc_ffa_mem_send(uint32_t smc_fid,
1195 			bool secure_origin,
1196 			uint64_t total_length,
1197 			uint32_t fragment_length,
1198 			uint64_t address,
1199 			uint32_t page_count,
1200 			void *cookie,
1201 			void *handle,
1202 			uint64_t flags)
1204 {
1205 	long ret;
1206 	struct spmc_shmem_obj *obj;
1207 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1208 	ffa_mtd_flag32_t mtd_flag;
1209 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1210 	size_t min_desc_size;
1211 
1212 	if (address != 0U || page_count != 0U) {
1213 		WARN("%s: custom memory region for message not supported.\n",
1214 		     __func__);
1215 		return spmc_ffa_error_return(handle,
1216 					     FFA_ERROR_INVALID_PARAMETER);
1217 	}
1218 
1219 	if (secure_origin) {
1220 		WARN("%s: unsupported share direction.\n", __func__);
1221 		return spmc_ffa_error_return(handle,
1222 					     FFA_ERROR_INVALID_PARAMETER);
1223 	}
1224 
1225 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1226 		min_desc_size = sizeof(struct ffa_mtd_v1_0);
1227 	} else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
1228 		min_desc_size = sizeof(struct ffa_mtd);
1229 	} else {
1230 		WARN("%s: bad FF-A version.\n", __func__);
1231 		return spmc_ffa_error_return(handle,
1232 					     FFA_ERROR_INVALID_PARAMETER);
1233 	}
1234 
1235 	/* Check if the descriptor is too small for the FF-A version. */
1236 	if (fragment_length < min_desc_size) {
1237 		WARN("%s: bad first fragment size %u < %zu\n",
1238 		     __func__, fragment_length, min_desc_size);
1239 		return spmc_ffa_error_return(handle,
1240 					     FFA_ERROR_INVALID_PARAMETER);
1241 	}
1242 
1243 	if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
1244 		mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
1245 	} else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
1246 		mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
1247 	} else {
1248 		WARN("%s: invalid memory management operation.\n", __func__);
1249 		return spmc_ffa_error_return(handle,
1250 					     FFA_ERROR_INVALID_PARAMETER);
1251 	}
1252 
1253 	spin_lock(&spmc_shmem_obj_state.lock);
1254 	obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
1255 	if (obj == NULL) {
1256 		ret = FFA_ERROR_NO_MEMORY;
1257 		goto err_unlock;
1258 	}
1259 
1260 	spin_lock(&mbox->lock);
1261 	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag,
1262 				 ffa_version, handle);
1263 	spin_unlock(&mbox->lock);
1264 
1265 	spin_unlock(&spmc_shmem_obj_state.lock);
1266 	return ret;
1267 
1268 err_unlock:
1269 	spin_unlock(&spmc_shmem_obj_state.lock);
1270 	return spmc_ffa_error_return(handle, ret);
1271 }
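
/*
 * Sender-side view of the exchange implemented above, as a sketch in
 * normal-world driver pseudo-code (the ffa_* wrappers and
 * copy_next_fragment() are hypothetical): the first FFA_MEM_SHARE carries
 * the initial fragment, and while the SPMC answers FFA_MEM_FRAG_RX the
 * remaining fragments are pushed with FFA_MEM_FRAG_TX until FFA_SUCCESS
 * returns the 64-bit memory handle.
 *
 *   ret = ffa_mem_share(total_len, frag_len);
 *   while (ret.fid == FFA_MEM_FRAG_RX) {
 *           frag_len = copy_next_fragment(tx_buf);
 *           ret = ffa_mem_frag_tx(ret.handle_lo, ret.handle_hi, frag_len);
 *   }
 */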
1272 
1273 /**
1274  * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
1275  * @client:             Client state.
1276  * @handle_low:         Handle_low value returned from FFA_MEM_FRAG_RX.
1277  * @handle_high:        Handle_high value returned from FFA_MEM_FRAG_RX.
1278  * @fragment_length:    Length of fragments transmitted.
1279  * @sender_id:          Vmid of sender in bits [31:16]
1280  * @smc_handle:         Handle passed to smc call. Used to return
1281  *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
1282  *
1283  * Return: @smc_handle on success, error code on failure.
1284  */
1285 long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
1286 			  bool secure_origin,
1287 			  uint64_t handle_low,
1288 			  uint64_t handle_high,
1289 			  uint32_t fragment_length,
1290 			  uint32_t sender_id,
1291 			  void *cookie,
1292 			  void *handle,
1293 			  uint64_t flags)
1294 {
1295 	long ret;
1296 	uint32_t desc_sender_id;
1297 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1298 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1299 
1300 	struct spmc_shmem_obj *obj;
1301 	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1302 
1303 	spin_lock(&spmc_shmem_obj_state.lock);
1304 
1305 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1306 	if (obj == NULL) {
1307 		WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
1308 		     __func__, mem_handle);
1309 		ret = FFA_ERROR_INVALID_PARAMETER;
1310 		goto err_unlock;
1311 	}
1312 
1313 	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
1314 	if (sender_id != desc_sender_id) {
1315 		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
1316 		     sender_id, desc_sender_id);
1317 		ret = FFA_ERROR_INVALID_PARAMETER;
1318 		goto err_unlock;
1319 	}
1320 
1321 	if (obj->desc_filled == obj->desc_size) {
1322 		WARN("%s: object desc already filled, %zu\n", __func__,
1323 		     obj->desc_filled);
1324 		ret = FFA_ERROR_INVALID_PARAMETER;
1325 		goto err_unlock;
1326 	}
1327 
1328 	spin_lock(&mbox->lock);
1329 	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, ffa_version,
1330 				 handle);
1331 	spin_unlock(&mbox->lock);
1332 
1333 	spin_unlock(&spmc_shmem_obj_state.lock);
1334 	return ret;
1335 
1336 err_unlock:
1337 	spin_unlock(&spmc_shmem_obj_state.lock);
1338 	return spmc_ffa_error_return(handle, ret);
1339 }
1340 
1341 /**
1342  * spmc_ffa_mem_retrieve_set_ns_bit - Set the NS bit in the response descriptor
1343  *				      if the caller implements a version greater
1344  *				      than FF-A 1.0 or if they have requested
1345  *				      the functionality.
1346  *				      TODO: We are assuming that the caller is
1347  *				      an SP. To support retrieval from the
1348  *				      normal world this function will need to be
1349  *				      expanded accordingly.
1350  * @resp:       Descriptor populated in callers RX buffer.
1351  * @sp_ctx:     Context of the calling SP.
1352  */
1353 void spmc_ffa_mem_retrieve_set_ns_bit(struct ffa_mtd *resp,
1354 			 struct secure_partition_desc *sp_ctx)
1355 {
1356 	if (sp_ctx->ffa_version > MAKE_FFA_VERSION(1, 0) ||
1357 	    sp_ctx->ns_bit_requested) {
1358 		/*
1359 		 * Currently memory senders must reside in the normal
1360 		 * world, and we do not have the functionality to change
1361 		 * the state of memory dynamically. Therefore we can always set
1362 		 * the NS bit to 1.
1363 		 */
1364 		resp->memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
1365 	}
1366 }
1367 
1368 /**
1369  * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
1370  * @smc_fid:            FID of SMC
1371  * @total_length:       Total length of retrieve request descriptor if this is
1372  *                      the first call. Otherwise (unsupported) must be 0.
1373  * @fragment_length:    Length of fragment of retrieve request descriptor passed
1374  *                      in this call. Only @fragment_length == @total_length
1375  *                      is supported by this implementation.
1376  * @address:            Not supported, must be 0.
1377  * @page_count:         Not supported, must be 0.
1378  * @smc_handle:         Handle passed to smc call. Used to return
1379  *                      FFA_MEM_RETRIEVE_RESP.
1380  *
1381  * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
1382  * Used by secure os to retrieve memory already shared by non-secure os.
1383  * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
1384  * the client must call FFA_MEM_FRAG_RX until the full response has been
1385  * received.
1386  *
1387  * Return: @handle on success, error code on failure.
1388  */
1389 long
1390 spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
1391 			  bool secure_origin,
1392 			  uint32_t total_length,
1393 			  uint32_t fragment_length,
1394 			  uint64_t address,
1395 			  uint32_t page_count,
1396 			  void *cookie,
1397 			  void *handle,
1398 			  uint64_t flags)
1399 {
1400 	int ret;
1401 	size_t buf_size;
1402 	size_t copy_size = 0;
1403 	size_t min_desc_size;
1404 	size_t out_desc_size = 0;
1405 
1406 	/*
1407 	 * Currently we are only accessing fields that are the same in both the
1408 	 * v1.0 and v1.1 mtd struct therefore we can use a v1.1 struct directly
1409 	 * here. We only need validate against the appropriate struct size.
1410 	 */
1411 	struct ffa_mtd *resp;
1412 	const struct ffa_mtd *req;
1413 	struct spmc_shmem_obj *obj = NULL;
1414 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1415 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1416 	struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
1417 
1418 	if (!secure_origin) {
1419 		WARN("%s: unsupported retrieve req direction.\n", __func__);
1420 		return spmc_ffa_error_return(handle,
1421 					     FFA_ERROR_INVALID_PARAMETER);
1422 	}
1423 
1424 	if (address != 0U || page_count != 0U) {
1425 		WARN("%s: custom memory region not supported.\n", __func__);
1426 		return spmc_ffa_error_return(handle,
1427 					     FFA_ERROR_INVALID_PARAMETER);
1428 	}
1429 
1430 	spin_lock(&mbox->lock);
1431 
1432 	req = mbox->tx_buffer;
1433 	resp = mbox->rx_buffer;
1434 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1435 
1436 	if (mbox->rxtx_page_count == 0U) {
1437 		WARN("%s: buffer pair not registered.\n", __func__);
1438 		ret = FFA_ERROR_INVALID_PARAMETER;
1439 		goto err_unlock_mailbox;
1440 	}
1441 
1442 	if (mbox->state != MAILBOX_STATE_EMPTY) {
1443 		WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
1444 		ret = FFA_ERROR_DENIED;
1445 		goto err_unlock_mailbox;
1446 	}
1447 
1448 	if (fragment_length != total_length) {
1449 		WARN("%s: fragmented retrieve request not supported.\n",
1450 		     __func__);
1451 		ret = FFA_ERROR_INVALID_PARAMETER;
1452 		goto err_unlock_mailbox;
1453 	}
1454 
1455 	if (req->emad_count == 0U) {
1456 		WARN("%s: unsupported attribute desc count %u.\n",
1457 		     __func__, req->emad_count);
1458 		ret = FFA_ERROR_INVALID_PARAMETER;
1459 		goto err_unlock_mailbox;
1460 	}
1461 
1462 	/* Determine the appropriate minimum descriptor size. */
1463 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1464 		min_desc_size = sizeof(struct ffa_mtd_v1_0);
1465 	} else {
1466 		min_desc_size = sizeof(struct ffa_mtd);
1467 	}
1468 	if (total_length < min_desc_size) {
1469 		WARN("%s: invalid length %u < %zu\n", __func__, total_length,
1470 		     min_desc_size);
1471 		ret = FFA_ERROR_INVALID_PARAMETER;
1472 		goto err_unlock_mailbox;
1473 	}
1474 
1475 	spin_lock(&spmc_shmem_obj_state.lock);
1476 
1477 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1478 	if (obj == NULL) {
1479 		ret = FFA_ERROR_INVALID_PARAMETER;
1480 		goto err_unlock_all;
1481 	}
1482 
1483 	if (obj->desc_filled != obj->desc_size) {
1484 		WARN("%s: incomplete object desc filled %zu < size %zu\n",
1485 		     __func__, obj->desc_filled, obj->desc_size);
1486 		ret = FFA_ERROR_INVALID_PARAMETER;
1487 		goto err_unlock_all;
1488 	}
1489 
1490 	if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
1491 		WARN("%s: wrong sender id 0x%x != 0x%x\n",
1492 		     __func__, req->sender_id, obj->desc.sender_id);
1493 		ret = FFA_ERROR_INVALID_PARAMETER;
1494 		goto err_unlock_all;
1495 	}
1496 
1497 	if (req->emad_count != 0U && req->tag != obj->desc.tag) {
1498 		WARN("%s: wrong tag 0x%lx != 0x%lx\n",
1499 		     __func__, req->tag, obj->desc.tag);
1500 		ret = FFA_ERROR_INVALID_PARAMETER;
1501 		goto err_unlock_all;
1502 	}
1503 
1504 	if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
1505 		WARN("%s: mistmatch of endpoint counts %u != %u\n",
1506 		     __func__, req->emad_count, obj->desc.emad_count);
1507 		ret = FFA_ERROR_INVALID_PARAMETER;
1508 		goto err_unlock_all;
1509 	}
1510 
1511 	/* Ensure the NS bit is set to 0 in the request. */
1512 	if ((req->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
1513 		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
1514 		ret = FFA_ERROR_INVALID_PARAMETER;
1515 		goto err_unlock_all;
1516 	}
1517 
1518 	if (req->flags != 0U) {
1519 		if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
1520 		    (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
1521 			/*
1522 			 * If the retrieve request specifies the memory
1523 			 * transaction ensure it matches what we expect.
1524 			 */
1525 			WARN("%s: wrong mem transaction flags %x != %x\n",
1526 			     __func__, req->flags, obj->desc.flags);
1527 			ret = FFA_ERROR_INVALID_PARAMETER;
1528 			goto err_unlock_all;
1529 		}
1530 
1531 		if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
1532 		    req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
1533 			/*
1534 			 * Current implementation does not support donate and
1535 			 * it supports no other flags.
1536 			 */
1537 			WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
1538 			ret = FFA_ERROR_INVALID_PARAMETER;
1539 			goto err_unlock_all;
1540 		}
1541 	}
1542 
1543 	/* Validate the caller is a valid participant. */
1544 	if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
1545 		WARN("%s: Invalid endpoint ID (0x%x).\n",
1546 			__func__, sp_ctx->sp_id);
1547 		ret = FFA_ERROR_INVALID_PARAMETER;
1548 		goto err_unlock_all;
1549 	}
1550 
1551 	/* Validate that the provided emad offset and structure are valid. */
1552 	for (size_t i = 0; i < req->emad_count; i++) {
1553 		size_t emad_size;
1554 		struct ffa_emad_v1_0 *emad;
1555 
1556 		emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
1557 					       &emad_size);
1558 
1559 		if ((uintptr_t) emad >= (uintptr_t)
1560 					((uint8_t *) req + total_length)) {
1561 			WARN("Invalid emad access.\n");
1562 			ret = FFA_ERROR_INVALID_PARAMETER;
1563 			goto err_unlock_all;
1564 		}
1565 	}
1566 
1567 	/*
1568 	 * Validate all the endpoints match in the case of multiple
1569 	 * borrowers. We don't mandate that the order of the borrowers
1570 	 * must match in the descriptors, therefore check to see if the
1571 	 * endpoints match in any order.
1572 	 */
1573 	for (size_t i = 0; i < req->emad_count; i++) {
1574 		bool found = false;
1575 		size_t emad_size;
1576 		struct ffa_emad_v1_0 *emad;
1577 		struct ffa_emad_v1_0 *other_emad;
1578 
1579 		emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
1580 					       &emad_size);
1581 
1582 		for (size_t j = 0; j < obj->desc.emad_count; j++) {
1583 			other_emad = spmc_shmem_obj_get_emad(
1584 					&obj->desc, j, MAKE_FFA_VERSION(1, 1),
1585 					&emad_size);
1586 
1587 			if (req->emad_count &&
1588 			    emad->mapd.endpoint_id ==
1589 			    other_emad->mapd.endpoint_id) {
1590 				found = true;
1591 				break;
1592 			}
1593 		}
1594 
1595 		if (!found) {
1596 			WARN("%s: invalid receiver id (0x%x).\n",
1597 			     __func__, emad->mapd.endpoint_id);
1598 			ret = FFA_ERROR_INVALID_PARAMETER;
1599 			goto err_unlock_all;
1600 		}
1601 	}
1602 
1603 	mbox->state = MAILBOX_STATE_FULL;
1604 
1605 	if (req->emad_count != 0U) {
1606 		obj->in_use++;
1607 	}
1608 
1609 	/*
1610 	 * If the caller is v1.0 convert the descriptor, otherwise copy
1611 	 * directly.
1612 	 */
1613 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1614 		ret = spmc_populate_ffa_v1_0_descriptor(resp, obj, buf_size, 0,
1615 							&copy_size,
1616 							&out_desc_size);
1617 		if (ret != 0U) {
1618 			ERROR("%s: Failed to process descriptor.\n", __func__);
1619 			goto err_unlock_all;
1620 		}
1621 	} else {
1622 		copy_size = MIN(obj->desc_size, buf_size);
1623 		out_desc_size = obj->desc_size;
1624 
1625 		memcpy(resp, &obj->desc, copy_size);
1626 	}
1627 
1628 	/* Set the NS bit in the response if applicable. */
1629 	spmc_ffa_mem_retrieve_set_ns_bit(resp, sp_ctx);
1630 
1631 	spin_unlock(&spmc_shmem_obj_state.lock);
1632 	spin_unlock(&mbox->lock);
1633 
1634 	SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, out_desc_size,
1635 		 copy_size, 0, 0, 0, 0, 0);
1636 
1637 err_unlock_all:
1638 	spin_unlock(&spmc_shmem_obj_state.lock);
1639 err_unlock_mailbox:
1640 	spin_unlock(&mbox->lock);
1641 	return spmc_ffa_error_return(handle, ret);
1642 }
1643 
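/*
 * Illustrative caller-side fragment loop (not part of this file): when
 * FFA_MEM_RETRIEVE_RESP reports a total descriptor length (w1) larger than
 * the fragment length (w2) that fit in the RX buffer, the receiver can
 * fetch the remainder as sketched below. ffa_rx_release() and
 * ffa_mem_frag_rx() stand in for whatever SMC wrappers the caller's runtime
 * provides; they are assumptions, not definitions from this file.
 *
 *	uint32_t offset = fragment_length;
 *
 *	while (offset < total_length) {
 *		ffa_rx_release();
 *		// FFA_MEM_FRAG_TX carries the new fragment's length in w3.
 *		offset += ffa_mem_frag_rx(handle_low, handle_high, offset);
 *	}
 */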
1644 /**
1645  * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
1646  * @secure_origin:      true if the call originated from the secure world.
1647  * @handle_low:         Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
1648  * @handle_high:        Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
1649  * @fragment_offset:    Byte offset in descriptor to resume at.
1650  * @sender_id:          Bit[31:16]: Endpoint id of sender if client is a
1651  *                      hypervisor. 0 otherwise.
1652  * @handle:             Handle passed to the SMC call. Used to return
1653  *                      FFA_MEM_FRAG_TX.
1654  *
1655  * Return: @handle on success, error code on failure.
1656  */
1657 long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
1658 			  bool secure_origin,
1659 			  uint32_t handle_low,
1660 			  uint32_t handle_high,
1661 			  uint32_t fragment_offset,
1662 			  uint32_t sender_id,
1663 			  void *cookie,
1664 			  void *handle,
1665 			  uint64_t flags)
1666 {
1667 	int ret;
1668 	uint8_t *src;
1669 	size_t buf_size;
1670 	size_t copy_size;
1671 	size_t full_copy_size;
1672 	uint32_t desc_sender_id;
1673 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1674 	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1675 	struct spmc_shmem_obj *obj;
1676 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1677 
1678 	if (!secure_origin) {
1679 		WARN("%s: can only be called from the secure world.\n",
1680 		     __func__);
1681 		return spmc_ffa_error_return(handle,
1682 					     FFA_ERROR_INVALID_PARAMETER);
1683 	}
1684 
1685 	spin_lock(&spmc_shmem_obj_state.lock);
1686 
1687 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1688 	if (obj == NULL) {
1689 		WARN("%s: invalid handle 0x%lx.\n",
1690 		     __func__, mem_handle);
1691 		ret = FFA_ERROR_INVALID_PARAMETER;
1692 		goto err_unlock_shmem;
1693 	}
1694 
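	/*
	 * A hypervisor passes the sender's endpoint ID in bits [31:16] of
	 * sender_id; rebuild the same encoding from the stored descriptor
	 * so the two can be compared directly.
	 */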
1695 	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
1696 	if (sender_id != 0U && sender_id != desc_sender_id) {
1697 		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
1698 		     sender_id, desc_sender_id);
1699 		ret = FFA_ERROR_INVALID_PARAMETER;
1700 		goto err_unlock_shmem;
1701 	}
1702 
1703 	if (fragment_offset >= obj->desc_size) {
1704 		WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
1705 		     __func__, fragment_offset, obj->desc_size);
1706 		ret = FFA_ERROR_INVALID_PARAMETER;
1707 		goto err_unlock_shmem;
1708 	}
1709 
1710 	spin_lock(&mbox->lock);
1711 
1712 	if (mbox->rxtx_page_count == 0U) {
1713 		WARN("%s: buffer pair not registered.\n", __func__);
1714 		ret = FFA_ERROR_INVALID_PARAMETER;
1715 		goto err_unlock_all;
1716 	}
1717 
1718 	if (mbox->state != MAILBOX_STATE_EMPTY) {
1719 		WARN("%s: RX Buffer is full!\n", __func__);
1720 		ret = FFA_ERROR_DENIED;
1721 		goto err_unlock_all;
1722 	}
1723 
1724 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1725 
1726 	mbox->state = MAILBOX_STATE_FULL;
1727 
1728 	/*
1729 	 * If the caller is v1.0, convert the stored descriptor to the
1730 	 * v1.0 layout; otherwise copy out the next fragment directly.
1731 	 */
1732 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1733 		size_t out_desc_size;
1734 
1735 		ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
1736 							buf_size,
1737 							fragment_offset,
1738 							&copy_size,
1739 							&out_desc_size);
1740 		if (ret != 0) {
1741 			ERROR("%s: Failed to process descriptor.\n", __func__);
1742 			goto err_unlock_all;
1743 		}
1744 	} else {
1745 		full_copy_size = obj->desc_size - fragment_offset;
1746 		copy_size = MIN(full_copy_size, buf_size);
1747 
1748 		src = (uint8_t *)&obj->desc;
1749 
1750 		memcpy(mbox->rx_buffer, src + fragment_offset, copy_size);
1751 	}
1752 
1753 	spin_unlock(&mbox->lock);
1754 	spin_unlock(&spmc_shmem_obj_state.lock);
1755 
1756 	SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
1757 		 copy_size, sender_id, 0, 0, 0);
1758 
1759 err_unlock_all:
1760 	spin_unlock(&mbox->lock);
1761 err_unlock_shmem:
1762 	spin_unlock(&spmc_shmem_obj_state.lock);
1763 	return spmc_ffa_error_return(handle, ret);
1764 }
1765 
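/*
 * Illustrative TX buffer contents for a relinquish request, limited to the
 * fields this implementation actually reads from struct
 * ffa_mem_relinquish_descriptor; the values shown are hypothetical:
 *
 *	handle            = 0xffffffc000000000	(from the original share)
 *	flags             = 0			(no flags supported)
 *	endpoint_count    = 1			(proxies unsupported)
 *	endpoint_array[0] = 0x8001		(caller's own FF-A ID)
 */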
1766 /**
1767  * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
1768  * @secure_origin:      true if the call originated from the secure world.
1769  *
1770  * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
1771  * Used by the secure OS to release access to memory previously shared with it.
1772  *
1773  * The relinquish descriptor naming the handle must be in the caller's TX buffer.
1774  *
1775  * Return: 0 on success, error code on failure.
1776  */
1777 int spmc_ffa_mem_relinquish(uint32_t smc_fid,
1778 			    bool secure_origin,
1779 			    uint32_t handle_low,
1780 			    uint32_t handle_high,
1781 			    uint32_t fragment_offset,
1782 			    uint32_t sender_id,
1783 			    void *cookie,
1784 			    void *handle,
1785 			    uint64_t flags)
1786 {
1787 	int ret;
1788 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1789 	struct spmc_shmem_obj *obj;
1790 	const struct ffa_mem_relinquish_descriptor *req;
1791 	struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
1792 
1793 	if (!secure_origin) {
1794 		WARN("%s: unsupported relinquish direction.\n", __func__);
1795 		return spmc_ffa_error_return(handle,
1796 					     FFA_ERROR_INVALID_PARAMETER);
1797 	}
1798 
1799 	spin_lock(&mbox->lock);
1800 
1801 	if (mbox->rxtx_page_count == 0U) {
1802 		WARN("%s: buffer pair not registered.\n", __func__);
1803 		ret = FFA_ERROR_INVALID_PARAMETER;
1804 		goto err_unlock_mailbox;
1805 	}
1806 
1807 	req = mbox->tx_buffer;
1808 
1809 	if (req->flags != 0U) {
1810 		WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
1811 		ret = FFA_ERROR_INVALID_PARAMETER;
1812 		goto err_unlock_mailbox;
1813 	}
1814 
1815 	if (req->endpoint_count == 0U) {
1816 		WARN("%s: endpoint count cannot be 0.\n", __func__);
1817 		ret = FFA_ERROR_INVALID_PARAMETER;
1818 		goto err_unlock_mailbox;
1819 	}
1820 
1821 	spin_lock(&spmc_shmem_obj_state.lock);
1822 
1823 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1824 	if (obj == NULL) {
1825 		ret = FFA_ERROR_INVALID_PARAMETER;
1826 		goto err_unlock_all;
1827 	}
1828 
1829 	/*
1830 	 * Validate the endpoint ID was populated correctly. We don't currently
1831 	 * support proxy endpoints, so the endpoint count should always be 1.
1832 	 */
1833 	if (req->endpoint_count != 1U) {
1834 		WARN("%s: unsupported endpoint count %u != 1\n", __func__,
1835 		     req->endpoint_count);
1836 		ret = FFA_ERROR_INVALID_PARAMETER;
1837 		goto err_unlock_all;
1838 	}
1839 
1840 	/* Validate provided endpoint ID matches the partition ID. */
1841 	if (req->endpoint_array[0] != sp_ctx->sp_id) {
1842 		WARN("%s: invalid endpoint ID %u != %u\n", __func__,
1843 		     req->endpoint_array[0], sp_ctx->sp_id);
1844 		ret = FFA_ERROR_INVALID_PARAMETER;
1845 		goto err_unlock_all;
1846 	}
1847 
1848 	/* Validate the caller is a valid participant. */
1849 	if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
1850 		WARN("%s: Invalid endpoint ID (0x%x).\n",
1851 		     __func__, req->endpoint_array[0]);
1852 		ret = FFA_ERROR_INVALID_PARAMETER;
1853 		goto err_unlock_all;
1854 	}
1855 
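	/*
	 * in_use counts successful retrievals that have not yet been
	 * relinquished; a zero count means there is no outstanding
	 * retrieval to give back.
	 */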
1856 	if (obj->in_use == 0U) {
1857 		ret = FFA_ERROR_INVALID_PARAMETER;
1858 		goto err_unlock_all;
1859 	}
1860 	obj->in_use--;
1861 
1862 	spin_unlock(&spmc_shmem_obj_state.lock);
1863 	spin_unlock(&mbox->lock);
1864 
1865 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1866 
1867 err_unlock_all:
1868 	spin_unlock(&spmc_shmem_obj_state.lock);
1869 err_unlock_mailbox:
1870 	spin_unlock(&mbox->lock);
1871 	return spmc_ffa_error_return(handle, ret);
1872 }
1873 
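/*
 * Illustrative normal-world reclaim (not part of this file), assuming a
 * generic smc() wrapper: the 64-bit handle returned by FFA_MEM_SHARE is
 * split across w1/w2, and the flags argument in w3 must be zero here.
 *
 *	smc(FFA_MEM_RECLAIM, (uint32_t)mem_handle,
 *	    (uint32_t)(mem_handle >> 32), 0, 0, 0, 0, 0);
 */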
1874 /**
1875  * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
1876  * @secure_origin:  true if the call originated from the secure world.
1877  * @handle_low:     Unique handle of shared memory object to reclaim. Bit[31:0].
1878  * @handle_high:    Unique handle of shared memory object to reclaim.
1879  *                  Bit[63:32].
1880  * @mem_flags:      Must be 0; reclaim flags are not supported.
1881  *
1882  * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
1883  * Used by the non-secure OS to reclaim memory previously shared with the
1884  * secure OS.
1884  *
1885  * Return: 0 on success, error code on failure.
1886  */
1887 int spmc_ffa_mem_reclaim(uint32_t smc_fid,
1888 			 bool secure_origin,
1889 			 uint32_t handle_low,
1890 			 uint32_t handle_high,
1891 			 uint32_t mem_flags,
1892 			 uint64_t x4,
1893 			 void *cookie,
1894 			 void *handle,
1895 			 uint64_t flags)
1896 {
1897 	int ret;
1898 	struct spmc_shmem_obj *obj;
1899 	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1900 
1901 	if (secure_origin) {
1902 		WARN("%s: unsupported reclaim direction.\n", __func__);
1903 		return spmc_ffa_error_return(handle,
1904 					     FFA_ERROR_INVALID_PARAMETER);
1905 	}
1906 
1907 	if (mem_flags != 0U) {
1908 		WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
1909 		return spmc_ffa_error_return(handle,
1910 					     FFA_ERROR_INVALID_PARAMETER);
1911 	}
1912 
1913 	spin_lock(&spmc_shmem_obj_state.lock);
1914 
1915 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1916 	if (obj == NULL) {
1917 		ret = FFA_ERROR_INVALID_PARAMETER;
1918 		goto err_unlock;
1919 	}
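	/*
	 * Every borrower must relinquish its access before the owner may
	 * reclaim; an object still in use cannot be freed safely.
	 */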
1920 	if (obj->in_use != 0U) {
1921 		ret = FFA_ERROR_DENIED;
1922 		goto err_unlock;
1923 	}
1924 
1925 	if (obj->desc_filled != obj->desc_size) {
1926 		WARN("%s: incomplete object desc filled %zu < size %zu\n",
1927 		     __func__, obj->desc_filled, obj->desc_size);
1928 		ret = FFA_ERROR_INVALID_PARAMETER;
1929 		goto err_unlock;
1930 	}
1931 
1932 	/* Allow for platform specific operations to be performed. */
1933 	ret = plat_spmc_shmem_reclaim(&obj->desc);
1934 	if (ret != 0) {
1935 		goto err_unlock;
1936 	}
1937 
1938 	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1939 	spin_unlock(&spmc_shmem_obj_state.lock);
1940 
1941 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1942 
1943 err_unlock:
1944 	spin_unlock(&spmc_shmem_obj_state.lock);
1945 	return spmc_ffa_error_return(handle, ret);
1946 }
1947