xref: /rk3399_ARM-atf/services/std_svc/spm/el3_spmc/spmc_shared_mem.c (revision 43318e4a4dcc79935150de75fe5dccbb615f4719)
1 /*
2  * Copyright (c) 2022-2023, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 #include <assert.h>
7 #include <errno.h>
8 #include <inttypes.h>
9 
10 #include <common/debug.h>
11 #include <common/runtime_svc.h>
12 #include <lib/object_pool.h>
13 #include <lib/spinlock.h>
14 #include <lib/xlat_tables/xlat_tables_v2.h>
15 #include <services/ffa_svc.h>
16 #include "spmc.h"
17 #include "spmc_shared_mem.h"
18 
19 #include <platform_def.h>
20 
21 /**
22  * struct spmc_shmem_obj - Shared memory object.
23  * @desc_size:      Size of @desc.
24  * @desc_filled:    Size of @desc already received.
25  * @in_use:         Number of clients that have called ffa_mem_retrieve_req
26  *                  without a matching ffa_mem_relinquish call.
27  * @desc:           FF-A memory region descriptor passed in ffa_mem_share.
28  */
29 struct spmc_shmem_obj {
30 	size_t desc_size;
31 	size_t desc_filled;
32 	size_t in_use;
33 	struct ffa_mtd desc;
34 };
35 
36 /*
37  * Declare our data structure to store the metadata of memory share requests.
38  * The main datastore is allocated on a per-platform basis to ensure enough
39  * storage can be made available.
40  * The address of the data store will be populated by the SPMC during its
41  * initialization.
42  */
43 
44 struct spmc_shmem_obj_state spmc_shmem_obj_state = {
45 	/* Start the handle value near UINT32_MAX so the top 32 bits are needed quickly. */
46 	.next_handle = 0xffffffc0U,
47 };
48 
49 /**
50  * spmc_shmem_obj_size - Convert from descriptor size to object size.
51  * @desc_size:  Size of struct ffa_memory_region_descriptor object.
52  *
53  * Return: Size of struct spmc_shmem_obj object.
54  */
55 static size_t spmc_shmem_obj_size(size_t desc_size)
56 {
57 	return desc_size + offsetof(struct spmc_shmem_obj, desc);
58 }
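
/*
 * Worked example (editor's note, illustrative only, assuming 8-byte size_t
 * members and no unusual padding): offsetof(struct spmc_shmem_obj, desc) is
 * then 24, so a 256-byte descriptor consumes
 * spmc_shmem_obj_size(256) == 280 bytes of datastore space.
 */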
59 
60 /**
61  * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
62  * @state:      Global state.
63  * @desc_size:  Size of struct ffa_memory_region_descriptor object that
64  *              allocated object will hold.
65  *
66  * Return: Pointer to newly allocated object, or %NULL if there is not enough
67  *         space left. The returned pointer is only valid while @state is
68  *         locked; to use it again after unlocking @state,
69  *         spmc_shmem_obj_lookup must be called.
70  */
71 static struct spmc_shmem_obj *
72 spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
73 {
74 	struct spmc_shmem_obj *obj;
75 	size_t free = state->data_size - state->allocated;
76 	size_t obj_size;
77 
78 	if (state->data == NULL) {
79 		ERROR("Missing shmem datastore!\n");
80 		return NULL;
81 	}
82 
83 	obj_size = spmc_shmem_obj_size(desc_size);
84 
85 	/* Ensure the obj size has not overflowed. */
86 	if (obj_size < desc_size) {
87 		WARN("%s(0x%zx) desc_size overflow\n",
88 		     __func__, desc_size);
89 		return NULL;
90 	}
91 
92 	if (obj_size > free) {
93 		WARN("%s(0x%zx) failed, free 0x%zx\n",
94 		     __func__, desc_size, free);
95 		return NULL;
96 	}
97 	obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
98 	obj->desc = (struct ffa_mtd) {0};
99 	obj->desc_size = desc_size;
100 	obj->desc_filled = 0;
101 	obj->in_use = 0;
102 	state->allocated += obj_size;
103 	return obj;
104 }
105 
106 /**
107  * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
108  * @state:      Global state.
109  * @obj:        Object to free.
110  *
111  * Release memory used by @obj. Other objects may move, so on return all
112  * pointers to struct spmc_shmem_obj object should be considered invalid, not
113  * just @obj.
114  *
115  * The current implementation always compacts the remaining objects to simplify
116  * the allocator and to avoid fragmentation.
117  */
118 
119 static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
120 				  struct spmc_shmem_obj *obj)
121 {
122 	size_t free_size = spmc_shmem_obj_size(obj->desc_size);
123 	uint8_t *shift_dest = (uint8_t *)obj;
124 	uint8_t *shift_src = shift_dest + free_size;
125 	size_t shift_size = state->allocated - (shift_src - state->data);
126 
127 	if (shift_size != 0U) {
128 		memmove(shift_dest, shift_src, shift_size);
129 	}
130 	state->allocated -= free_size;
131 }
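
/*
 * Compaction example (illustrative): with three objects [A][B][C] packed in
 * state->data, freeing B memmove()s C down over B's storage and shrinks
 * state->allocated by spmc_shmem_obj_size(B->desc_size). A pointer to C
 * taken before the free is now stale, which is why callers identify
 * objects by handle rather than by address.
 */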
132 
133 /**
134  * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
135  * @state:      Global state.
136  * @handle:     Unique handle of object to return.
137  *
138  * Return: struct spmc_shmem_obj object with handle matching @handle.
139  *         %NULL, if no object in @state->data has a matching handle.
140  */
141 static struct spmc_shmem_obj *
142 spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
143 {
144 	uint8_t *curr = state->data;
145 
146 	while (curr - state->data < state->allocated) {
147 		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
148 
149 		if (obj->desc.handle == handle) {
150 			return obj;
151 		}
152 		curr += spmc_shmem_obj_size(obj->desc_size);
153 	}
154 	return NULL;
155 }
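
/*
 * Usage sketch (illustrative only, mirroring the callers later in this
 * file): since freeing may move objects, pointers are re-derived from the
 * handle while holding the global lock:
 *
 *	spin_lock(&spmc_shmem_obj_state.lock);
 *	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
 *	if (obj != NULL) {
 *		... use obj, do not cache it past the unlock ...
 *	}
 *	spin_unlock(&spmc_shmem_obj_state.lock);
 */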
156 
157 /**
158  * spmc_shmem_obj_get_next - Get the next memory object from an offset.
159  * @offset:     Offset used to track which objects have previously been
160  *              returned.
161  *
162  * Return: the next struct spmc_shmem_obj object from the provided
163  *	   offset.
164  *	   %NULL, if there are no more objects.
165  */
166 static struct spmc_shmem_obj *
167 spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
168 {
169 	uint8_t *curr = state->data + *offset;
170 
171 	if (curr - state->data < state->allocated) {
172 		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
173 
174 		*offset += spmc_shmem_obj_size(obj->desc_size);
175 
176 		return obj;
177 	}
178 	return NULL;
179 }
180 
181 /*******************************************************************************
182  * FF-A memory descriptor helper functions.
183  ******************************************************************************/
184 /**
185  * spmc_shmem_obj_get_emad - Get the emad from a given index depending on the
186  *                           client's FF-A version.
187  * @desc:         The memory transaction descriptor.
188  * @index:        The index of the emad element to be accessed.
189  * @ffa_version:  FF-A version of the provided structure.
190  * @emad_size:    Will be populated with the size of the returned emad
191  *                descriptor.
192  * Return: A pointer to the requested emad structure.
193  */
194 static void *
195 spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
196 			uint32_t ffa_version, size_t *emad_size)
197 {
198 	uint8_t *emad;
199 
200 	assert(index < desc->emad_count);
201 
202 	/*
203 	 * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
204 	 * format, otherwise assume it is a v1.1 format.
205 	 */
206 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
207 		emad = (uint8_t *)desc + offsetof(struct ffa_mtd_v1_0, emad);
208 		*emad_size = sizeof(struct ffa_emad_v1_0);
209 	} else {
210 		assert(is_aligned(desc->emad_offset, 16));
211 		emad = ((uint8_t *) desc + desc->emad_offset);
212 		*emad_size = desc->emad_size;
213 	}
214 
215 	assert(((uint64_t)index * (uint64_t)*emad_size) <= UINT32_MAX);
216 	return (emad + (*emad_size * index));
217 }
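
/*
 * Layout example (illustrative, assuming the 16-byte v1.0 emad): for a
 * v1.0 caller, emad index 2 resolves to
 *	(uint8_t *)desc + offsetof(struct ffa_mtd_v1_0, emad) + (2 * 16)
 * since v1.0 packs a fixed-size emad array inside the descriptor. For
 * v1.1 the descriptor carries its own layout, so index 2 resolves to
 *	(uint8_t *)desc + desc->emad_offset + (2 * desc->emad_size)
 */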
218 
219 /**
220  * spmc_shmem_obj_get_comp_mrd - Get comp_mrd from a mtd struct based on the
221  *				 FF-A version of the descriptor.
222  * @obj:    Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
223  *
224  * Return: struct ffa_comp_mrd object corresponding to the composite memory
225  *	   region descriptor.
226  */
227 static struct ffa_comp_mrd *
228 spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
229 {
230 	size_t emad_size;
231 	/*
232 	 * The comp_mrd_offset field of the emad descriptor remains consistent
233 	 * between FF-A versions therefore we can use the v1.0 descriptor here
234 	 * in all cases.
235 	 */
236 	struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
237 							     ffa_version,
238 							     &emad_size);
239 
240 	/* Ensure the composite descriptor offset is aligned. */
241 	if (!is_aligned(emad->comp_mrd_offset, 8)) {
242 		WARN("Unaligned composite memory region descriptor offset.\n");
243 		return NULL;
244 	}
245 
246 	return (struct ffa_comp_mrd *)
247 	       ((uint8_t *)(&obj->desc) + emad->comp_mrd_offset);
248 }
249 
250 /**
251  * spmc_shmem_obj_validate_id - Validate a partition ID is participating in
252  *				a given memory transaction.
253  * @obj:        The shared memory object containing the descriptor
254  *              of the memory transaction.
255  * @sp_id:      Partition ID to validate.
256  * Return: true if ID is valid, else false.
257  */
258 bool spmc_shmem_obj_validate_id(struct spmc_shmem_obj *obj, uint16_t sp_id)
259 {
260 	bool found = false;
261 	struct ffa_mtd *desc = &obj->desc;
262 	size_t desc_size = obj->desc_size;
263 
264 	/* Validate the partition is a valid participant. */
265 	for (unsigned int i = 0U; i < desc->emad_count; i++) {
266 		size_t emad_size;
267 		struct ffa_emad_v1_0 *emad;
268 
269 		emad = spmc_shmem_obj_get_emad(desc, i,
270 					       MAKE_FFA_VERSION(1, 1),
271 					       &emad_size);
272 		/*
273 		 * Validate the calculated emad address resides within the
274 		 * descriptor.
275 		 */
276 		if ((emad == NULL) || (uintptr_t) emad >=
277 		    (uintptr_t)((uint8_t *) desc + desc_size)) {
278 			VERBOSE("Invalid emad.\n");
279 			break;
280 		}
281 		if (sp_id == emad->mapd.endpoint_id) {
282 			found = true;
283 			break;
284 		}
285 	}
286 	return found;
287 }
288 
289 /*
290  * Compare two memory regions to determine if any range overlaps with another
291  * ongoing memory transaction.
292  */
293 static bool
294 overlapping_memory_regions(struct ffa_comp_mrd *region1,
295 			   struct ffa_comp_mrd *region2)
296 {
297 	uint64_t region1_start;
298 	uint64_t region1_size;
299 	uint64_t region1_end;
300 	uint64_t region2_start;
301 	uint64_t region2_size;
302 	uint64_t region2_end;
303 
304 	assert(region1 != NULL);
305 	assert(region2 != NULL);
306 
307 	if (region1 == region2) {
308 		return true;
309 	}
310 
311 	/*
312 	 * Check each memory region in the request against existing
313 	 * transactions.
314 	 */
315 	for (size_t i = 0; i < region1->address_range_count; i++) {
316 
317 		region1_start = region1->address_range_array[i].address;
318 		region1_size =
319 			region1->address_range_array[i].page_count *
320 			PAGE_SIZE_4KB;
321 		region1_end = region1_start + region1_size;
322 
323 		for (size_t j = 0; j < region2->address_range_count; j++) {
324 
325 			region2_start = region2->address_range_array[j].address;
326 			region2_size =
327 				region2->address_range_array[j].page_count *
328 				PAGE_SIZE_4KB;
329 			region2_end = region2_start + region2_size;
330 
331 			/* Check if regions are not overlapping. */
332 			if (!((region2_end <= region1_start) ||
333 			      (region1_end <= region2_start))) {
334 				WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
335 				     region1_start, region1_end,
336 				     region2_start, region2_end);
337 				return true;
338 			}
339 		}
340 	}
341 	return false;
342 }
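
/*
 * Worked example (illustrative): ranges are half-open [start, end). A
 * range at 0x8000 covering two 4K pages spans [0x8000, 0xa000); a range at
 * 0x9000 covering one page spans [0x9000, 0xa000). Neither end lies at or
 * before the other's start, so the check above reports an overlap.
 */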
343 
344 /*******************************************************************************
345  * FF-A v1.0 Memory Descriptor Conversion Helpers.
346  ******************************************************************************/
347 /**
348  * spmc_shm_get_v1_1_descriptor_size - Calculate the required size for a v1.1
349  *                                     converted descriptor.
350  * @orig:       The original v1.0 memory transaction descriptor.
351  * @desc_size:  The size of the original v1.0 memory transaction descriptor.
352  *
353  * Return: the size required to store the descriptor in the v1.1 format.
354  */
355 static size_t
356 spmc_shm_get_v1_1_descriptor_size(struct ffa_mtd_v1_0 *orig, size_t desc_size)
357 {
358 	size_t size = 0;
359 	struct ffa_comp_mrd *mrd;
360 	struct ffa_emad_v1_0 *emad_array = orig->emad;
361 
362 	/* Get the size of the v1.1 descriptor. */
363 	size += sizeof(struct ffa_mtd);
364 
365 	/* Add the size of the emad descriptors. */
366 	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);
367 
368 	/* Add the size of the composite mrds. */
369 	size += sizeof(struct ffa_comp_mrd);
370 
371 	/* Add the size of the constituent mrds. */
372 	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
373 	      emad_array[0].comp_mrd_offset);
374 
375 	/* Check the calculated address is within the memory descriptor. */
376 	if (((uintptr_t) mrd + sizeof(struct ffa_comp_mrd)) >
377 	    (uintptr_t)((uint8_t *) orig + desc_size)) {
378 		return 0;
379 	}
380 	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);
381 
382 	return size;
383 }
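
/*
 * Worked example (illustrative, assuming the FF-A layout sizes: 48-byte
 * struct ffa_mtd, 16-byte struct ffa_emad_v1_0, 16-byte struct
 * ffa_comp_mrd and 16-byte struct ffa_cons_mrd): a v1.0 transaction with
 * 2 endpoints and 3 constituent ranges needs
 *	48 + (2 * 16) + 16 + (3 * 16) = 144 bytes
 * in the v1.1 format.
 */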
384 
385 /**
386  * spmc_shm_get_v1_0_descriptor_size - Calculate the required size for a v1.0
387  *                                     converted descriptor.
388  * @orig:       The original v1.1 memory transaction descriptor.
389  * @desc_size:  The size of the original v1.1 memory transaction descriptor.
390  *
391  * Return: the size required to store the descriptor in the v1.0 format.
392  */
393 static size_t
394 spmc_shm_get_v1_0_descriptor_size(struct ffa_mtd *orig, size_t desc_size)
395 {
396 	size_t size = 0;
397 	struct ffa_comp_mrd *mrd;
398 	struct ffa_emad_v1_0 *emad_array = (struct ffa_emad_v1_0 *)
399 					   ((uint8_t *) orig +
400 					    orig->emad_offset);
401 
402 	/* Get the size of the v1.0 descriptor. */
403 	size += sizeof(struct ffa_mtd_v1_0);
404 
405 	/* Add the size of the v1.0 emad descriptors. */
406 	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);
407 
408 	/* Add the size of the composite mrds. */
409 	size += sizeof(struct ffa_comp_mrd);
410 
411 	/* Add the size of the constituent mrds. */
412 	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
413 	      emad_array[0].comp_mrd_offset);
414 
415 	/* Check the calculated address is within the memory descriptor. */
416 	if (((uintptr_t) mrd + sizeof(struct ffa_comp_mrd)) >
417 	    (uintptr_t)((uint8_t *) orig + desc_size)) {
418 		return 0;
419 	}
420 	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);
421 
422 	return size;
423 }
424 
425 /**
426  * spmc_shm_convert_shmem_obj_from_v1_0 - Converts a given v1.0 memory object.
427  * @out_obj:	The shared memory object to populate with the converted descriptor.
428  * @orig:	The shared memory object containing the v1.0 descriptor.
429  *
430  * Return: true if the conversion is successful else false.
431  */
432 static bool
433 spmc_shm_convert_shmem_obj_from_v1_0(struct spmc_shmem_obj *out_obj,
434 				     struct spmc_shmem_obj *orig)
435 {
436 	struct ffa_mtd_v1_0 *mtd_orig = (struct ffa_mtd_v1_0 *) &orig->desc;
437 	struct ffa_mtd *out = &out_obj->desc;
438 	struct ffa_emad_v1_0 *emad_array_in;
439 	struct ffa_emad_v1_0 *emad_array_out;
440 	struct ffa_comp_mrd *mrd_in;
441 	struct ffa_comp_mrd *mrd_out;
442 
443 	size_t mrd_in_offset;
444 	size_t mrd_out_offset;
445 	size_t mrd_size = 0;
446 
447 	/* Populate the new descriptor format from the v1.0 struct. */
448 	out->sender_id = mtd_orig->sender_id;
449 	out->memory_region_attributes = mtd_orig->memory_region_attributes;
450 	out->flags = mtd_orig->flags;
451 	out->handle = mtd_orig->handle;
452 	out->tag = mtd_orig->tag;
453 	out->emad_count = mtd_orig->emad_count;
454 	out->emad_size = sizeof(struct ffa_emad_v1_0);
455 
456 	/*
457 	 * We will locate the emad descriptors directly after the ffa_mtd
458 	 * struct. This will be 16-byte aligned.
459 	 */
460 	out->emad_offset = sizeof(struct ffa_mtd);
461 
462 	emad_array_in = mtd_orig->emad;
463 	emad_array_out = (struct ffa_emad_v1_0 *)
464 			 ((uint8_t *) out + out->emad_offset);
465 
466 	/* Copy across the emad structs. */
467 	for (unsigned int i = 0U; i < out->emad_count; i++) {
468 		/* Bound check for emad array. */
469 		if (((uint8_t *)&emad_array_in[i] + sizeof(struct ffa_emad_v1_0)) >
470 		    ((uint8_t *) mtd_orig + orig->desc_size)) {
471 			VERBOSE("%s: Invalid mtd structure.\n", __func__);
472 			return false;
473 		}
474 		memcpy(&emad_array_out[i], &emad_array_in[i],
475 		       sizeof(struct ffa_emad_v1_0));
476 	}
477 
478 	/* Place the mrd descriptors after the end of the emad descriptors.*/
479 	mrd_in_offset = emad_array_in->comp_mrd_offset;
480 	mrd_out_offset = out->emad_offset + (out->emad_size * out->emad_count);
481 	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);
482 
483 	/* Add the size of the composite memory region descriptor. */
484 	mrd_size += sizeof(struct ffa_comp_mrd);
485 
486 	/* Find the mrd descriptor. */
487 	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);
488 
489 	/* Add the size of the constituent memory region descriptors. */
490 	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);
491 
492 	/*
493 	 * Update the offset in the emads by the delta between the input and
494 	 * output addresses.
495 	 */
496 	for (unsigned int i = 0U; i < out->emad_count; i++) {
497 		emad_array_out[i].comp_mrd_offset =
498 			emad_array_in[i].comp_mrd_offset +
499 			(mrd_out_offset - mrd_in_offset);
500 	}
501 
502 	/* Verify that we stay within bound of the memory descriptors. */
503 	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
504 	     (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
505 	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
506 	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
507 		ERROR("%s: Invalid mrd structure.\n", __func__);
508 		return false;
509 	}
510 
511 	/* Copy the mrd descriptors directly. */
512 	memcpy(mrd_out, mrd_in, mrd_size);
513 
514 	return true;
515 }
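
/*
 * Conversion sketch (illustrative): the converted v1.1 object is laid out
 * back to back as
 *	[struct ffa_mtd][emad 0 .. emad_count - 1][comp_mrd + constituents]
 * with emad_offset == sizeof(struct ffa_mtd). Because the v1.0 source
 * keeps its emads at a different offset, each copied emad has its
 * comp_mrd_offset rebased by (mrd_out_offset - mrd_in_offset) so that it
 * still points at the composite descriptor inside the new object.
 */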
516 
517 /**
518  * spmc_shm_convert_mtd_to_v1_0 - Converts a given v1.1 memory object to
519  *                                v1.0 memory object.
520  * @out_obj:    The shared memory object to populate with the v1.0 descriptor.
521  * @orig:       The shared memory object containing the v1.1 descriptor.
522  *
523  * Return: true if the conversion is successful else false.
524  */
525 static bool
526 spmc_shm_convert_mtd_to_v1_0(struct spmc_shmem_obj *out_obj,
527 			     struct spmc_shmem_obj *orig)
528 {
529 	struct ffa_mtd *mtd_orig = &orig->desc;
530 	struct ffa_mtd_v1_0 *out = (struct ffa_mtd_v1_0 *) &out_obj->desc;
531 	struct ffa_emad_v1_0 *emad_in;
532 	struct ffa_emad_v1_0 *emad_array_in;
533 	struct ffa_emad_v1_0 *emad_array_out;
534 	struct ffa_comp_mrd *mrd_in;
535 	struct ffa_comp_mrd *mrd_out;
536 
537 	size_t mrd_in_offset;
538 	size_t mrd_out_offset;
539 	size_t emad_out_array_size;
540 	size_t mrd_size = 0;
541 	size_t orig_desc_size = orig->desc_size;
542 
543 	/* Populate the v1.0 descriptor format from the v1.1 struct. */
544 	out->sender_id = mtd_orig->sender_id;
545 	out->memory_region_attributes = mtd_orig->memory_region_attributes;
546 	out->flags = mtd_orig->flags;
547 	out->handle = mtd_orig->handle;
548 	out->tag = mtd_orig->tag;
549 	out->emad_count = mtd_orig->emad_count;
550 
551 	/* Determine the location of the emad array in both descriptors. */
552 	emad_array_in = (struct ffa_emad_v1_0 *)
553 			((uint8_t *) mtd_orig + mtd_orig->emad_offset);
554 	emad_array_out = out->emad;
555 
556 	/* Copy across the emad structs. */
557 	emad_in = emad_array_in;
558 	for (unsigned int i = 0U; i < out->emad_count; i++) {
559 		/* Bound check for emad array. */
560 		if (((uint8_t *)emad_in + sizeof(struct ffa_emad_v1_0)) >
561 				((uint8_t *) mtd_orig + orig_desc_size)) {
562 			VERBOSE("%s: Invalid mtd structure.\n", __func__);
563 			return false;
564 		}
565 		memcpy(&emad_array_out[i], emad_in,
566 		       sizeof(struct ffa_emad_v1_0));
567 
568 		emad_in = (struct ffa_emad_v1_0 *)((uint8_t *)emad_in +
						   mtd_orig->emad_size);
569 	}
570 
571 	/* Place the mrd descriptors after the end of the emad descriptors. */
572 	emad_out_array_size = sizeof(struct ffa_emad_v1_0) * out->emad_count;
573 
574 	mrd_out_offset =  (uint8_t *) out->emad - (uint8_t *) out +
575 			  emad_out_array_size;
576 
577 	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);
578 
579 	mrd_in_offset = mtd_orig->emad_offset +
580 			(mtd_orig->emad_size * mtd_orig->emad_count);
581 
582 	/* Add the size of the composite memory region descriptor. */
583 	mrd_size += sizeof(struct ffa_comp_mrd);
584 
585 	/* Find the mrd descriptor. */
586 	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);
587 
588 	/* Add the size of the constituent memory region descriptors. */
589 	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);
590 
591 	/*
592 	 * Update the offset in the emads by the delta between the input and
593 	 * output addresses.
594 	 */
595 	emad_in = emad_array_in;
596 
597 	for (unsigned int i = 0U; i < out->emad_count; i++) {
598 		emad_array_out[i].comp_mrd_offset = emad_in->comp_mrd_offset +
599 						    (mrd_out_offset -
600 						     mrd_in_offset);
601 		emad_in = (struct ffa_emad_v1_0 *)((uint8_t *)emad_in +
						   mtd_orig->emad_size);
602 	}
603 
604 	/* Verify that we stay within bound of the memory descriptors. */
605 	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
606 	     (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
607 	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
608 	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
609 		ERROR("%s: Invalid mrd structure.\n", __func__);
610 		return false;
611 	}
612 
613 	/* Copy the mrd descriptors directly. */
614 	memcpy(mrd_out, mrd_in, mrd_size);
615 
616 	return true;
617 }
618 
619 /**
620  * spmc_populate_ffa_v1_0_descriptor - Converts a given v1.1 memory object to
621  *                                     the v1.0 format and populates the
622  *                                     provided buffer.
623  * @dst:	    Buffer to populate v1.0 ffa_memory_region_descriptor.
624  * @orig_obj:	    Object containing v1.1 ffa_memory_region_descriptor.
625  * @buf_size:	    Size of the buffer to populate.
626  * @offset:	    The offset of the converted descriptor to copy.
627  * @copy_size:	    Will be populated with the number of bytes copied.
628  * @v1_0_desc_size: Will be populated with the total size of the v1.0
629  *                  descriptor.
630  *
631  * Return: 0 if conversion and population succeeded.
632  * Note: This function invalidates the reference to @orig_obj; therefore,
633  * `spmc_shmem_obj_lookup` must be called if further usage is required.
634  */
635 static uint32_t
636 spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
637 				 size_t buf_size, size_t offset,
638 				 size_t *copy_size, size_t *v1_0_desc_size)
639 {
640 	struct spmc_shmem_obj *v1_0_obj;
641 
642 	/* Calculate the size that the v1.0 descriptor will require. */
643 	*v1_0_desc_size = spmc_shm_get_v1_0_descriptor_size(
644 				&orig_obj->desc, orig_obj->desc_size);
645 
646 	if (*v1_0_desc_size == 0) {
647 		ERROR("%s: cannot determine size of descriptor.\n",
648 		      __func__);
649 		return FFA_ERROR_INVALID_PARAMETER;
650 	}
651 
652 	/* Get a new obj to store the v1.0 descriptor. */
653 	v1_0_obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
654 					*v1_0_desc_size);
655 
656 	if (!v1_0_obj) {
657 		return FFA_ERROR_NO_MEMORY;
658 	}
659 
660 	/* Perform the conversion from v1.1 to v1.0. */
661 	if (!spmc_shm_convert_mtd_to_v1_0(v1_0_obj, orig_obj)) {
662 		spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
663 		return FFA_ERROR_INVALID_PARAMETER;
664 	}
665 
666 	*copy_size = MIN(v1_0_obj->desc_size - offset, buf_size);
667 	memcpy(dst, (uint8_t *) &v1_0_obj->desc + offset, *copy_size);
668 
669 	/*
670 	 * We're finished with the v1.0 descriptor for now so free it.
671 	 * Note that this will invalidate any references to the v1.1
672 	 * descriptor.
673 	 */
674 	spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
675 
676 	return 0;
677 }
678 
679 static int
680 spmc_validate_mtd_start(struct ffa_mtd *desc, uint32_t ffa_version,
681 			size_t fragment_length, size_t total_length)
682 {
683 	unsigned long long emad_end;
684 	unsigned long long emad_size;
685 	unsigned long long emad_offset;
686 	unsigned int min_desc_size;
687 
688 	/* Determine the appropriate minimum descriptor size. */
689 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
690 		min_desc_size = sizeof(struct ffa_mtd_v1_0);
691 	} else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
692 		min_desc_size = sizeof(struct ffa_mtd);
693 	} else {
694 		return FFA_ERROR_INVALID_PARAMETER;
695 	}
696 	if (fragment_length < min_desc_size) {
697 		WARN("%s: invalid length %zu < %u\n", __func__, fragment_length,
698 		     min_desc_size);
699 		return FFA_ERROR_INVALID_PARAMETER;
700 	}
701 
702 	if (desc->emad_count == 0U) {
703 		WARN("%s: unsupported attribute desc count %u.\n",
704 		     __func__, desc->emad_count);
705 		return FFA_ERROR_INVALID_PARAMETER;
706 	}
707 
708 	/*
709 	 * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
710 	 * format, otherwise assume it is a v1.1 format.
711 	 */
712 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
713 		emad_offset = emad_size = sizeof(struct ffa_emad_v1_0);
714 	} else {
715 		if (!is_aligned(desc->emad_offset, 16)) {
716 			WARN("%s: Emad offset %" PRIx32 " is not 16-byte aligned.\n",
717 			     __func__, desc->emad_offset);
718 			return FFA_ERROR_INVALID_PARAMETER;
719 		}
720 		if (desc->emad_offset < sizeof(struct ffa_mtd)) {
721 			WARN("%s: Emad offset too small: 0x%" PRIx32 " < 0x%zx.\n",
722 			     __func__, desc->emad_offset,
723 			     sizeof(struct ffa_mtd));
724 			return FFA_ERROR_INVALID_PARAMETER;
725 		}
726 		emad_offset = desc->emad_offset;
727 		if (desc->emad_size < sizeof(struct ffa_emad_v1_0)) {
728 			WARN("%s: Bad emad size (%" PRIu32 " < %zu).\n", __func__,
729 			     desc->emad_size, sizeof(struct ffa_emad_v1_0));
730 			return FFA_ERROR_INVALID_PARAMETER;
731 		}
732 		if (!is_aligned(desc->emad_size, 16)) {
733 			WARN("%s: Emad size 0x%" PRIx32 " is not 16-byte aligned.\n",
734 			     __func__, desc->emad_size);
735 			return FFA_ERROR_INVALID_PARAMETER;
736 		}
737 		emad_size = desc->emad_size;
738 	}
739 
740 	/*
741 	 * Overflow is impossible: the arithmetic happens in at least 64-bit
742 	 * precision, but all of the operands are bounded by UINT32_MAX, and
743 	 * ((2^32 - 1)^2 + (2^32 - 1) + (2^32 - 1)) = ((2^32 - 1) * (2^32 + 1))
744 	 * = (2^64 - 1).
745 	 */
746 	CASSERT(sizeof(desc->emad_count) == 4, assert_emad_count_max_too_large);
747 	emad_end = (desc->emad_count * (unsigned long long)emad_size) +
748 		   (unsigned long long)sizeof(struct ffa_comp_mrd) +
749 		   (unsigned long long)emad_offset;
750 
751 	if (emad_end > total_length) {
752 		WARN("%s: Composite memory region extends beyond descriptor: 0x%llx > 0x%zx\n",
753 		     __func__, emad_end, total_length);
754 		return FFA_ERROR_INVALID_PARAMETER;
755 	}
756 
757 	return 0;
758 }
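
/*
 * Worked example (illustrative, assuming a 16-byte struct ffa_comp_mrd):
 * a v1.1 descriptor with emad_offset = 48, emad_size = 16 and
 * emad_count = 2 yields
 *	emad_end = (2 * 16) + 16 + 48 = 96
 * so any transaction shorter than 96 bytes in total is rejected before the
 * emads are ever dereferenced.
 */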
759 
760 /**
761  * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
762  * @obj:	  Object containing ffa_memory_region_descriptor.
763  * @ffa_version:  FF-A version of the provided descriptor.
764  *
765  * Return: 0 if object is valid, -EINVAL if constituent_memory_region_descriptor
766  * offset or count is invalid.
767  */
768 static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
769 				uint32_t ffa_version)
770 {
771 	const struct ffa_emad_v1_0 *emad;
772 	size_t emad_size;
773 	uint32_t comp_mrd_offset = 0;
774 
775 	if (obj->desc_filled != obj->desc_size) {
776 		ERROR("BUG: %s called on incomplete object (%zu != %zu)\n",
777 		      __func__, obj->desc_filled, obj->desc_size);
778 		panic();
779 	}
780 
781 	if (spmc_validate_mtd_start(&obj->desc, ffa_version,
782 				    obj->desc_filled, obj->desc_size)) {
783 		ERROR("BUG: %s called on object with corrupt memory region descriptor\n",
784 		      __func__);
785 		panic();
786 	}
787 
791 	for (size_t emad_num = 0; emad_num < obj->desc.emad_count; emad_num++) {
792 		size_t size;
793 		size_t count;
794 		size_t expected_size;
795 		uint64_t total_page_count;
796 		size_t header_emad_size;
797 		uint32_t offset;
798 		struct ffa_comp_mrd *comp;
799 		ffa_endpoint_id16_t ep_id;
800 
		emad = spmc_shmem_obj_get_emad(&obj->desc, emad_num,
					       ffa_version, &emad_size);

801 		/*
802 		 * Validate the calculated emad address resides within the
803 		 * descriptor.
804 		 */
805 		if ((uintptr_t) emad >
806 		    ((uintptr_t) &obj->desc + obj->desc_size - emad_size)) {
807 			ERROR("BUG: Invalid emad access not detected earlier.\n");
808 			panic();
809 		}
810 
812 		offset = emad->comp_mrd_offset;
813 
814 		/*
815 		 * If a partition ID resides in the secure world validate that
816 		 * the partition ID is for a known partition. Ignore any
817 		 * partition ID belonging to the normal world as it is assumed
818 		 * the Hypervisor will have validated these.
819 		 */
820 		ep_id = emad->mapd.endpoint_id;
821 		if (ffa_is_secure_world_id(ep_id)) {
822 			if (spmc_get_sp_ctx(ep_id) == NULL) {
823 				WARN("%s: Invalid receiver id 0x%x\n",
824 				     __func__, ep_id);
825 				return -EINVAL;
826 			}
827 		}
828 
829 		/*
830 		 * The offset provided to the composite memory region descriptor
831 		 * should be consistent across endpoint descriptors. Store the
832 		 * first entry and compare against subsequent entries.
833 		 */
834 		if (comp_mrd_offset == 0) {
835 			comp_mrd_offset = offset;
836 		} else {
837 			if (comp_mrd_offset != offset) {
838 				ERROR("%s: mismatching offsets provided, %u != %u\n",
839 				       __func__, offset, comp_mrd_offset);
840 				return -EINVAL;
841 			}
842 			continue; /* Remainder only executed on first iteration. */
843 		}
844 
845 		header_emad_size = (size_t)((uint8_t *)emad - (uint8_t *)&obj->desc) +
846 			(obj->desc.emad_count * emad_size);
847 
848 		if (offset < header_emad_size) {
849 			WARN("%s: invalid object, offset %u < header + emad %zu\n",
850 			     __func__, offset, header_emad_size);
851 			return -EINVAL;
852 		}
853 
854 		size = obj->desc_size;
855 
856 		if (offset > size) {
857 			WARN("%s: invalid object, offset %u > total size %zu\n",
858 			     __func__, offset, obj->desc_size);
859 			return -EINVAL;
860 		}
861 		size -= offset;
862 
863 		if (size < sizeof(struct ffa_comp_mrd)) {
864 			WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
865 			     __func__, offset, obj->desc_size);
866 			return -EINVAL;
867 		}
868 		size -= sizeof(struct ffa_comp_mrd);
869 
870 		count = size / sizeof(struct ffa_cons_mrd);
871 
872 		comp = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);
873 
874 		if (comp == NULL) {
875 			WARN("%s: invalid comp_mrd offset\n", __func__);
876 			return -EINVAL;
877 		}
878 
879 		if (comp->address_range_count != count) {
880 			WARN("%s: invalid object, desc count %u != %zu\n",
881 			     __func__, comp->address_range_count, count);
882 			return -EINVAL;
883 		}
884 
885 		expected_size = offset + sizeof(*comp) +
886 			count * sizeof(struct ffa_cons_mrd);
887 
888 		if (expected_size != obj->desc_size) {
889 			WARN("%s: invalid object, computed size %zu != size %zu\n",
890 			       __func__, expected_size, obj->desc_size);
891 			return -EINVAL;
892 		}
893 
894 		total_page_count = 0;
895 
896 		for (size_t i = 0; i < count; i++) {
897 			total_page_count +=
898 				comp->address_range_array[i].page_count;
899 		}
900 		if (comp->total_page_count != total_page_count) {
901 			WARN("%s: invalid object, desc total_page_count %u != %" PRIu64 "\n",
902 			     __func__, comp->total_page_count,
903 			     total_page_count);
904 			return -EINVAL;
905 		}
906 	}
907 	return 0;
908 }
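
/*
 * Size invariant (illustrative summary of the checks above): a valid,
 * fully received object satisfies
 *	desc_size == comp_mrd_offset + sizeof(struct ffa_comp_mrd) +
 *		     (address_range_count * sizeof(struct ffa_cons_mrd))
 * i.e. the constituent ranges run exactly to the end of the descriptor,
 * and the per-range page counts sum to comp->total_page_count.
 */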
909 
910 /**
911  * spmc_shmem_check_state_obj - Check if the descriptor describes memory
912  *				regions that are currently involved in
913  *				existing memory transactions. This implies that
914  *				the memory is not in a valid state for lending.
915  * @obj:    Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
916  *
917  * Return: 0 if object is valid, -EINVAL if invalid memory state.
918  */
919 static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj,
920 				      uint32_t ffa_version)
921 {
922 	size_t obj_offset = 0;
923 	struct spmc_shmem_obj *inflight_obj;
924 
925 	struct ffa_comp_mrd *other_mrd;
926 	struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj,
927 								  ffa_version);
928 
929 	if (requested_mrd == NULL) {
930 		return -EINVAL;
931 	}
932 
933 	inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
934 					       &obj_offset);
935 
936 	while (inflight_obj != NULL) {
937 		/*
938 		 * Don't compare the transaction to itself or to partially
939 		 * transmitted descriptors.
940 		 */
941 		if ((obj->desc.handle != inflight_obj->desc.handle) &&
942 		    (inflight_obj->desc_size == inflight_obj->desc_filled)) {
943 			other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj,
944 							  FFA_VERSION_COMPILED);
945 			if (other_mrd == NULL) {
946 				return -EINVAL;
947 			}
948 			if (overlapping_memory_regions(requested_mrd,
949 						       other_mrd)) {
950 				return -EINVAL;
951 			}
952 		}
953 
954 		inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
955 						       &obj_offset);
956 	}
957 	return 0;
958 }
959 
960 static long spmc_ffa_fill_desc(struct mailbox *mbox,
961 			       struct spmc_shmem_obj *obj,
962 			       uint32_t fragment_length,
963 			       ffa_mtd_flag32_t mtd_flag,
964 			       uint32_t ffa_version,
965 			       void *smc_handle)
966 {
967 	int ret;
968 	size_t emad_size;
969 	uint32_t handle_low;
970 	uint32_t handle_high;
971 	struct ffa_emad_v1_0 *emad;
972 	struct ffa_emad_v1_0 *other_emad;
973 
974 	if (mbox->rxtx_page_count == 0U) {
975 		WARN("%s: buffer pair not registered.\n", __func__);
976 		ret = FFA_ERROR_INVALID_PARAMETER;
977 		goto err_arg;
978 	}
979 
980 	CASSERT(sizeof(mbox->rxtx_page_count) == 4, assert_bogus_page_count);
981 	if (fragment_length > (uint64_t)mbox->rxtx_page_count * PAGE_SIZE_4KB) {
982 		WARN("%s: bad fragment size %u > %" PRIu64 " buffer size\n", __func__,
983 		     fragment_length, (uint64_t)mbox->rxtx_page_count * PAGE_SIZE_4KB);
984 		ret = FFA_ERROR_INVALID_PARAMETER;
985 		goto err_arg;
986 	}
987 
988 	if (fragment_length > obj->desc_size - obj->desc_filled) {
989 		WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
990 		     fragment_length, obj->desc_size - obj->desc_filled);
991 		ret = FFA_ERROR_INVALID_PARAMETER;
992 		goto err_arg;
993 	}
994 
995 	memcpy((uint8_t *)&obj->desc + obj->desc_filled,
996 	       (uint8_t *) mbox->tx_buffer, fragment_length);
997 
998 	/* Ensure that the sender ID resides in the normal world. */
999 	if (ffa_is_secure_world_id(obj->desc.sender_id)) {
1000 		WARN("%s: Invalid sender ID 0x%x.\n",
1001 		     __func__, obj->desc.sender_id);
1002 		ret = FFA_ERROR_DENIED;
1003 		goto err_arg;
1004 	}
1005 
1006 	/* Ensure the NS bit is set to 0. */
1007 	if ((obj->desc.memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
1008 		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
1009 		ret = FFA_ERROR_INVALID_PARAMETER;
1010 		goto err_arg;
1011 	}
1012 
1013 	/*
1014 	 * We don't currently support any optional flags so ensure none are
1015 	 * requested.
1016 	 */
1017 	if (obj->desc.flags != 0U && mtd_flag != 0U &&
1018 	    (obj->desc.flags != mtd_flag)) {
1019 		WARN("%s: invalid memory transaction flags %u != %u\n",
1020 		     __func__, obj->desc.flags, mtd_flag);
1021 		ret = FFA_ERROR_INVALID_PARAMETER;
1022 		goto err_arg;
1023 	}
1024 
1025 	if (obj->desc_filled == 0U) {
1026 		/* First fragment, descriptor header has been copied */
1027 		ret = spmc_validate_mtd_start(&obj->desc, ffa_version,
1028 					      fragment_length, obj->desc_size);
1029 		if (ret != 0) {
1030 			goto err_bad_desc;
1031 		}
1032 
1033 		obj->desc.handle = spmc_shmem_obj_state.next_handle++;
1034 		obj->desc.flags |= mtd_flag;
1035 	}
1036 
1037 	obj->desc_filled += fragment_length;
1038 
1039 	handle_low = (uint32_t)obj->desc.handle;
1040 	handle_high = obj->desc.handle >> 32;
1041 
1042 	if (obj->desc_filled != obj->desc_size) {
1043 		SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
1044 			 handle_high, obj->desc_filled,
1045 			 (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
1046 	}
1047 
1048 	/* The full descriptor has been received, perform any final checks. */
1049 
1050 	ret = spmc_shmem_check_obj(obj, ffa_version);
1051 	if (ret != 0) {
1052 		ret = FFA_ERROR_INVALID_PARAMETER;
1053 		goto err_bad_desc;
1054 	}
1055 
1056 	/* Ensure partition IDs are not duplicated. */
1057 	for (size_t i = 0; i < obj->desc.emad_count; i++) {
1058 		emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
1059 					       &emad_size);
1060 
1061 		for (size_t j = i + 1; j < obj->desc.emad_count; j++) {
1062 			other_emad = spmc_shmem_obj_get_emad(&obj->desc, j,
1063 							     ffa_version,
1064 							     &emad_size);
1065 
1066 			if (emad->mapd.endpoint_id ==
1067 				other_emad->mapd.endpoint_id) {
1068 				WARN("%s: Duplicated endpoint id 0x%x\n",
1069 				     __func__, emad->mapd.endpoint_id);
1070 				ret = FFA_ERROR_INVALID_PARAMETER;
1071 				goto err_bad_desc;
1072 			}
1073 		}
1074 	}
1075 
1076 	ret = spmc_shmem_check_state_obj(obj, ffa_version);
1077 	if (ret) {
1078 		ERROR("%s: invalid memory region descriptor.\n", __func__);
1079 		ret = FFA_ERROR_INVALID_PARAMETER;
1080 		goto err_bad_desc;
1081 	}
1082 
1083 	/*
1084 	 * Everything checks out, if the sender was using FF-A v1.0, convert
1085 	 * the descriptor format to use the v1.1 structures.
1086 	 */
1087 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1088 		struct spmc_shmem_obj *v1_1_obj;
1089 		uint64_t mem_handle;
1090 
1091 		/* Calculate the size that the v1.1 descriptor will require. */
1092 		size_t v1_1_desc_size =
1093 		    spmc_shm_get_v1_1_descriptor_size((void *) &obj->desc,
1094 						      obj->desc_size);
1095 
1096 		if (v1_1_desc_size == 0U) {
1097 			ERROR("%s: cannot determine size of descriptor.\n",
1098 			      __func__);
			ret = FFA_ERROR_INVALID_PARAMETER;
1099 			goto err_arg;
1100 		}
1101 
1102 		/* Get a new obj to store the v1.1 descriptor. */
1103 		v1_1_obj =
1104 		    spmc_shmem_obj_alloc(&spmc_shmem_obj_state, v1_1_desc_size);
1105 
1106 		if (!v1_1_obj) {
1107 			ret = FFA_ERROR_NO_MEMORY;
1108 			goto err_arg;
1109 		}
1110 
1111 		/* Perform the conversion from v1.0 to v1.1. */
1112 		v1_1_obj->desc_size = v1_1_desc_size;
1113 		v1_1_obj->desc_filled = v1_1_desc_size;
1114 		if (!spmc_shm_convert_shmem_obj_from_v1_0(v1_1_obj, obj)) {
1115 			ERROR("%s: Could not convert mtd!\n", __func__);
1116 			spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_1_obj);
			ret = FFA_ERROR_INVALID_PARAMETER;
1117 			goto err_arg;
1118 		}
1119 
1120 		/*
1121 		 * We're finished with the v1.0 descriptor so free it
1122 		 * and continue our checks with the new v1.1 descriptor.
1123 		 */
1124 		mem_handle = obj->desc.handle;
1125 		spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1126 		obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1127 		if (obj == NULL) {
1128 			ERROR("%s: Failed to find converted descriptor.\n",
1129 			     __func__);
1130 			ret = FFA_ERROR_INVALID_PARAMETER;
1131 			return spmc_ffa_error_return(smc_handle, ret);
1132 		}
1133 	}
1134 
1135 	/* Allow for platform specific operations to be performed. */
1136 	ret = plat_spmc_shmem_begin(&obj->desc);
1137 	if (ret != 0) {
1138 		goto err_arg;
1139 	}
1140 
1141 	SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
1142 		 0, 0, 0);
1143 
1144 err_bad_desc:
1145 err_arg:
1146 	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1147 	return spmc_ffa_error_return(smc_handle, ret);
1148 }
1149 
1150 /**
1151  * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
1152  * @client:             Client state.
1153  * @total_length:       Total length of shared memory descriptor.
1154  * @fragment_length:    Length of fragment of shared memory descriptor passed in
1155  *                      this call.
1156  * @address:            Not supported, must be 0.
1157  * @page_count:         Not supported, must be 0.
1158  * @smc_handle:         Handle passed to smc call. Used to return
1159  *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
1160  *
1161  * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
1162  * to share or lend memory from non-secure os to secure os (with no stream
1163  * endpoints).
1164  *
1165  * Return: 0 on success, error code on failure.
1166  */
1167 long spmc_ffa_mem_send(uint32_t smc_fid,
1168 			bool secure_origin,
1169 			uint64_t total_length,
1170 			uint32_t fragment_length,
1171 			uint64_t address,
1172 			uint32_t page_count,
1173 			void *cookie,
1174 			void *handle,
1175 			uint64_t flags)
1177 {
1178 	long ret;
1179 	struct spmc_shmem_obj *obj;
1180 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1181 	ffa_mtd_flag32_t mtd_flag;
1182 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1183 	size_t min_desc_size;
1184 
1185 	if (address != 0U || page_count != 0U) {
1186 		WARN("%s: custom memory region for message not supported.\n",
1187 		     __func__);
1188 		return spmc_ffa_error_return(handle,
1189 					     FFA_ERROR_INVALID_PARAMETER);
1190 	}
1191 
1192 	if (secure_origin) {
1193 		WARN("%s: unsupported share direction.\n", __func__);
1194 		return spmc_ffa_error_return(handle,
1195 					     FFA_ERROR_INVALID_PARAMETER);
1196 	}
1197 
1198 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1199 		min_desc_size = sizeof(struct ffa_mtd_v1_0);
1200 	} else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
1201 		min_desc_size = sizeof(struct ffa_mtd);
1202 	} else {
1203 		WARN("%s: bad FF-A version.\n", __func__);
1204 		return spmc_ffa_error_return(handle,
1205 					     FFA_ERROR_INVALID_PARAMETER);
1206 	}
1207 
1208 	/* Check if the descriptor is too small for the FF-A version. */
1209 	if (fragment_length < min_desc_size) {
1210 		WARN("%s: bad first fragment size %u < %zu\n",
1211 		     __func__, fragment_length, min_desc_size);
1212 		return spmc_ffa_error_return(handle,
1213 					     FFA_ERROR_INVALID_PARAMETER);
1214 	}
1215 
1216 	if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
1217 		mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
1218 	} else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
1219 		mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
1220 	} else {
1221 		WARN("%s: invalid memory management operation.\n", __func__);
1222 		return spmc_ffa_error_return(handle,
1223 					     FFA_ERROR_INVALID_PARAMETER);
1224 	}
1225 
1226 	spin_lock(&spmc_shmem_obj_state.lock);
1227 	obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
1228 	if (obj == NULL) {
1229 		ret = FFA_ERROR_NO_MEMORY;
1230 		goto err_unlock;
1231 	}
1232 
1233 	spin_lock(&mbox->lock);
1234 	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag,
1235 				 ffa_version, handle);
1236 	spin_unlock(&mbox->lock);
1237 
1238 	spin_unlock(&spmc_shmem_obj_state.lock);
1239 	return ret;
1240 
1241 err_unlock:
1242 	spin_unlock(&spmc_shmem_obj_state.lock);
1243 	return spmc_ffa_error_return(handle, ret);
1244 }
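
/*
 * Caller sketch (illustrative pseudo-code for the normal-world sender, not
 * part of this file): a descriptor larger than the TX buffer is sent in
 * fragments, following the FFA_MEM_FRAG_RX responses returned above:
 *
 *	copy first fragment into the TX buffer;
 *	ret = FFA_MEM_SHARE(total_length, fragment_length);
 *	while (ret is FFA_MEM_FRAG_RX) {	// w1/w2 = handle, w3 = offset
 *		copy next fragment into the TX buffer;
 *		ret = FFA_MEM_FRAG_TX(handle_low, handle_high, frag_len);
 *	}
 *	// on success, FFA_SUCCESS carries the 64-bit memory handle
 */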
1245 
1246 /**
1247  * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
1248  * @client:             Client state.
1249  * @handle_low:         Handle_low value returned from FFA_MEM_FRAG_RX.
1250  * @handle_high:        Handle_high value returned from FFA_MEM_FRAG_RX.
1251  * @fragment_length:    Length of fragments transmitted.
1252  * @sender_id:          Vmid of sender in bits [31:16]
1253  * @smc_handle:         Handle passed to smc call. Used to return
1254  *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
1255  *
1256  * Return: @smc_handle on success, error code on failure.
1257  */
1258 long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
1259 			  bool secure_origin,
1260 			  uint64_t handle_low,
1261 			  uint64_t handle_high,
1262 			  uint32_t fragment_length,
1263 			  uint32_t sender_id,
1264 			  void *cookie,
1265 			  void *handle,
1266 			  uint64_t flags)
1267 {
1268 	long ret;
1269 	uint32_t desc_sender_id;
1270 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1271 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1272 
1273 	struct spmc_shmem_obj *obj;
1274 	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1275 
1276 	spin_lock(&spmc_shmem_obj_state.lock);
1277 
1278 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1279 	if (obj == NULL) {
1280 		WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
1281 		     __func__, mem_handle);
1282 		ret = FFA_ERROR_INVALID_PARAMETER;
1283 		goto err_unlock;
1284 	}
1285 
1286 	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
1287 	if (sender_id != desc_sender_id) {
1288 		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
1289 		     sender_id, desc_sender_id);
1290 		ret = FFA_ERROR_INVALID_PARAMETER;
1291 		goto err_unlock;
1292 	}
1293 
1294 	if (obj->desc_filled == obj->desc_size) {
1295 		WARN("%s: object desc already filled, %zu\n", __func__,
1296 		     obj->desc_filled);
1297 		ret = FFA_ERROR_INVALID_PARAMETER;
1298 		goto err_unlock;
1299 	}
1300 
1301 	spin_lock(&mbox->lock);
1302 	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, ffa_version,
1303 				 handle);
1304 	spin_unlock(&mbox->lock);
1305 
1306 	spin_unlock(&spmc_shmem_obj_state.lock);
1307 	return ret;
1308 
1309 err_unlock:
1310 	spin_unlock(&spmc_shmem_obj_state.lock);
1311 	return spmc_ffa_error_return(handle, ret);
1312 }
1313 
1314 /**
1315  * spmc_ffa_mem_retrieve_set_ns_bit - Set the NS bit in the response descriptor
1316  *				      if the caller implements a version greater
1317  *				      than FF-A 1.0 or if they have requested
1318  *				      the functionality.
1319  *				      TODO: We are assuming that the caller is
1320  *				      an SP. To support retrieval from the
1321  *				      normal world this function will need to be
1322  *				      expanded accordingly.
1323  * @resp:       Descriptor populated in caller's RX buffer.
1324  * @sp_ctx:     Context of the calling SP.
1325  */
1326 void spmc_ffa_mem_retrieve_set_ns_bit(struct ffa_mtd *resp,
1327 			 struct secure_partition_desc *sp_ctx)
1328 {
1329 	if (sp_ctx->ffa_version > MAKE_FFA_VERSION(1, 0) ||
1330 	    sp_ctx->ns_bit_requested) {
1331 		/*
1332 		 * Currently memory senders must reside in the normal
1333 		 * world, and we do not have the functionality to change
1334 		 * the state of memory dynamically. Therefore we can always set
1335 		 * the NS bit to 1.
1336 		 */
1337 		resp->memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
1338 	}
1339 }
1340 
1341 /**
1342  * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
1343  * @smc_fid:            FID of SMC
1344  * @total_length:       Total length of retrieve request descriptor if this is
1345  *                      the first call. Otherwise (unsupported) must be 0.
1346  * @fragment_length:    Length of fragment of retrieve request descriptor passed
1347  *                      in this call. Only @fragment_length == @length is
1348  *                      supported by this implementation.
1349  * @address:            Not supported, must be 0.
1350  * @page_count:         Not supported, must be 0.
1351  * @smc_handle:         Handle passed to smc call. Used to return
1352  *                      FFA_MEM_RETRIEVE_RESP.
1353  *
1354  * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
1355  * Used by secure os to retrieve memory already shared by non-secure os.
1356  * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
1357  * the client must call FFA_MEM_FRAG_RX until the full response has been
1358  * received.
1359  *
1360  * Return: @handle on success, error code on failure.
1361  */
1362 long
1363 spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
1364 			  bool secure_origin,
1365 			  uint32_t total_length,
1366 			  uint32_t fragment_length,
1367 			  uint64_t address,
1368 			  uint32_t page_count,
1369 			  void *cookie,
1370 			  void *handle,
1371 			  uint64_t flags)
1372 {
1373 	int ret;
1374 	size_t buf_size;
1375 	size_t copy_size = 0;
1376 	size_t min_desc_size;
1377 	size_t out_desc_size = 0;
1378 
1379 	/*
1380 	 * Currently we are only accessing fields that are the same in both the
1381 	 * v1.0 and v1.1 mtd struct therefore we can use a v1.1 struct directly
1382 	 * here. We only need validate against the appropriate struct size.
1383 	 */
1384 	struct ffa_mtd *resp;
1385 	const struct ffa_mtd *req;
1386 	struct spmc_shmem_obj *obj = NULL;
1387 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1388 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1389 	struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
1390 
1391 	if (!secure_origin) {
1392 		WARN("%s: unsupported retrieve req direction.\n", __func__);
1393 		return spmc_ffa_error_return(handle,
1394 					     FFA_ERROR_INVALID_PARAMETER);
1395 	}
1396 
1397 	if (address != 0U || page_count != 0U) {
1398 		WARN("%s: custom memory region not supported.\n", __func__);
1399 		return spmc_ffa_error_return(handle,
1400 					     FFA_ERROR_INVALID_PARAMETER);
1401 	}
1402 
1403 	spin_lock(&mbox->lock);
1404 
1405 	req = mbox->tx_buffer;
1406 	resp = mbox->rx_buffer;
1407 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1408 
1409 	if (mbox->rxtx_page_count == 0U) {
1410 		WARN("%s: buffer pair not registered.\n", __func__);
1411 		ret = FFA_ERROR_INVALID_PARAMETER;
1412 		goto err_unlock_mailbox;
1413 	}
1414 
1415 	if (mbox->state != MAILBOX_STATE_EMPTY) {
1416 		WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
1417 		ret = FFA_ERROR_DENIED;
1418 		goto err_unlock_mailbox;
1419 	}
1420 
1421 	if (fragment_length != total_length) {
1422 		WARN("%s: fragmented retrieve request not supported.\n",
1423 		     __func__);
1424 		ret = FFA_ERROR_INVALID_PARAMETER;
1425 		goto err_unlock_mailbox;
1426 	}
1427 
1428 	if (req->emad_count == 0U) {
1429 		WARN("%s: unsupported attribute desc count %u.\n",
1430 		     __func__, req->emad_count);
1431 		ret = FFA_ERROR_INVALID_PARAMETER;
1432 		goto err_unlock_mailbox;
1433 	}
1434 
1435 	/* Determine the appropriate minimum descriptor size. */
1436 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1437 		min_desc_size = sizeof(struct ffa_mtd_v1_0);
1438 	} else {
1439 		min_desc_size = sizeof(struct ffa_mtd);
1440 	}
1441 	if (total_length < min_desc_size) {
1442 		WARN("%s: invalid length %u < %zu\n", __func__, total_length,
1443 		     min_desc_size);
1444 		ret = FFA_ERROR_INVALID_PARAMETER;
1445 		goto err_unlock_mailbox;
1446 	}
1447 
1448 	spin_lock(&spmc_shmem_obj_state.lock);
1449 
1450 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1451 	if (obj == NULL) {
1452 		ret = FFA_ERROR_INVALID_PARAMETER;
1453 		goto err_unlock_all;
1454 	}
1455 
1456 	if (obj->desc_filled != obj->desc_size) {
1457 		WARN("%s: incomplete object desc filled %zu < size %zu\n",
1458 		     __func__, obj->desc_filled, obj->desc_size);
1459 		ret = FFA_ERROR_INVALID_PARAMETER;
1460 		goto err_unlock_all;
1461 	}
1462 
1463 	if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
1464 		WARN("%s: wrong sender id 0x%x != 0x%x\n",
1465 		     __func__, req->sender_id, obj->desc.sender_id);
1466 		ret = FFA_ERROR_INVALID_PARAMETER;
1467 		goto err_unlock_all;
1468 	}
1469 
1470 	if (req->emad_count != 0U && req->tag != obj->desc.tag) {
1471 		WARN("%s: wrong tag 0x%lx != 0x%lx\n",
1472 		     __func__, req->tag, obj->desc.tag);
1473 		ret = FFA_ERROR_INVALID_PARAMETER;
1474 		goto err_unlock_all;
1475 	}
1476 
1477 	if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
1478 		WARN("%s: mistmatch of endpoint counts %u != %u\n",
1479 		     __func__, req->emad_count, obj->desc.emad_count);
1480 		ret = FFA_ERROR_INVALID_PARAMETER;
1481 		goto err_unlock_all;
1482 	}
1483 
1484 	/* Ensure the NS bit is set to 0 in the request. */
1485 	if ((req->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
1486 		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
1487 		ret = FFA_ERROR_INVALID_PARAMETER;
1488 		goto err_unlock_all;
1489 	}
1490 
1491 	if (req->flags != 0U) {
1492 		if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
1493 		    (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
1494 			/*
1495 			 * If the retrieve request specifies the memory
1496 			 * transaction ensure it matches what we expect.
1497 			 */
1498 			WARN("%s: wrong mem transaction flags %x != %x\n",
1499 			__func__, req->flags, obj->desc.flags);
1500 			ret = FFA_ERROR_INVALID_PARAMETER;
1501 			goto err_unlock_all;
1502 		}
1503 
1504 		if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
1505 		    req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
1506 			/*
1507 			 * Current implementation does not support donate and
1508 			 * it supports no other flags.
1509 			 */
1510 			WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
1511 			ret = FFA_ERROR_INVALID_PARAMETER;
1512 			goto err_unlock_all;
1513 		}
1514 	}
1515 
1516 	/* Validate the caller is a valid participant. */
1517 	if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
1518 		WARN("%s: Invalid endpoint ID (0x%x).\n",
1519 			__func__, sp_ctx->sp_id);
1520 		ret = FFA_ERROR_INVALID_PARAMETER;
1521 		goto err_unlock_all;
1522 	}
1523 
1524 	/* Validate that the provided emad offset and structure are valid. */
1525 	for (size_t i = 0; i < req->emad_count; i++) {
1526 		size_t emad_size;
1527 		struct ffa_emad_v1_0 *emad;
1528 
1529 		emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
1530 					       &emad_size);
1531 
1532 		if ((uintptr_t) emad >= (uintptr_t)
1533 					((uint8_t *) req + total_length)) {
1534 			WARN("Invalid emad access.\n");
1535 			ret = FFA_ERROR_INVALID_PARAMETER;
1536 			goto err_unlock_all;
1537 		}
1538 	}
1539 
1540 	/*
1541 	 * Validate all the endpoints match in the case of multiple
1542 	 * borrowers. We don't mandate that the order of the borrowers
1543 	 * must match in the descriptors therefore check to see if the
1544 	 * endpoints match in any order.
1545 	 */
1546 	for (size_t i = 0; i < req->emad_count; i++) {
1547 		bool found = false;
1548 		size_t emad_size;
1549 		struct ffa_emad_v1_0 *emad;
1550 		struct ffa_emad_v1_0 *other_emad;
1551 
1552 		emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
1553 					       &emad_size);
1554 
1555 		for (size_t j = 0; j < obj->desc.emad_count; j++) {
1556 			other_emad = spmc_shmem_obj_get_emad(
1557 					&obj->desc, j, MAKE_FFA_VERSION(1, 1),
1558 					&emad_size);
1559 
1560 			if (req->emad_count &&
1561 			    emad->mapd.endpoint_id ==
1562 			    other_emad->mapd.endpoint_id) {
1563 				found = true;
1564 				break;
1565 			}
1566 		}
1567 
1568 		if (!found) {
1569 			WARN("%s: invalid receiver id (0x%x).\n",
1570 			     __func__, emad->mapd.endpoint_id);
1571 			ret = FFA_ERROR_INVALID_PARAMETER;
1572 			goto err_unlock_all;
1573 		}
1574 	}
1575 
1576 	mbox->state = MAILBOX_STATE_FULL;
1577 
1578 	if (req->emad_count != 0U) {
1579 		obj->in_use++;
1580 	}
1581 
1582 	/*
1583 	 * If the caller is v1.0 convert the descriptor, otherwise copy
1584 	 * directly.
1585 	 */
1586 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1587 		ret = spmc_populate_ffa_v1_0_descriptor(resp, obj, buf_size, 0,
1588 							&copy_size,
1589 							&out_desc_size);
1590 		if (ret != 0U) {
1591 			ERROR("%s: Failed to process descriptor.\n", __func__);
1592 			goto err_unlock_all;
1593 		}
1594 	} else {
1595 		copy_size = MIN(obj->desc_size, buf_size);
1596 		out_desc_size = obj->desc_size;
1597 
1598 		memcpy(resp, &obj->desc, copy_size);
1599 	}
1600 
1601 	/* Set the NS bit in the response if applicable. */
1602 	spmc_ffa_mem_retrieve_set_ns_bit(resp, sp_ctx);
1603 
1604 	spin_unlock(&spmc_shmem_obj_state.lock);
1605 	spin_unlock(&mbox->lock);
1606 
1607 	SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, out_desc_size,
1608 		 copy_size, 0, 0, 0, 0, 0);
1609 
1610 err_unlock_all:
1611 	spin_unlock(&spmc_shmem_obj_state.lock);
1612 err_unlock_mailbox:
1613 	spin_unlock(&mbox->lock);
1614 	return spmc_ffa_error_return(handle, ret);
1615 }
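
/*
 * Caller sketch (illustrative pseudo-code for the SP side, not part of
 * this file): the SP describes the transaction it wants in its TX buffer
 * and receives the full descriptor in its RX buffer:
 *
 *	req = tx_buffer;
 *	req->handle = mem_handle;	// communicated by the sender
 *	req->sender_id = sender_id;
 *	... fill the emads for the participating endpoints ...
 *	FFA_MEM_RETRIEVE_REQ(total_length, total_length);
 *	// parse FFA_MEM_RETRIEVE_RESP from rx_buffer; if the descriptor
 *	// did not fit, fetch the remainder with FFA_MEM_FRAG_RX
 */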
1616 
1617 /**
1618  * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
1619  * @secure_origin:      Must be true; only callable from the secure world.
1620  * @handle_low:         Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
1621  * @handle_high:        Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
1622  * @fragment_offset:    Byte offset in the descriptor to resume at.
1623  * @sender_id:          Bit[31:16]: Endpoint ID of the sender if the caller
1624  *                      is a hypervisor, 0 otherwise.
1625  * @handle:             Handle passed to the SMC call. Used to return
1626  *                      FFA_MEM_FRAG_TX.
1627  *
1628  * Return: @handle on success, error code on failure.
1629  */
1630 long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
1631 			  bool secure_origin,
1632 			  uint32_t handle_low,
1633 			  uint32_t handle_high,
1634 			  uint32_t fragment_offset,
1635 			  uint32_t sender_id,
1636 			  void *cookie,
1637 			  void *handle,
1638 			  uint64_t flags)
1639 {
1640 	int ret;
1641 	const uint8_t *src;
1642 	size_t buf_size;
1643 	size_t copy_size;
1644 	size_t full_copy_size;
1645 	uint32_t desc_sender_id;
1646 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1647 	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1648 	struct spmc_shmem_obj *obj;
1649 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1650 
1651 	if (!secure_origin) {
1652 		WARN("%s: can only be called from the secure world.\n",
1653 		     __func__);
1654 		return spmc_ffa_error_return(handle,
1655 					     FFA_ERROR_INVALID_PARAMETER);
1656 	}
1657 
1658 	spin_lock(&spmc_shmem_obj_state.lock);
1659 
1660 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1661 	if (obj == NULL) {
1662 		WARN("%s: invalid handle 0x%lx.\n",
1663 		     __func__, mem_handle);
1664 		ret = FFA_ERROR_INVALID_PARAMETER;
1665 		goto err_unlock_shmem;
1666 	}
1667 
1668 	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
1669 	if (sender_id != 0U && sender_id != desc_sender_id) {
1670 		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
1671 		     sender_id, desc_sender_id);
1672 		ret = FFA_ERROR_INVALID_PARAMETER;
1673 		goto err_unlock_shmem;
1674 	}
1675 
1676 	if (fragment_offset >= obj->desc_size) {
1677 		WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
1678 		     __func__, fragment_offset, obj->desc_size);
1679 		ret = FFA_ERROR_INVALID_PARAMETER;
1680 		goto err_unlock_shmem;
1681 	}
1682 
1683 	spin_lock(&mbox->lock);
1684 
1685 	if (mbox->rxtx_page_count == 0U) {
1686 		WARN("%s: buffer pair not registered.\n", __func__);
1687 		ret = FFA_ERROR_INVALID_PARAMETER;
1688 		goto err_unlock_all;
1689 	}
1690 
1691 	if (mbox->state != MAILBOX_STATE_EMPTY) {
1692 		WARN("%s: RX Buffer is full!\n", __func__);
1693 		ret = FFA_ERROR_DENIED;
1694 		goto err_unlock_all;
1695 	}
1696 
1697 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1698 
1699 	mbox->state = MAILBOX_STATE_FULL;
1700 
1701 	/*
1702 	 * If the caller is v1.0 convert the descriptor, otherwise copy
1703 	 * directly.
1704 	 */
1705 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1706 		size_t out_desc_size;
1707 
1708 		ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
1709 							buf_size,
1710 							fragment_offset,
1711 							&copy_size,
1712 							&out_desc_size);
1713 		if (ret != 0) {
1714 			ERROR("%s: Failed to process descriptor.\n", __func__);
1715 			goto err_unlock_all;
1716 		}
1717 	} else {
1718 		full_copy_size = obj->desc_size - fragment_offset;
1719 		copy_size = MIN(full_copy_size, buf_size);
1720 
1721 		src = (const uint8_t *)&obj->desc;
1722 
1723 		memcpy(mbox->rx_buffer, src + fragment_offset, copy_size);
1724 	}
1725 
1726 	spin_unlock(&mbox->lock);
1727 	spin_unlock(&spmc_shmem_obj_state.lock);
1728 
1729 	SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
1730 		 copy_size, sender_id, 0, 0, 0);
1731 
1732 err_unlock_all:
1733 	spin_unlock(&mbox->lock);
1734 err_unlock_shmem:
1735 	spin_unlock(&spmc_shmem_obj_state.lock);
1736 	return spmc_ffa_error_return(handle, ret);
1737 }
1738 
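/*
 * Illustrative sketch (not part of this file): retrieving a descriptor that
 * does not fit in a single RX buffer. When FFA_MEM_RETRIEVE_RESP returned
 * fewer bytes than the full descriptor size, the borrower keeps calling
 * FFA_MEM_FRAG_RX, advancing the offset by each fragment's length, until
 * the whole descriptor has been read. ffa_smc(), ffa_rx_release() and
 * consume_fragment() are hypothetical SP-side helpers.
 *
 *	uint32_t offset = fragment_len;	// Bytes delivered by the RESP.
 *
 *	while (offset < total_len) {
 *		r = ffa_smc(FFA_MEM_FRAG_RX, (uint32_t)mem_handle,
 *			    (uint32_t)(mem_handle >> 32), offset, 0);
 *		if (r.ret0 != FFA_MEM_FRAG_TX) {
 *			break;		// FFA_ERROR; code in r.ret2.
 *		}
 *		consume_fragment(sp_rx_buf, r.ret3);	// r.ret3 = copy_size.
 *		offset += r.ret3;
 *		ffa_rx_release();	// Mark the RX buffer empty again.
 *	}
 */
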
1739 /**
1740  * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
1741  * @secure_origin:      Must be true; only callable from the secure world.
1742  *
1743  * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
1744  * Used by the secure OS to release memory shared by the non-secure OS.
1745  *
1746  * The handle to release must be in the caller's (secure OS's) transmit buffer.
1747  *
1748  * Return: 0 on success, error code on failure.
1749  */
1750 int spmc_ffa_mem_relinquish(uint32_t smc_fid,
1751 			    bool secure_origin,
1752 			    uint32_t handle_low,
1753 			    uint32_t handle_high,
1754 			    uint32_t fragment_offset,
1755 			    uint32_t sender_id,
1756 			    void *cookie,
1757 			    void *handle,
1758 			    uint64_t flags)
1759 {
1760 	int ret;
1761 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1762 	struct spmc_shmem_obj *obj;
1763 	const struct ffa_mem_relinquish_descriptor *req;
1764 	struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
1765 
1766 	if (!secure_origin) {
1767 		WARN("%s: unsupported relinquish direction.\n", __func__);
1768 		return spmc_ffa_error_return(handle,
1769 					     FFA_ERROR_INVALID_PARAMETER);
1770 	}
1771 
1772 	spin_lock(&mbox->lock);
1773 
1774 	if (mbox->rxtx_page_count == 0U) {
1775 		WARN("%s: buffer pair not registered.\n", __func__);
1776 		ret = FFA_ERROR_INVALID_PARAMETER;
1777 		goto err_unlock_mailbox;
1778 	}
1779 
1780 	req = mbox->tx_buffer;
1781 
1782 	if (req->flags != 0U) {
1783 		WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
1784 		ret = FFA_ERROR_INVALID_PARAMETER;
1785 		goto err_unlock_mailbox;
1786 	}
1787 
1788 	if (req->endpoint_count == 0U) {
1789 		WARN("%s: endpoint count cannot be 0.\n", __func__);
1790 		ret = FFA_ERROR_INVALID_PARAMETER;
1791 		goto err_unlock_mailbox;
1792 	}
1793 
1794 	spin_lock(&spmc_shmem_obj_state.lock);
1795 
1796 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1797 	if (obj == NULL) {
1798 		ret = FFA_ERROR_INVALID_PARAMETER;
1799 		goto err_unlock_all;
1800 	}
1801 
1802 	/*
1803 	 * Validate the endpoint ID was populated correctly. We don't currently
1804 	 * support proxy endpoints so the endpoint count should always be 1.
1805 	 */
1806 	if (req->endpoint_count != 1U) {
1807 		WARN("%s: unsupported endpoint count %u != 1\n", __func__,
1808 		     req->endpoint_count);
1809 		ret = FFA_ERROR_INVALID_PARAMETER;
1810 		goto err_unlock_all;
1811 	}
1812 
1813 	/* Validate provided endpoint ID matches the partition ID. */
1814 	if (req->endpoint_array[0] != sp_ctx->sp_id) {
1815 		WARN("%s: invalid endpoint ID %u != %u\n", __func__,
1816 		     req->endpoint_array[0], sp_ctx->sp_id);
1817 		ret = FFA_ERROR_INVALID_PARAMETER;
1818 		goto err_unlock_all;
1819 	}
1820 
1821 	/* Validate the caller is a valid participant. */
1822 	if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
1823 		WARN("%s: Invalid endpoint ID (0x%x).\n",
1824 			__func__, req->endpoint_array[0]);
1825 		ret = FFA_ERROR_INVALID_PARAMETER;
1826 		goto err_unlock_all;
1827 	}
1828 
1829 	if (obj->in_use == 0U) {
1830 		ret = FFA_ERROR_INVALID_PARAMETER;
1831 		goto err_unlock_all;
1832 	}
1833 	obj->in_use--;
1834 
1835 	spin_unlock(&spmc_shmem_obj_state.lock);
1836 	spin_unlock(&mbox->lock);
1837 
1838 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1839 
1840 err_unlock_all:
1841 	spin_unlock(&spmc_shmem_obj_state.lock);
1842 err_unlock_mailbox:
1843 	spin_unlock(&mbox->lock);
1844 	return spmc_ffa_error_return(handle, ret);
1845 }
1846 
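/*
 * Illustrative sketch (not part of this file): the TX-buffer contents the
 * relinquish handler above expects. Each check in the handler maps to one
 * field: flags must be 0, exactly one endpoint must be listed, and it must
 * be the caller's own partition ID. sp_tx_buf and own_id are hypothetical
 * SP-side values.
 *
 *	struct ffa_mem_relinquish_descriptor *rel =
 *		(struct ffa_mem_relinquish_descriptor *)sp_tx_buf;
 *
 *	rel->handle = shared_handle;	// Handle being released.
 *	rel->flags = 0;			// No relinquish flags are supported.
 *	rel->endpoint_count = 1;	// Proxy endpoints are not supported.
 *	rel->endpoint_array[0] = own_id;
 *
 *	r = ffa_smc(FFA_MEM_RELINQUISH, 0, 0, 0, 0);
 *	// On FFA_SUCCESS the object's in_use count has been decremented.
 */
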
1847 /**
1848  * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
1849  * @secure_origin:  Must be false; only callable from the normal world.
1850  * @handle_low:     Unique handle of shared memory object to reclaim. Bit[31:0].
1851  * @handle_high:    Unique handle of shared memory object to reclaim.
1852  *                  Bit[63:32].
1853  * @mem_flags:      Memory management flags. None are supported; must be 0.
1854  *
1855  * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
1856  * Used by the non-secure OS to reclaim memory shared with the secure OS.
1857  *
1858  * Return: 0 on success, error code on failure.
1859  */
1860 int spmc_ffa_mem_reclaim(uint32_t smc_fid,
1861 			 bool secure_origin,
1862 			 uint32_t handle_low,
1863 			 uint32_t handle_high,
1864 			 uint32_t mem_flags,
1865 			 uint64_t x4,
1866 			 void *cookie,
1867 			 void *handle,
1868 			 uint64_t flags)
1869 {
1870 	int ret;
1871 	struct spmc_shmem_obj *obj;
1872 	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1873 
1874 	if (secure_origin) {
1875 		WARN("%s: unsupported reclaim direction.\n", __func__);
1876 		return spmc_ffa_error_return(handle,
1877 					     FFA_ERROR_INVALID_PARAMETER);
1878 	}
1879 
1880 	if (mem_flags != 0U) {
1881 		WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
1882 		return spmc_ffa_error_return(handle,
1883 					     FFA_ERROR_INVALID_PARAMETER);
1884 	}
1885 
1886 	spin_lock(&spmc_shmem_obj_state.lock);
1887 
1888 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1889 	if (obj == NULL) {
1890 		ret = FFA_ERROR_INVALID_PARAMETER;
1891 		goto err_unlock;
1892 	}
1893 	if (obj->in_use != 0U) {
1894 		ret = FFA_ERROR_DENIED;
1895 		goto err_unlock;
1896 	}
1897 
1898 	if (obj->desc_filled != obj->desc_size) {
1899 		WARN("%s: incomplete object desc filled %zu < size %zu\n",
1900 		     __func__, obj->desc_filled, obj->desc_size);
1901 		ret = FFA_ERROR_INVALID_PARAMETER;
1902 		goto err_unlock;
1903 	}
1904 
1905 	/* Allow for platform specific operations to be performed. */
1906 	ret = plat_spmc_shmem_reclaim(&obj->desc);
1907 	if (ret != 0) {
1908 		goto err_unlock;
1909 	}
1910 
1911 	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1912 	spin_unlock(&spmc_shmem_obj_state.lock);
1913 
1914 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1915 
1916 err_unlock:
1917 	spin_unlock(&spmc_shmem_obj_state.lock);
1918 	return spmc_ffa_error_return(handle, ret);
1919 }
1920
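/*
 * Illustrative sketch (not part of this file): the matching normal-world
 * reclaim. The 64-bit handle is split across w1/w2 and mem_flags (w3) must
 * be 0. FFA_ERROR_DENIED means a borrower still holds the memory
 * (in_use != 0), so the owner must wait for every FFA_MEM_RELINQUISH
 * before retrying. ffa_smc() is a hypothetical NWd helper.
 *
 *	r = ffa_smc(FFA_MEM_RECLAIM, (uint32_t)mem_handle,
 *		    (uint32_t)(mem_handle >> 32), 0, 0);
 *	if (r.ret0 == FFA_ERROR && (int)r.ret2 == FFA_ERROR_DENIED) {
 *		// A borrower has not relinquished yet; retry later.
 *	}
 */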