xref: /rk3399_ARM-atf/services/std_svc/spm/el3_spmc/spmc_shared_mem.c (revision 0560b53e71ab6daefa8e75665a718605478746a4)
1 /*
2  * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 #include <assert.h>
7 #include <errno.h>
8 
9 #include <common/debug.h>
10 #include <common/runtime_svc.h>
11 #include <lib/object_pool.h>
12 #include <lib/spinlock.h>
13 #include <lib/xlat_tables/xlat_tables_v2.h>
14 #include <services/ffa_svc.h>
15 #include "spmc.h"
16 #include "spmc_shared_mem.h"
17 
18 #include <platform_def.h>
19 
20 /**
21  * struct spmc_shmem_obj - Shared memory object.
22  * @desc_size:      Size of @desc.
23  * @desc_filled:    Size of @desc already received.
24  * @in_use:         Number of clients that have called ffa_mem_retrieve_req
25  *                  without a matching ffa_mem_relinquish call.
26  * @desc:           FF-A memory region descriptor passed in ffa_mem_share.
27  */
28 struct spmc_shmem_obj {
29 	size_t desc_size;
30 	size_t desc_filled;
31 	size_t in_use;
32 	struct ffa_mtd desc;
33 };
34 
35 /*
36  * Declare our data structure to store the metadata of memory share requests.
37  * The main datastore is allocated on a per-platform basis to ensure enough
38  * storage can be made available.
39  * The address of the data store will be populated by the SPMC during its
40  * initialization.
41  */
42 
43 struct spmc_shmem_obj_state spmc_shmem_obj_state = {
44 	/* Set the start value for the handle so the top 32 bits are needed quickly. */
45 	.next_handle = 0xffffffc0U,
46 };
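/*
 * Datastore layout sketch (sizes illustrative only): objects are packed
 * back to back, each occupying offsetof(struct spmc_shmem_obj, desc) +
 * desc_size bytes.
 *
 *   data:  [obj0 hdr|obj0 desc......][obj1 hdr|obj1 desc....]...free...
 *          |<----------------- allocated ------------------>|
 *
 * Because spmc_shmem_obj_free() compacts this array, handles (not raw
 * pointers) are the only stable way to refer to an object.
 */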
47 
48 /**
49  * spmc_shmem_obj_size - Convert from descriptor size to object size.
50  * @desc_size:  Size of struct ffa_memory_region_descriptor object.
51  *
52  * Return: Size of struct spmc_shmem_obj object.
53  */
54 static size_t spmc_shmem_obj_size(size_t desc_size)
55 {
56 	return desc_size + offsetof(struct spmc_shmem_obj, desc);
57 }
58 
59 /**
60  * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
61  * @state:      Global state.
62  * @desc_size:  Size of struct ffa_memory_region_descriptor object that
63  *              allocated object will hold.
64  *
65  * Return: Pointer to the newly allocated object, or %NULL if there is not
66  *         enough space left. The returned pointer is only valid while @state
67  *         is locked; to use it again after unlocking @state,
68  *         spmc_shmem_obj_lookup must be called.
69  */
70 static struct spmc_shmem_obj *
71 spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
72 {
73 	struct spmc_shmem_obj *obj;
74 	size_t free = state->data_size - state->allocated;
75 
76 	if (state->data == NULL) {
77 		ERROR("Missing shmem datastore!\n");
78 		return NULL;
79 	}
80 
81 	if (spmc_shmem_obj_size(desc_size) > free) {
82 		WARN("%s(0x%zx) failed, free 0x%zx\n",
83 		     __func__, desc_size, free);
84 		return NULL;
85 	}
86 	obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
87 	obj->desc = (struct ffa_mtd) {0};
88 	obj->desc_size = desc_size;
89 	obj->desc_filled = 0;
90 	obj->in_use = 0;
91 	state->allocated += spmc_shmem_obj_size(desc_size);
92 	return obj;
93 }
94 
95 /**
96  * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
97  * @state:      Global state.
98  * @obj:        Object to free.
99  *
100  * Release memory used by @obj. Other objects may move, so on return all
101  * pointers to struct spmc_shmem_obj objects should be considered invalid, not
102  * just @obj.
103  *
104  * The current implementation always compacts the remaining objects to simplify
105  * the allocator and to avoid fragmentation.
106  */
107 
108 static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
109 				  struct spmc_shmem_obj *obj)
110 {
111 	size_t free_size = spmc_shmem_obj_size(obj->desc_size);
112 	uint8_t *shift_dest = (uint8_t *)obj;
113 	uint8_t *shift_src = shift_dest + free_size;
114 	size_t shift_size = state->allocated - (shift_src - state->data);
115 
116 	if (shift_size != 0U) {
117 		memmove(shift_dest, shift_src, shift_size);
118 	}
119 	state->allocated -= free_size;
120 }
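/*
 * Compaction example for the memmove above (contents illustrative):
 *
 *   before free(B):  [A][B][C]......
 *   after free(B):   [A][C].........
 *
 * C has moved, so any pointer to it obtained before the free is stale;
 * callers must look it up again by handle via spmc_shmem_obj_lookup().
 */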
121 
122 /**
123  * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
124  * @state:      Global state.
125  * @handle:     Unique handle of object to return.
126  *
127  * Return: struct spmc_shmem_obj object with a handle matching @handle.
128  *         %NULL if no object in @state->data has a matching handle.
129  */
130 static struct spmc_shmem_obj *
131 spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
132 {
133 	uint8_t *curr = state->data;
134 
135 	while (curr - state->data < state->allocated) {
136 		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
137 
138 		if (obj->desc.handle == handle) {
139 			return obj;
140 		}
141 		curr += spmc_shmem_obj_size(obj->desc_size);
142 	}
143 	return NULL;
144 }
145 
146 /**
147  * spmc_shmem_obj_get_next - Get the next memory object from an offset.
148  * @state:      Global state.
149  * @offset:     Offset used to track previously returned objects.
150  *
151  * Return: the next struct spmc_shmem_obj object from the provided
152  *	   offset.
153  *	   %NULL, if there are no more objects.
154  */
155 static struct spmc_shmem_obj *
156 spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
157 {
158 	uint8_t *curr = state->data + *offset;
159 
160 	if (curr - state->data < state->allocated) {
161 		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
162 
163 		*offset += spmc_shmem_obj_size(obj->desc_size);
164 
165 		return obj;
166 	}
167 	return NULL;
168 }
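/*
 * Typical datastore walk using the helper above (a sketch; the caller
 * must hold spmc_shmem_obj_state.lock for the duration of the walk):
 *
 *	size_t offset = 0;
 *	struct spmc_shmem_obj *cur;
 *
 *	while ((cur = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
 *					      &offset)) != NULL) {
 *		... inspect cur ...
 *	}
 */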
169 
170 /*******************************************************************************
171  * FF-A memory descriptor helper functions.
172  ******************************************************************************/
173 /**
174  * spmc_shmem_obj_get_emad - Get the emad from a given index depending on the
175  *                           client's FF-A version.
176  * @desc:         The memory transaction descriptor.
177  * @index:        The index of the emad element to be accessed.
178  * @ffa_version:  FF-A version of the provided structure.
179  * @emad_size:    Will be populated with the size of the returned emad
180  *                descriptor.
181  * Return: A pointer to the requested emad structure.
182  */
183 static void *
184 spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
185 			uint32_t ffa_version, size_t *emad_size)
186 {
187 	uint8_t *emad;
188 	/*
189 	 * If the caller is using FF-A v1.0, interpret the descriptor in the
190 	 * v1.0 format; otherwise assume it is in the v1.1 format.
191 	 */
192 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
193 		/* Cast our descriptor to the v1.0 format. */
194 		struct ffa_mtd_v1_0 *mtd_v1_0 =
195 					(struct ffa_mtd_v1_0 *) desc;
196 		emad = (uint8_t *)  &(mtd_v1_0->emad);
197 		*emad_size = sizeof(struct ffa_emad_v1_0);
198 	} else {
199 		if (!is_aligned(desc->emad_offset, 16)) {
200 			WARN("Emad offset is not aligned.\n");
201 			return NULL;
202 		}
203 		emad = ((uint8_t *) desc + desc->emad_offset);
204 		*emad_size = desc->emad_size;
205 	}
206 	return (emad + (*emad_size * index));
207 }
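/*
 * For example, with a v1.1 descriptor whose emad_offset is 0x30 and
 * emad_size is 0x10 (values illustrative), index 2 resolves to
 * (uint8_t *)desc + 0x30 + (2 * 0x10). Note that the returned pointer is
 * not bounds-checked here; callers validate it against the descriptor
 * size.
 */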
208 
209 /**
210  * spmc_shmem_obj_get_comp_mrd - Get the comp_mrd from an mtd struct.
211  * @obj:          Object containing ffa_memory_region_descriptor.
212  * @ffa_version:  FF-A version of the provided descriptor.
213  *
214  * Return: struct ffa_comp_mrd object corresponding to the composite memory
215  *	   region descriptor.
216  */
217 static struct ffa_comp_mrd *
218 spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
219 {
220 	size_t emad_size;
221 	/*
222 	 * The comp_mrd_offset field of the emad descriptor remains consistent
223 	 * between FF-A versions, therefore we can use the v1.0 descriptor here
224 	 * in all cases.
225 	 */
226 	struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
227 							     ffa_version,
228 							     &emad_size);
229 	/* Ensure the emad array was found. */
230 	if (emad == NULL) {
231 		return NULL;
232 	}
233 
234 	/* Ensure the composite descriptor offset is aligned. */
235 	if (!is_aligned(emad->comp_mrd_offset, 8)) {
236 		WARN("Unaligned composite memory region descriptor offset.\n");
237 		return NULL;
238 	}
239 
240 	return (struct ffa_comp_mrd *)
241 	       ((uint8_t *)(&obj->desc) + emad->comp_mrd_offset);
242 }
243 
244 /**
245  * spmc_shmem_obj_ffa_constituent_size - Calculate variable size part of obj.
246  * @obj:          Object containing ffa_memory_region_descriptor.
247  * @ffa_version:  FF-A version of the provided descriptor.
248  * Return: Size of ffa_constituent_memory_region_descriptors in @obj.
249  */
250 static size_t
251 spmc_shmem_obj_ffa_constituent_size(struct spmc_shmem_obj *obj,
252 				    uint32_t ffa_version)
253 {
254 	struct ffa_comp_mrd *comp_mrd;
255 
256 	comp_mrd = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);
257 	if (comp_mrd == NULL) {
258 		return 0;
259 	}
260 	return comp_mrd->address_range_count * sizeof(struct ffa_cons_mrd);
261 }
262 
263 /*
264  * Compare two composite memory regions to determine if any constituent
265  * range in one overlaps with any constituent range in the other.
266  */
267 static bool
268 overlapping_memory_regions(struct ffa_comp_mrd *region1,
269 			   struct ffa_comp_mrd *region2)
270 {
271 	uint64_t region1_start;
272 	uint64_t region1_size;
273 	uint64_t region1_end;
274 	uint64_t region2_start;
275 	uint64_t region2_size;
276 	uint64_t region2_end;
277 
278 	assert(region1 != NULL);
279 	assert(region2 != NULL);
280 
281 	if (region1 == region2) {
282 		return true;
283 	}
284 
285 	/*
286 	 * Check each memory region in the request against existing
287 	 * transactions.
288 	 */
289 	for (size_t i = 0; i < region1->address_range_count; i++) {
290 
291 		region1_start = region1->address_range_array[i].address;
292 		region1_size =
293 			region1->address_range_array[i].page_count *
294 			PAGE_SIZE_4KB;
295 		region1_end = region1_start + region1_size;
296 
297 		for (size_t j = 0; j < region2->address_range_count; j++) {
298 
299 			region2_start = region2->address_range_array[j].address;
300 			region2_size =
301 				region2->address_range_array[j].page_count *
302 				PAGE_SIZE_4KB;
303 			region2_end = region2_start + region2_size;
304 
305 			/* Two half-open ranges [s, e) overlap */
306 			/* if and only if s1 < e2 and s2 < e1. */
307 			if ((region1_start < region2_end) &&
308 			    (region2_start < region1_end)) {
309 				WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
310 				     region1_start, region1_end,
311 				     region2_start, region2_end);
312 				return true;
313 			}
314 		}
315 	}
316 	return false;
317 }
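/*
 * Worked example with 4KB pages: a range at 0x80000000 with page_count 2
 * covers [0x80000000, 0x80002000). It overlaps [0x80001000, 0x80003000),
 * but only touches, and therefore does not overlap, the adjacent range
 * [0x80002000, 0x80004000).
 */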
318 
319 /*******************************************************************************
320  * FF-A v1.0 Memory Descriptor Conversion Helpers.
321  ******************************************************************************/
322 /**
323  * spmc_shm_get_v1_1_descriptor_size - Calculate the required size for a v1.1
324  *                                     converted descriptor.
325  * @orig:       The original v1.0 memory transaction descriptor.
326  * @desc_size:  The size of the original v1.0 memory transaction descriptor.
327  *
328  * Return: the size required to store the descriptor in the v1.1 format.
329  */
330 static size_t
331 spmc_shm_get_v1_1_descriptor_size(struct ffa_mtd_v1_0 *orig, size_t desc_size)
332 {
333 	size_t size = 0;
334 	struct ffa_comp_mrd *mrd;
335 	struct ffa_emad_v1_0 *emad_array = orig->emad;
336 
337 	/* Get the size of the v1.1 descriptor. */
338 	size += sizeof(struct ffa_mtd);
339 
340 	/* Add the size of the emad descriptors. */
341 	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);
342 
343 	/* Add the size of the composite mrds. */
344 	size += sizeof(struct ffa_comp_mrd);
345 
346 	/* Add the size of the constituent mrds. */
347 	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
348 	      emad_array[0].comp_mrd_offset);
349 
350 	/* Check the calculated address is within the memory descriptor. */
351 	if ((uintptr_t) mrd >= (uintptr_t)((uint8_t *) orig + desc_size)) {
352 		return 0;
353 	}
354 	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);
355 
356 	return size;
357 }
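/*
 * Layout of the v1.1 descriptor whose size is computed above, in the
 * order the conversion code emits it:
 *
 *   [struct ffa_mtd]
 *   [emad_count * struct ffa_emad_v1_0]
 *   [struct ffa_comp_mrd]
 *   [address_range_count * struct ffa_cons_mrd]
 */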
358 
359 /**
360  * spmc_shm_get_v1_0_descriptor_size - Calculate the required size for a v1.0
361  *                                     converted descriptor.
362  * @orig:       The original v1.1 memory transaction descriptor.
363  * @desc_size:  The size of the original v1.1 memory transaction descriptor.
364  *
365  * Return: the size required to store the descriptor in the v1.0 format.
366  */
367 static size_t
368 spmc_shm_get_v1_0_descriptor_size(struct ffa_mtd *orig, size_t desc_size)
369 {
370 	size_t size = 0;
371 	struct ffa_comp_mrd *mrd;
372 	struct ffa_emad_v1_0 *emad_array = (struct ffa_emad_v1_0 *)
373 					   ((uint8_t *) orig +
374 					    orig->emad_offset);
375 
376 	/* Get the size of the v1.0 descriptor. */
377 	size += sizeof(struct ffa_mtd_v1_0);
378 
379 	/* Add the size of the v1.0 emad descriptors. */
380 	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);
381 
382 	/* Add the size of the composite mrds. */
383 	size += sizeof(struct ffa_comp_mrd);
384 
385 	/* Add the size of the constituent mrds. */
386 	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
387 	      emad_array[0].comp_mrd_offset);
388 
389 	/* Check the calculated address is within the memory descriptor. */
390 	if ((uintptr_t) mrd >= (uintptr_t)((uint8_t *) orig + desc_size)) {
391 		return 0;
392 	}
393 	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);
394 
395 	return size;
396 }
397 
398 /**
399  * spmc_shm_convert_shmem_obj_from_v1_0 - Converts a given v1.0 memory object.
400  * @out_obj:	The shared memory object to populate the converted descriptor.
401  * @orig:	The shared memory object containing the v1.0 descriptor.
402  *
403  * Return: true if the conversion is successful else false.
404  */
405 static bool
406 spmc_shm_convert_shmem_obj_from_v1_0(struct spmc_shmem_obj *out_obj,
407 				     struct spmc_shmem_obj *orig)
408 {
409 	struct ffa_mtd_v1_0 *mtd_orig = (struct ffa_mtd_v1_0 *) &orig->desc;
410 	struct ffa_mtd *out = &out_obj->desc;
411 	struct ffa_emad_v1_0 *emad_array_in;
412 	struct ffa_emad_v1_0 *emad_array_out;
413 	struct ffa_comp_mrd *mrd_in;
414 	struct ffa_comp_mrd *mrd_out;
415 
416 	size_t mrd_in_offset;
417 	size_t mrd_out_offset;
418 	size_t mrd_size = 0;
419 
420 	/* Populate the new descriptor format from the v1.0 struct. */
421 	out->sender_id = mtd_orig->sender_id;
422 	out->memory_region_attributes = mtd_orig->memory_region_attributes;
423 	out->flags = mtd_orig->flags;
424 	out->handle = mtd_orig->handle;
425 	out->tag = mtd_orig->tag;
426 	out->emad_count = mtd_orig->emad_count;
427 	out->emad_size = sizeof(struct ffa_emad_v1_0);
428 
429 	/*
430 	 * We will locate the emad descriptors directly after the ffa_mtd
431 	 * struct. This will be 8-byte aligned.
432 	 */
433 	out->emad_offset = sizeof(struct ffa_mtd);
434 
435 	emad_array_in = mtd_orig->emad;
436 	emad_array_out = (struct ffa_emad_v1_0 *)
437 			 ((uint8_t *) out + out->emad_offset);
438 
439 	/* Copy across the emad structs. */
440 	for (unsigned int i = 0U; i < out->emad_count; i++) {
441 		memcpy(&emad_array_out[i], &emad_array_in[i],
442 		       sizeof(struct ffa_emad_v1_0));
443 	}
444 
445 	/* Place the mrd descriptors after the end of the emad descriptors. */
446 	mrd_in_offset = emad_array_in->comp_mrd_offset;
447 	mrd_out_offset = out->emad_offset + (out->emad_size * out->emad_count);
448 	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);
449 
450 	/* Add the size of the composite memory region descriptor. */
451 	mrd_size += sizeof(struct ffa_comp_mrd);
452 
453 	/* Find the mrd descriptor. */
454 	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);
455 
456 	/* Add the size of the constituent memory region descriptors. */
457 	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);
458 
459 	/*
460 	 * Update the offset in the emads by the delta between the input and
461 	 * output addresses.
462 	 */
463 	for (unsigned int i = 0U; i < out->emad_count; i++) {
464 		emad_array_out[i].comp_mrd_offset =
465 			emad_array_in[i].comp_mrd_offset +
466 			(mrd_out_offset - mrd_in_offset);
467 	}
468 
469 	/* Verify that we stay within bound of the memory descriptors. */
470 	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
471 	     (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
472 	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
473 	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
474 		ERROR("%s: Invalid mrd structure.\n", __func__);
475 		return false;
476 	}
477 
478 	/* Copy the mrd descriptors directly. */
479 	memcpy(mrd_out, mrd_in, mrd_size);
480 
481 	return true;
482 }
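/*
 * The comp_mrd_offset rebasing above shifts each emad's offset by the
 * distance the composite descriptor moved. For example (offsets
 * illustrative), an mrd at offset 0x50 in the v1.0 descriptor that lands
 * at offset 0x70 in the v1.1 descriptor grows every comp_mrd_offset by
 * 0x20.
 */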
483 
484 /**
485  * spmc_shm_convert_mtd_to_v1_0 - Converts a given v1.1 memory object to
486  *                                v1.0 memory object.
487  * @out_obj:    The shared memory object to populate the v1.0 descriptor.
488  * @orig:       The shared memory object containing the v1.1 descriptor.
489  *
490  * Return: true if the conversion is successful else false.
491  */
492 static bool
493 spmc_shm_convert_mtd_to_v1_0(struct spmc_shmem_obj *out_obj,
494 			     struct spmc_shmem_obj *orig)
495 {
496 	struct ffa_mtd *mtd_orig = &orig->desc;
497 	struct ffa_mtd_v1_0 *out = (struct ffa_mtd_v1_0 *) &out_obj->desc;
498 	struct ffa_emad_v1_0 *emad_in;
499 	struct ffa_emad_v1_0 *emad_array_in;
500 	struct ffa_emad_v1_0 *emad_array_out;
501 	struct ffa_comp_mrd *mrd_in;
502 	struct ffa_comp_mrd *mrd_out;
503 
504 	size_t mrd_in_offset;
505 	size_t mrd_out_offset;
506 	size_t emad_out_array_size;
507 	size_t mrd_size = 0;
508 
509 	/* Populate the v1.0 descriptor format from the v1.1 struct. */
510 	out->sender_id = mtd_orig->sender_id;
511 	out->memory_region_attributes = mtd_orig->memory_region_attributes;
512 	out->flags = mtd_orig->flags;
513 	out->handle = mtd_orig->handle;
514 	out->tag = mtd_orig->tag;
515 	out->emad_count = mtd_orig->emad_count;
516 
517 	/* Determine the location of the emad array in both descriptors. */
518 	emad_array_in = (struct ffa_emad_v1_0 *)
519 			((uint8_t *) mtd_orig + mtd_orig->emad_offset);
520 	emad_array_out = out->emad;
521 
522 	/* Copy across the emad structs. */
523 	emad_in = emad_array_in;
524 	for (unsigned int i = 0U; i < out->emad_count; i++) {
525 		memcpy(&emad_array_out[i], emad_in,
526 		       sizeof(struct ffa_emad_v1_0));
527 
528 		emad_in = (void *)((uint8_t *)emad_in + mtd_orig->emad_size);
529 	}
530 
531 	/* Place the mrd descriptors after the end of the emad descriptors. */
532 	emad_out_array_size = sizeof(struct ffa_emad_v1_0) * out->emad_count;
533 
534 	mrd_out_offset =  (uint8_t *) out->emad - (uint8_t *) out +
535 			  emad_out_array_size;
536 
537 	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);
538 
539 	mrd_in_offset = mtd_orig->emad_offset +
540 			(mtd_orig->emad_size * mtd_orig->emad_count);
541 
542 	/* Add the size of the composite memory region descriptor. */
543 	mrd_size += sizeof(struct ffa_comp_mrd);
544 
545 	/* Find the mrd descriptor. */
546 	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);
547 
548 	/* Add the size of the constituent memory region descriptors. */
549 	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);
550 
551 	/*
552 	 * Update the offset in the emads by the delta between the input and
553 	 * output addresses.
554 	 */
555 	emad_in = emad_array_in;
556 
557 	for (unsigned int i = 0U; i < out->emad_count; i++) {
558 		emad_array_out[i].comp_mrd_offset = emad_in->comp_mrd_offset +
559 						    (mrd_out_offset -
560 						     mrd_in_offset);
561 		emad_in = (void *)((uint8_t *)emad_in + mtd_orig->emad_size);
562 	}
563 
564 	/* Verify that we stay within bound of the memory descriptors. */
565 	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
566 	     (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
567 	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
568 	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
569 		ERROR("%s: Invalid mrd structure.\n", __func__);
570 		return false;
571 	}
572 
573 	/* Copy the mrd descriptors directly. */
574 	memcpy(mrd_out, mrd_in, mrd_size);
575 
576 	return true;
577 }
578 
579 /**
580  * spmc_populate_ffa_v1_0_descriptor - Converts a given v1.1 memory object to
581  *                                     the v1.0 format and populates the
582  *                                     provided buffer.
583  * @dst:	    Buffer to populate v1.0 ffa_memory_region_descriptor.
584  * @orig_obj:	    Object containing v1.1 ffa_memory_region_descriptor.
585  * @buf_size:	    Size of the buffer to populate.
586  * @offset:	    The offset of the converted descriptor to copy.
587  * @copy_size:	    Will be populated with the number of bytes copied.
588  * @out_desc_size:  Will be populated with the total size of the v1.0
589  *                  descriptor.
590  *
591  * Return: 0 if conversion and population succeeded.
592  * Note: This function invalidates the reference to @orig therefore
593  * `spmc_shmem_obj_lookup` must be called if further usage is required.
594  */
595 static uint32_t
596 spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
597 				 size_t buf_size, size_t offset,
598 				 size_t *copy_size, size_t *v1_0_desc_size)
599 {
600 	struct spmc_shmem_obj *v1_0_obj;
601 
602 	/* Calculate the size that the v1.0 descriptor will require. */
603 	*v1_0_desc_size = spmc_shm_get_v1_0_descriptor_size(
604 				&orig_obj->desc, orig_obj->desc_size);
605 
606 	if (*v1_0_desc_size == 0) {
607 		ERROR("%s: cannot determine size of descriptor.\n",
608 		      __func__);
609 		return FFA_ERROR_INVALID_PARAMETER;
610 	}
611 
612 	/* Get a new obj to store the v1.0 descriptor. */
613 	v1_0_obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
614 					*v1_0_desc_size);
615 
616 	if (!v1_0_obj) {
617 		return FFA_ERROR_NO_MEMORY;
618 	}
619 
620 	/* Perform the conversion from v1.1 to v1.0. */
621 	if (!spmc_shm_convert_mtd_to_v1_0(v1_0_obj, orig_obj)) {
622 		spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
623 		return FFA_ERROR_INVALID_PARAMETER;
624 	}
625 
626 	*copy_size = MIN(v1_0_obj->desc_size - offset, buf_size);
627 	memcpy(dst, (uint8_t *) &v1_0_obj->desc + offset, *copy_size);
628 
629 	/*
630 	 * We're finished with the v1.0 descriptor for now so free it.
631 	 * Note that this will invalidate any references to the v1.1
632 	 * descriptor.
633 	 */
634 	spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
635 
636 	return 0;
637 }
638 
639 /**
640  * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
641  * @obj:	  Object containing ffa_memory_region_descriptor.
642  * @ffa_version:  FF-A version of the provided descriptor.
643  *
644  * Return: 0 if object is valid, -EINVAL if constituent_memory_region_descriptor
645  * offset or count is invalid.
646  */
647 static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
648 				uint32_t ffa_version)
649 {
650 	uint32_t comp_mrd_offset = 0;
651 
652 	if (obj->desc.emad_count == 0U) {
653 		WARN("%s: unsupported attribute desc count %u.\n",
654 		     __func__, obj->desc.emad_count);
655 		return -EINVAL;
656 	}
657 
658 	for (size_t emad_num = 0; emad_num < obj->desc.emad_count; emad_num++) {
659 		size_t size;
660 		size_t count;
661 		size_t expected_size;
662 		size_t total_page_count;
663 		size_t emad_size;
664 		size_t desc_size;
665 		size_t header_emad_size;
666 		uint32_t offset;
667 		struct ffa_comp_mrd *comp;
668 		struct ffa_emad_v1_0 *emad;
669 
670 		emad = spmc_shmem_obj_get_emad(&obj->desc, emad_num,
671 					       ffa_version, &emad_size);
672 		if (emad == NULL) {
673 			WARN("%s: invalid emad structure.\n", __func__);
674 			return -EINVAL;
675 		}
676 
677 		/*
678 		 * Validate the calculated emad address resides within the
679 		 * descriptor.
680 		 */
681 		if ((uintptr_t) emad >=
682 		    (uintptr_t)((uint8_t *) &obj->desc + obj->desc_size)) {
683 			WARN("Invalid emad access.\n");
684 			return -EINVAL;
685 		}
686 
687 		offset = emad->comp_mrd_offset;
688 
689 		if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
690 			desc_size =  sizeof(struct ffa_mtd_v1_0);
691 		} else {
692 			desc_size =  sizeof(struct ffa_mtd);
693 		}
694 
695 		header_emad_size = desc_size +
696 			(obj->desc.emad_count * emad_size);
697 
698 		if (offset < header_emad_size) {
699 			WARN("%s: invalid object, offset %u < header + emad %zu\n",
700 			     __func__, offset, header_emad_size);
701 			return -EINVAL;
702 		}
703 
704 		size = obj->desc_size;
705 
706 		if (offset > size) {
707 			WARN("%s: invalid object, offset %u > total size %zu\n",
708 			     __func__, offset, obj->desc_size);
709 			return -EINVAL;
710 		}
711 		size -= offset;
712 
713 		if (size < sizeof(struct ffa_comp_mrd)) {
714 			WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
715 			     __func__, offset, obj->desc_size);
716 			return -EINVAL;
717 		}
718 		size -= sizeof(struct ffa_comp_mrd);
719 
720 		count = size / sizeof(struct ffa_cons_mrd);
721 
722 		comp = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);
723 
724 		if (comp == NULL) {
725 			WARN("%s: invalid comp_mrd offset\n", __func__);
726 			return -EINVAL;
727 		}
728 
729 		if (comp->address_range_count != count) {
730 			WARN("%s: invalid object, desc count %u != %zu\n",
731 			     __func__, comp->address_range_count, count);
732 			return -EINVAL;
733 		}
734 
735 		expected_size = offset + sizeof(*comp) +
736 				spmc_shmem_obj_ffa_constituent_size(obj,
737 								    ffa_version);
738 
739 		if (expected_size != obj->desc_size) {
740 			WARN("%s: invalid object, computed size %zu != size %zu\n",
741 			       __func__, expected_size, obj->desc_size);
742 			return -EINVAL;
743 		}
744 
745 		if (obj->desc_filled < obj->desc_size) {
746 			/*
747 			 * The whole descriptor has not yet been received.
748 			 * Skip final checks.
749 			 */
750 			return 0;
751 		}
752 
753 		/*
754 		 * The offset provided to the composite memory region descriptor
755 		 * should be consistent across endpoint descriptors. Store the
756 		 * first entry and compare against subsequent entries.
757 		 */
758 		if (comp_mrd_offset == 0) {
759 			comp_mrd_offset = offset;
760 		} else {
761 			if (comp_mrd_offset != offset) {
762 				ERROR("%s: mismatching offsets provided, %u != %u\n",
763 				       __func__, offset, comp_mrd_offset);
764 				return -EINVAL;
765 			}
766 		}
767 
768 		total_page_count = 0;
769 
770 		for (size_t i = 0; i < count; i++) {
771 			total_page_count +=
772 				comp->address_range_array[i].page_count;
773 		}
774 		if (comp->total_page_count != total_page_count) {
775 			WARN("%s: invalid object, desc total_page_count %u != %zu\n",
776 			     __func__, comp->total_page_count,
777 			     total_page_count);
778 			return -EINVAL;
779 		}
780 	}
781 	return 0;
782 }
783 
784 /**
785  * spmc_shmem_check_state_obj - Check if the descriptor describes memory
786  *				regions that are currently involved in an
787  *				existing memory transaction. This implies that
788  *				the memory is not in a valid state for lending.
789  * @obj:          Object containing ffa_memory_region_descriptor.
790  * @ffa_version:  FF-A version of the provided descriptor.
791  * Return: 0 if object is valid, -EINVAL if invalid memory state.
792  */
793 static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj,
794 				      uint32_t ffa_version)
795 {
796 	size_t obj_offset = 0;
797 	struct spmc_shmem_obj *inflight_obj;
798 
799 	struct ffa_comp_mrd *other_mrd;
800 	struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj,
801 								  ffa_version);
802 
803 	if (requested_mrd == NULL) {
804 		return -EINVAL;
805 	}
806 
807 	inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
808 					       &obj_offset);
809 
810 	while (inflight_obj != NULL) {
811 		/*
812 		 * Don't compare the transaction to itself or to partially
813 		 * transmitted descriptors.
814 		 */
815 		if ((obj->desc.handle != inflight_obj->desc.handle) &&
816 		    (obj->desc_size == obj->desc_filled)) {
817 			other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj,
818 								ffa_version);
819 			if (other_mrd == NULL) {
820 				return -EINVAL;
821 			}
822 			if (overlapping_memory_regions(requested_mrd,
823 						       other_mrd)) {
824 				return -EINVAL;
825 			}
826 		}
827 
828 		inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
829 						       &obj_offset);
830 	}
831 	return 0;
832 }
833 
834 static long spmc_ffa_fill_desc(struct mailbox *mbox,
835 			       struct spmc_shmem_obj *obj,
836 			       uint32_t fragment_length,
837 			       ffa_mtd_flag32_t mtd_flag,
838 			       uint32_t ffa_version,
839 			       void *smc_handle)
840 {
841 	int ret;
842 	size_t emad_size;
843 	uint32_t handle_low;
844 	uint32_t handle_high;
845 	struct ffa_emad_v1_0 *emad;
846 	struct ffa_emad_v1_0 *other_emad;
847 
848 	if (mbox->rxtx_page_count == 0U) {
849 		WARN("%s: buffer pair not registered.\n", __func__);
850 		ret = FFA_ERROR_INVALID_PARAMETER;
851 		goto err_arg;
852 	}
853 
854 	if (fragment_length > mbox->rxtx_page_count * PAGE_SIZE_4KB) {
855 		WARN("%s: bad fragment size %u > %u buffer size\n", __func__,
856 		     fragment_length, mbox->rxtx_page_count * PAGE_SIZE_4KB);
857 		ret = FFA_ERROR_INVALID_PARAMETER;
858 		goto err_arg;
859 	}
860 
861 	if (fragment_length > obj->desc_size - obj->desc_filled) {
862 		WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
863 		     fragment_length, obj->desc_size - obj->desc_filled);
864 		ret = FFA_ERROR_INVALID_PARAMETER;
865 		goto err_arg;
866 	}
867 
868 	memcpy((uint8_t *)&obj->desc + obj->desc_filled,
869 	       (uint8_t *) mbox->tx_buffer, fragment_length);
870 
871 	/* Ensure that the sender ID resides in the normal world. */
872 	if (ffa_is_secure_world_id(obj->desc.sender_id)) {
873 		WARN("%s: Invalid sender ID 0x%x.\n",
874 		     __func__, obj->desc.sender_id);
875 		ret = FFA_ERROR_DENIED;
876 		goto err_arg;
877 	}
878 
879 	/* Ensure the NS bit is set to 0. */
880 	if ((obj->desc.memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
881 		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
882 		ret = FFA_ERROR_INVALID_PARAMETER;
883 		goto err_arg;
884 	}
885 
886 	/*
887 	 * We don't currently support any optional flags so ensure none are
888 	 * requested.
889 	 */
890 	if (obj->desc.flags != 0U && mtd_flag != 0U &&
891 	    (obj->desc.flags != mtd_flag)) {
892 		WARN("%s: invalid memory transaction flags %u != %u\n",
893 		     __func__, obj->desc.flags, mtd_flag);
894 		ret = FFA_ERROR_INVALID_PARAMETER;
895 		goto err_arg;
896 	}
897 
898 	if (obj->desc_filled == 0U) {
899 		/* First fragment, descriptor header has been copied */
900 		obj->desc.handle = spmc_shmem_obj_state.next_handle++;
901 		obj->desc.flags |= mtd_flag;
902 	}
903 
904 	obj->desc_filled += fragment_length;
905 	ret = spmc_shmem_check_obj(obj, ffa_version);
906 	if (ret != 0) {
907 		ret = FFA_ERROR_INVALID_PARAMETER;
908 		goto err_bad_desc;
909 	}
910 
911 	handle_low = (uint32_t)obj->desc.handle;
912 	handle_high = obj->desc.handle >> 32;
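	/*
	 * The 64-bit handle is returned split across two 32-bit registers;
	 * a client reassembles it as:
	 *	handle = handle_low | ((uint64_t)handle_high << 32);
	 */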
913 
914 	if (obj->desc_filled != obj->desc_size) {
915 		SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
916 			 handle_high, obj->desc_filled,
917 			 (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
918 	}
919 
920 	/* The full descriptor has been received, perform any final checks. */
921 
922 	/*
923 	 * If a partition ID resides in the secure world validate that the
924 	 * partition ID is for a known partition. Ignore any partition ID
925 	 * belonging to the normal world as it is assumed the Hypervisor will
926 	 * have validated these.
927 	 */
928 	for (size_t i = 0; i < obj->desc.emad_count; i++) {
929 		emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
930 					       &emad_size);
931 		if (emad == NULL) {
932 			ret = FFA_ERROR_INVALID_PARAMETER;
933 			goto err_bad_desc;
934 		}
935 
936 		ffa_endpoint_id16_t ep_id = emad->mapd.endpoint_id;
937 
938 		if (ffa_is_secure_world_id(ep_id)) {
939 			if (spmc_get_sp_ctx(ep_id) == NULL) {
940 				WARN("%s: Invalid receiver id 0x%x\n",
941 				     __func__, ep_id);
942 				ret = FFA_ERROR_INVALID_PARAMETER;
943 				goto err_bad_desc;
944 			}
945 		}
946 	}
947 
948 	/* Ensure partition IDs are not duplicated. */
949 	for (size_t i = 0; i < obj->desc.emad_count; i++) {
950 		emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
951 					       &emad_size);
952 		if (emad == NULL) {
953 			ret = FFA_ERROR_INVALID_PARAMETER;
954 			goto err_bad_desc;
955 		}
956 		for (size_t j = i + 1; j < obj->desc.emad_count; j++) {
957 			other_emad = spmc_shmem_obj_get_emad(&obj->desc, j,
958 							     ffa_version,
959 							     &emad_size);
960 			if (other_emad == NULL) {
961 				ret = FFA_ERROR_INVALID_PARAMETER;
962 				goto err_bad_desc;
963 			}
964 
965 			if (emad->mapd.endpoint_id ==
966 				other_emad->mapd.endpoint_id) {
967 				WARN("%s: Duplicated endpoint id 0x%x\n",
968 				     __func__, emad->mapd.endpoint_id);
969 				ret = FFA_ERROR_INVALID_PARAMETER;
970 				goto err_bad_desc;
971 			}
972 		}
973 	}
974 
975 	ret = spmc_shmem_check_state_obj(obj, ffa_version);
976 	if (ret) {
977 		ERROR("%s: invalid memory region descriptor.\n", __func__);
978 		ret = FFA_ERROR_INVALID_PARAMETER;
979 		goto err_bad_desc;
980 	}
981 
982 	/*
983 	 * Everything checks out, if the sender was using FF-A v1.0, convert
984 	 * the descriptor format to use the v1.1 structures.
985 	 */
986 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
987 		struct spmc_shmem_obj *v1_1_obj;
988 		uint64_t mem_handle;
989 
990 		/* Calculate the size that the v1.1 descriptor will require. */
991 		size_t v1_1_desc_size =
992 		    spmc_shm_get_v1_1_descriptor_size((void *) &obj->desc,
993 						      fragment_length);
994 
995 		if (v1_1_desc_size == 0U) {
996 			ERROR("%s: cannot determine size of descriptor.\n",
997 			      __func__);
998 			goto err_arg;
999 		}
1000 
1001 		/* Get a new obj to store the v1.1 descriptor. */
1002 		v1_1_obj =
1003 		    spmc_shmem_obj_alloc(&spmc_shmem_obj_state, v1_1_desc_size);
1004 
1005 		if (!v1_1_obj) {
1006 			ret = FFA_ERROR_NO_MEMORY;
1007 			goto err_arg;
1008 		}
1009 		/* Perform the conversion from v1.0 to v1.1. */
1010 		v1_1_obj->desc_size = v1_1_desc_size;
1011 		v1_1_obj->desc_filled = v1_1_desc_size;
1012 		if (!spmc_shm_convert_shmem_obj_from_v1_0(v1_1_obj, obj)) {
1013 			ERROR("%s: Could not convert mtd!\n", __func__);
1014 			spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_1_obj);
1015 			ret = FFA_ERROR_INVALID_PARAMETER;
1016 			goto err_arg;
1017 		}
1018 
1019 		/*
1020 		 * We're finished with the v1.0 descriptor so free it
1021 		 * and continue our checks with the new v1.1 descriptor.
1022 		 */
1023 		mem_handle = obj->desc.handle;
1024 		spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1025 		obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1026 		if (obj == NULL) {
1027 			ERROR("%s: Failed to find converted descriptor.\n",
1028 			     __func__);
1029 			ret = FFA_ERROR_INVALID_PARAMETER;
1030 			return spmc_ffa_error_return(smc_handle, ret);
1031 		}
1032 	}
1033 
1034 	SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
1035 		 0, 0, 0);
1036 
1037 err_bad_desc:
1038 err_arg:
1039 	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1040 	return spmc_ffa_error_return(smc_handle, ret);
1041 }
1042 
1043 /**
1044  * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
1045  * @client:             Client state.
1046  * @total_length:       Total length of shared memory descriptor.
1047  * @fragment_length:    Length of fragment of shared memory descriptor passed in
1048  *                      this call.
1049  * @address:            Not supported, must be 0.
1050  * @page_count:         Not supported, must be 0.
1051  * @smc_handle:         Handle passed to smc call. Used to return
1052  *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
1053  *
1054  * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
1055  * to share or lend memory from non-secure os to secure os (with no stream
1056  * endpoints).
1057  *
1058  * Return: 0 on success, error code on failure.
1059  */
1060 long spmc_ffa_mem_send(uint32_t smc_fid,
1061 			bool secure_origin,
1062 			uint64_t total_length,
1063 			uint32_t fragment_length,
1064 			uint64_t address,
1065 			uint32_t page_count,
1066 			void *cookie,
1067 			void *handle,
1068 			uint64_t flags)
1069 
1070 {
1071 	long ret;
1072 	struct spmc_shmem_obj *obj;
1073 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1074 	ffa_mtd_flag32_t mtd_flag;
1075 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1076 
1077 	if (address != 0U || page_count != 0U) {
1078 		WARN("%s: custom memory region for message not supported.\n",
1079 		     __func__);
1080 		return spmc_ffa_error_return(handle,
1081 					     FFA_ERROR_INVALID_PARAMETER);
1082 	}
1083 
1084 	if (secure_origin) {
1085 		WARN("%s: unsupported share direction.\n", __func__);
1086 		return spmc_ffa_error_return(handle,
1087 					     FFA_ERROR_INVALID_PARAMETER);
1088 	}
1089 
1090 	/*
1091 	 * Check if the descriptor is smaller than the v1.0 descriptor. The
1092 	 * descriptor cannot be smaller than this structure.
1093 	 */
1094 	if (fragment_length < sizeof(struct ffa_mtd_v1_0)) {
1095 		WARN("%s: bad first fragment size %u < %zu\n",
1096 		     __func__, fragment_length, sizeof(struct ffa_mtd_v1_0));
1097 		return spmc_ffa_error_return(handle,
1098 					     FFA_ERROR_INVALID_PARAMETER);
1099 	}
1100 
1101 	if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
1102 		mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
1103 	} else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
1104 		mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
1105 	} else {
1106 		WARN("%s: invalid memory management operation.\n", __func__);
1107 		return spmc_ffa_error_return(handle,
1108 					     FFA_ERROR_INVALID_PARAMETER);
1109 	}
1110 
1111 	spin_lock(&spmc_shmem_obj_state.lock);
1112 	obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
1113 	if (obj == NULL) {
1114 		ret = FFA_ERROR_NO_MEMORY;
1115 		goto err_unlock;
1116 	}
1117 
1118 	spin_lock(&mbox->lock);
1119 	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag,
1120 				 ffa_version, handle);
1121 	spin_unlock(&mbox->lock);
1122 
1123 	spin_unlock(&spmc_shmem_obj_state.lock);
1124 	return ret;
1125 
1126 err_unlock:
1127 	spin_unlock(&spmc_shmem_obj_state.lock);
1128 	return spmc_ffa_error_return(handle, ret);
1129 }
1130 
1131 /**
1132  * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
1133  * @client:             Client state.
1134  * @handle_low:         Handle_low value returned from FFA_MEM_FRAG_RX.
1135  * @handle_high:        Handle_high value returned from FFA_MEM_FRAG_RX.
1136  * @fragment_length:    Length of fragments transmitted.
1137  * @sender_id:          Vmid of sender in bits [31:16]
1138  * @smc_handle:         Handle passed to smc call. Used to return
1139  *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
1140  *
1141  * Return: @smc_handle on success, error code on failure.
1142  */
1143 long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
1144 			  bool secure_origin,
1145 			  uint64_t handle_low,
1146 			  uint64_t handle_high,
1147 			  uint32_t fragment_length,
1148 			  uint32_t sender_id,
1149 			  void *cookie,
1150 			  void *handle,
1151 			  uint64_t flags)
1152 {
1153 	long ret;
1154 	uint32_t desc_sender_id;
1155 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1156 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1157 
1158 	struct spmc_shmem_obj *obj;
1159 	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1160 
1161 	spin_lock(&spmc_shmem_obj_state.lock);
1162 
1163 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1164 	if (obj == NULL) {
1165 		WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
1166 		     __func__, mem_handle);
1167 		ret = FFA_ERROR_INVALID_PARAMETER;
1168 		goto err_unlock;
1169 	}
1170 
1171 	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
1172 	if (sender_id != desc_sender_id) {
1173 		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
1174 		     sender_id, desc_sender_id);
1175 		ret = FFA_ERROR_INVALID_PARAMETER;
1176 		goto err_unlock;
1177 	}
1178 
1179 	if (obj->desc_filled == obj->desc_size) {
1180 		WARN("%s: object desc already filled, %zu\n", __func__,
1181 		     obj->desc_filled);
1182 		ret = FFA_ERROR_INVALID_PARAMETER;
1183 		goto err_unlock;
1184 	}
1185 
1186 	spin_lock(&mbox->lock);
1187 	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, ffa_version,
1188 				 handle);
1189 	spin_unlock(&mbox->lock);
1190 
1191 	spin_unlock(&spmc_shmem_obj_state.lock);
1192 	return ret;
1193 
1194 err_unlock:
1195 	spin_unlock(&spmc_shmem_obj_state.lock);
1196 	return spmc_ffa_error_return(handle, ret);
1197 }
1198 
1199 /**
1200  * spmc_ffa_mem_retrieve_set_ns_bit - Set the NS bit in the response descriptor
1201  *				      if the caller implements a version greater
1202  *				      than FF-A 1.0 or if they have requested
1203  *				      the functionality.
1204  *				      TODO: We are assuming that the caller is
1205  *				      an SP. To support retrieval from the
1206  *				      normal world this function will need to be
1207  *				      expanded accordingly.
1208  * @resp:       Descriptor populated in callers RX buffer.
1209  * @sp_ctx:     Context of the calling SP.
1210  */
1211 void spmc_ffa_mem_retrieve_set_ns_bit(struct ffa_mtd *resp,
1212 			 struct secure_partition_desc *sp_ctx)
1213 {
1214 	if (sp_ctx->ffa_version > MAKE_FFA_VERSION(1, 0) ||
1215 	    sp_ctx->ns_bit_requested) {
1216 		/*
1217 		 * Currently memory senders must reside in the normal
1218 		 * world, and we do not have the functionality to change
1219 		 * the state of memory dynamically. Therefore we can always set
1220 		 * the NS bit to 1.
1221 		 */
1222 		resp->memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
1223 	}
1224 }
1225 
1226 /**
1227  * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
1228  * @smc_fid:            FID of SMC
1229  * @total_length:       Total length of retrieve request descriptor if this is
1230  *                      the first call. Otherwise (unsupported) must be 0.
1231  * @fragment_length:    Length of fragment of retrieve request descriptor passed
1232  *                      in this call. Only @fragment_length == @length is
1233  *                      supported by this implementation.
1234  * @address:            Not supported, must be 0.
1235  * @page_count:         Not supported, must be 0.
1236  * @smc_handle:         Handle passed to smc call. Used to return
1237  *                      FFA_MEM_RETRIEVE_RESP.
1238  *
1239  * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
1240  * Used by secure os to retrieve memory already shared by non-secure os.
1241  * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
1242  * the client must call FFA_MEM_FRAG_RX until the full response has been
1243  * received.
1244  *
1245  * Return: @handle on success, error code on failure.
1246  */
1247 long
1248 spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
1249 			  bool secure_origin,
1250 			  uint32_t total_length,
1251 			  uint32_t fragment_length,
1252 			  uint64_t address,
1253 			  uint32_t page_count,
1254 			  void *cookie,
1255 			  void *handle,
1256 			  uint64_t flags)
1257 {
1258 	int ret;
1259 	size_t buf_size;
1260 	size_t copy_size = 0;
1261 	size_t min_desc_size;
1262 	size_t out_desc_size = 0;
1263 
1264 	/*
1265 	 * Currently we are only accessing fields that are the same in both the
1266 	 * v1.0 and v1.1 mtd struct therefore we can use a v1.1 struct directly
1267 	 * here. We only need validate against the appropriate struct size.
1268 	 */
1269 	struct ffa_mtd *resp;
1270 	const struct ffa_mtd *req;
1271 	struct spmc_shmem_obj *obj = NULL;
1272 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1273 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1274 	struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
1275 
1276 	if (!secure_origin) {
1277 		WARN("%s: unsupported retrieve req direction.\n", __func__);
1278 		return spmc_ffa_error_return(handle,
1279 					     FFA_ERROR_INVALID_PARAMETER);
1280 	}
1281 
1282 	if (address != 0U || page_count != 0U) {
1283 		WARN("%s: custom memory region not supported.\n", __func__);
1284 		return spmc_ffa_error_return(handle,
1285 					     FFA_ERROR_INVALID_PARAMETER);
1286 	}
1287 
1288 	spin_lock(&mbox->lock);
1289 
1290 	req = mbox->tx_buffer;
1291 	resp = mbox->rx_buffer;
1292 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1293 
1294 	if (mbox->rxtx_page_count == 0U) {
1295 		WARN("%s: buffer pair not registered.\n", __func__);
1296 		ret = FFA_ERROR_INVALID_PARAMETER;
1297 		goto err_unlock_mailbox;
1298 	}
1299 
1300 	if (mbox->state != MAILBOX_STATE_EMPTY) {
1301 		WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
1302 		ret = FFA_ERROR_DENIED;
1303 		goto err_unlock_mailbox;
1304 	}
1305 
1306 	if (fragment_length != total_length) {
1307 		WARN("%s: fragmented retrieve request not supported.\n",
1308 		     __func__);
1309 		ret = FFA_ERROR_INVALID_PARAMETER;
1310 		goto err_unlock_mailbox;
1311 	}
1312 
1313 	if (req->emad_count == 0U) {
1314 		WARN("%s: unsupported attribute desc count %u.\n", __func__, req->emad_count);
1315 		ret = FFA_ERROR_INVALID_PARAMETER;
1316 		goto err_unlock_mailbox;
1317 	}
1318 
1319 	/* Determine the appropriate minimum descriptor size. */
1320 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1321 		min_desc_size = sizeof(struct ffa_mtd_v1_0);
1322 	} else {
1323 		min_desc_size = sizeof(struct ffa_mtd);
1324 	}
1325 	if (total_length < min_desc_size) {
1326 		WARN("%s: invalid length %u < %zu\n", __func__, total_length,
1327 		     min_desc_size);
1328 		ret = FFA_ERROR_INVALID_PARAMETER;
1329 		goto err_unlock_mailbox;
1330 	}
1331 
1332 	spin_lock(&spmc_shmem_obj_state.lock);
1333 
1334 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1335 	if (obj == NULL) {
1336 		ret = FFA_ERROR_INVALID_PARAMETER;
1337 		goto err_unlock_all;
1338 	}
1339 
1340 	if (obj->desc_filled != obj->desc_size) {
1341 		WARN("%s: incomplete object desc filled %zu < size %zu\n",
1342 		     __func__, obj->desc_filled, obj->desc_size);
1343 		ret = FFA_ERROR_INVALID_PARAMETER;
1344 		goto err_unlock_all;
1345 	}
1346 
1347 	if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
1348 		WARN("%s: wrong sender id 0x%x != 0x%x\n",
1349 		     __func__, req->sender_id, obj->desc.sender_id);
1350 		ret = FFA_ERROR_INVALID_PARAMETER;
1351 		goto err_unlock_all;
1352 	}
1353 
1354 	if (req->emad_count != 0U && req->tag != obj->desc.tag) {
1355 		WARN("%s: wrong tag 0x%lx != 0x%lx\n",
1356 		     __func__, req->tag, obj->desc.tag);
1357 		ret = FFA_ERROR_INVALID_PARAMETER;
1358 		goto err_unlock_all;
1359 	}
1360 
1361 	if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
1362 		WARN("%s: mistmatch of endpoint counts %u != %u\n",
1363 		     __func__, req->emad_count, obj->desc.emad_count);
1364 		ret = FFA_ERROR_INVALID_PARAMETER;
1365 		goto err_unlock_all;
1366 	}
1367 
1368 	/* Ensure the NS bit is set to 0 in the request. */
1369 	if ((req->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
1370 		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
1371 		ret = FFA_ERROR_INVALID_PARAMETER;
1372 		goto err_unlock_all;
1373 	}
1374 
1375 	if (req->flags != 0U) {
1376 		if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
1377 		    (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
1378 			/*
1379 			 * If the retrieve request specifies the memory
1380 			 * transaction ensure it matches what we expect.
1381 			 */
1382 			WARN("%s: wrong mem transaction flags %x != %x\n",
1383 			__func__, req->flags, obj->desc.flags);
1384 			ret = FFA_ERROR_INVALID_PARAMETER;
1385 			goto err_unlock_all;
1386 		}
1387 
1388 		if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
1389 		    req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
1390 			/*
1391 			 * Current implementation does not support donate and
1392 			 * it supports no other flags.
1393 			 */
1394 			WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
1395 			ret = FFA_ERROR_INVALID_PARAMETER;
1396 			goto err_unlock_all;
1397 		}
1398 	}
1399 
1400 	/* Validate that the provided emad offset and structure are valid. */
1401 	for (size_t i = 0; i < req->emad_count; i++) {
1402 		size_t emad_size;
1403 		struct ffa_emad_v1_0 *emad;
1404 
1405 		emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
1406 					       &emad_size);
1407 		if (emad == NULL) {
1408 			WARN("%s: invalid emad structure.\n", __func__);
1409 			ret = FFA_ERROR_INVALID_PARAMETER;
1410 			goto err_unlock_all;
1411 		}
1412 
1413 		if ((uintptr_t) emad >= (uintptr_t)
1414 					((uint8_t *) req + total_length)) {
1415 			WARN("Invalid emad access.\n");
1416 			ret = FFA_ERROR_INVALID_PARAMETER;
1417 			goto err_unlock_all;
1418 		}
1419 	}
1420 
1421 	/*
1422 	 * Validate all the endpoints match in the case of multiple
1423 	 * borrowers. We don't mandate that the order of the borrowers
1424 	 * must match in the descriptors therefore check to see if the
1425 	 * endpoints match in any order.
1426 	 */
1427 	for (size_t i = 0; i < req->emad_count; i++) {
1428 		bool found = false;
1429 		size_t emad_size;
1430 		struct ffa_emad_v1_0 *emad;
1431 		struct ffa_emad_v1_0 *other_emad;
1432 
1433 		emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
1434 					       &emad_size);
1435 		if (emad == NULL) {
1436 			ret = FFA_ERROR_INVALID_PARAMETER;
1437 			goto err_unlock_all;
1438 		}
1439 
1440 		for (size_t j = 0; j < obj->desc.emad_count; j++) {
1441 			other_emad = spmc_shmem_obj_get_emad(
1442 					&obj->desc, j, MAKE_FFA_VERSION(1, 1),
1443 					&emad_size);
1444 
1445 			if (other_emad == NULL) {
1446 				ret = FFA_ERROR_INVALID_PARAMETER;
1447 				goto err_unlock_all;
1448 			}
1449 
1450 			if (req->emad_count &&
1451 			    emad->mapd.endpoint_id ==
1452 			    other_emad->mapd.endpoint_id) {
1453 				found = true;
1454 				break;
1455 			}
1456 		}
1457 
1458 		if (!found) {
1459 			WARN("%s: invalid receiver id (0x%x).\n",
1460 			     __func__, emad->mapd.endpoint_id);
1461 			ret = FFA_ERROR_INVALID_PARAMETER;
1462 			goto err_unlock_all;
1463 		}
1464 	}
1465 
1466 	mbox->state = MAILBOX_STATE_FULL;
1467 
1468 	if (req->emad_count != 0U) {
1469 		obj->in_use++;
1470 	}
1471 
1472 	/*
1473 	 * If the caller is v1.0 convert the descriptor, otherwise copy
1474 	 * directly.
1475 	 */
1476 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1477 		ret = spmc_populate_ffa_v1_0_descriptor(resp, obj, buf_size, 0,
1478 							&copy_size,
1479 							&out_desc_size);
1480 		if (ret != 0U) {
1481 			ERROR("%s: Failed to process descriptor.\n", __func__);
1482 			goto err_unlock_all;
1483 		}
1484 	} else {
1485 		copy_size = MIN(obj->desc_size, buf_size);
1486 		out_desc_size = obj->desc_size;
1487 
1488 		memcpy(resp, &obj->desc, copy_size);
1489 	}
1490 
1491 	/* Set the NS bit in the response if applicable. */
1492 	spmc_ffa_mem_retrieve_set_ns_bit(resp, sp_ctx);
1493 
1494 	spin_unlock(&spmc_shmem_obj_state.lock);
1495 	spin_unlock(&mbox->lock);
1496 
1497 	SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, out_desc_size,
1498 		 copy_size, 0, 0, 0, 0, 0);
1499 
1500 err_unlock_all:
1501 	spin_unlock(&spmc_shmem_obj_state.lock);
1502 err_unlock_mailbox:
1503 	spin_unlock(&mbox->lock);
1504 	return spmc_ffa_error_return(handle, ret);
1505 }
1506 
1507 /**
1508  * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
1509  * @client:             Client state.
1510  * @handle_low:         Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
1511  * @handle_high:        Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
1512  * @fragment_offset:    Byte offset in descriptor to resume at.
1513  * @sender_id:          Bit[31:16]: Endpoint id of sender if client is a
1514  *                      hypervisor. 0 otherwise.
1515  * @smc_handle:         Handle passed to smc call. Used to return
1516  *                      FFA_MEM_FRAG_TX.
1517  *
1518  * Return: @smc_handle on success, error code on failure.
1519  */
1520 long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
1521 			  bool secure_origin,
1522 			  uint32_t handle_low,
1523 			  uint32_t handle_high,
1524 			  uint32_t fragment_offset,
1525 			  uint32_t sender_id,
1526 			  void *cookie,
1527 			  void *handle,
1528 			  uint64_t flags)
1529 {
1530 	int ret;
1531 	void *src;
1532 	size_t buf_size;
1533 	size_t copy_size;
1534 	size_t full_copy_size;
1535 	uint32_t desc_sender_id;
1536 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1537 	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1538 	struct spmc_shmem_obj *obj;
1539 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1540 
1541 	if (!secure_origin) {
1542 		WARN("%s: can only be called from swld.\n",
1543 		     __func__);
1544 		return spmc_ffa_error_return(handle,
1545 					     FFA_ERROR_INVALID_PARAMETER);
1546 	}
1547 
1548 	spin_lock(&spmc_shmem_obj_state.lock);
1549 
1550 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1551 	if (obj == NULL) {
1552 		WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
1553 		     __func__, mem_handle);
1554 		ret = FFA_ERROR_INVALID_PARAMETER;
1555 		goto err_unlock_shmem;
1556 	}
1557 
1558 	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
1559 	if (sender_id != 0U && sender_id != desc_sender_id) {
1560 		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
1561 		     sender_id, desc_sender_id);
1562 		ret = FFA_ERROR_INVALID_PARAMETER;
1563 		goto err_unlock_shmem;
1564 	}
1565 
1566 	if (fragment_offset >= obj->desc_size) {
1567 		WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
1568 		     __func__, fragment_offset, obj->desc_size);
1569 		ret = FFA_ERROR_INVALID_PARAMETER;
1570 		goto err_unlock_shmem;
1571 	}
1572 
1573 	spin_lock(&mbox->lock);
1574 
1575 	if (mbox->rxtx_page_count == 0U) {
1576 		WARN("%s: buffer pair not registered.\n", __func__);
1577 		ret = FFA_ERROR_INVALID_PARAMETER;
1578 		goto err_unlock_all;
1579 	}
1580 
1581 	if (mbox->state != MAILBOX_STATE_EMPTY) {
1582 		WARN("%s: RX Buffer is full!\n", __func__);
1583 		ret = FFA_ERROR_DENIED;
1584 		goto err_unlock_all;
1585 	}
1586 
1587 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1588 
1589 	mbox->state = MAILBOX_STATE_FULL;
1590 
1591 	/*
1592 	 * If the caller is v1.0 convert the descriptor, otherwise copy
1593 	 * directly.
1594 	 */
1595 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1596 		size_t out_desc_size;
1597 
1598 		ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
1599 							buf_size,
1600 							fragment_offset,
1601 							&copy_size,
1602 							&out_desc_size);
1603 		if (ret != 0U) {
1604 			ERROR("%s: Failed to process descriptor.\n", __func__);
1605 			goto err_unlock_all;
1606 		}
1607 	} else {
1608 		full_copy_size = obj->desc_size - fragment_offset;
1609 		copy_size = MIN(full_copy_size, buf_size);
1610 
1611 		src = &obj->desc;
1612 
1613 		memcpy(mbox->rx_buffer, src + fragment_offset, copy_size);
1614 	}
1615 
1616 	spin_unlock(&mbox->lock);
1617 	spin_unlock(&spmc_shmem_obj_state.lock);
1618 
1619 	SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
1620 		 copy_size, sender_id, 0, 0, 0);
1621 
1622 err_unlock_all:
1623 	spin_unlock(&mbox->lock);
1624 err_unlock_shmem:
1625 	spin_unlock(&spmc_shmem_obj_state.lock);
1626 	return spmc_ffa_error_return(handle, ret);
1627 }
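/*
 * Example flow, implied by the implementation above, for retrieving a
 * descriptor larger than the RX/TX buffer:
 *
 *   FFA_MEM_RETRIEVE_REQ  -> FFA_MEM_RETRIEVE_RESP (first copy_size bytes)
 *   FFA_MEM_FRAG_RX(handle, fragment_offset = bytes received so far)
 *                         -> FFA_MEM_FRAG_TX (next copy_size bytes)
 *   ...repeat FFA_MEM_FRAG_RX until the full descriptor is received.
 */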
1628 
1629 /**
1630  * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
1631  * @client:             Client state.
1632  *
1633  * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
1634  * Used by the secure os to release memory previously shared by the non-secure os.
1635  *
1636  * The handle to release must be in the client's (secure os's) transmit buffer.
1637  *
1638  * Return: 0 on success, error code on failure.
1639  */
1640 int spmc_ffa_mem_relinquish(uint32_t smc_fid,
1641 			    bool secure_origin,
1642 			    uint32_t handle_low,
1643 			    uint32_t handle_high,
1644 			    uint32_t fragment_offset,
1645 			    uint32_t sender_id,
1646 			    void *cookie,
1647 			    void *handle,
1648 			    uint64_t flags)
1649 {
1650 	int ret;
1651 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1652 	struct spmc_shmem_obj *obj;
1653 	const struct ffa_mem_relinquish_descriptor *req;
1654 
1655 	if (!secure_origin) {
1656 		WARN("%s: unsupported relinquish direction.\n", __func__);
1657 		return spmc_ffa_error_return(handle,
1658 					     FFA_ERROR_INVALID_PARAMETER);
1659 	}
1660 
1661 	spin_lock(&mbox->lock);
1662 
1663 	if (mbox->rxtx_page_count == 0U) {
1664 		WARN("%s: buffer pair not registered.\n", __func__);
1665 		ret = FFA_ERROR_INVALID_PARAMETER;
1666 		goto err_unlock_mailbox;
1667 	}
1668 
1669 	req = mbox->tx_buffer;
1670 
1671 	if (req->flags != 0U) {
1672 		WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
1673 		ret = FFA_ERROR_INVALID_PARAMETER;
1674 		goto err_unlock_mailbox;
1675 	}
1676 
1677 	if (req->endpoint_count == 0) {
1678 		WARN("%s: endpoint count cannot be 0.\n", __func__);
1679 		ret = FFA_ERROR_INVALID_PARAMETER;
1680 		goto err_unlock_mailbox;
1681 	}
1682 
1683 	spin_lock(&spmc_shmem_obj_state.lock);
1684 
1685 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1686 	if (obj == NULL) {
1687 		ret = FFA_ERROR_INVALID_PARAMETER;
1688 		goto err_unlock_all;
1689 	}
1690 
1691 	if (obj->desc.emad_count != req->endpoint_count) {
1692 		WARN("%s: mismatch of endpoint count %u != %u\n", __func__,
1693 		     obj->desc.emad_count, req->endpoint_count);
1694 		ret = FFA_ERROR_INVALID_PARAMETER;
1695 		goto err_unlock_all;
1696 	}
1697 
1698 	/* Validate requested endpoint IDs match descriptor. */
1699 	for (size_t i = 0; i < req->endpoint_count; i++) {
1700 		bool found = false;
1701 		size_t emad_size;
1702 		struct ffa_emad_v1_0 *emad;
1703 
1704 		for (unsigned int j = 0; j < obj->desc.emad_count; j++) {
1705 			emad = spmc_shmem_obj_get_emad(&obj->desc, j,
1706 							MAKE_FFA_VERSION(1, 1),
1707 							&emad_size);
1708 			if (req->endpoint_array[i] ==
1709 			    emad->mapd.endpoint_id) {
1710 				found = true;
1711 				break;
1712 			}
1713 		}
1714 
1715 		if (!found) {
1716 			WARN("%s: Invalid endpoint ID (0x%x).\n",
1717 			     __func__, req->endpoint_array[i]);
1718 			ret = FFA_ERROR_INVALID_PARAMETER;
1719 			goto err_unlock_all;
1720 		}
1721 	}
1722 
1723 	if (obj->in_use == 0U) {
1724 		ret = FFA_ERROR_INVALID_PARAMETER;
1725 		goto err_unlock_all;
1726 	}
1727 	obj->in_use--;
1728 
1729 	spin_unlock(&spmc_shmem_obj_state.lock);
1730 	spin_unlock(&mbox->lock);
1731 
1732 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1733 
1734 err_unlock_all:
1735 	spin_unlock(&spmc_shmem_obj_state.lock);
1736 err_unlock_mailbox:
1737 	spin_unlock(&mbox->lock);
1738 	return spmc_ffa_error_return(handle, ret);
1739 }
1740 
1741 /**
1742  * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
1743  * @client:         Client state.
1744  * @handle_low:     Unique handle of shared memory object to reclaim. Bit[31:0].
1745  * @handle_high:    Unique handle of shared memory object to reclaim.
1746  *                  Bit[63:32].
1747  * @flags:          Unsupported, ignored.
1748  *
1749  * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
1750  * Used by the non-secure os to reclaim memory previously shared with the secure os.
1751  *
1752  * Return: 0 on success, error code on failure.
1753  */
1754 int spmc_ffa_mem_reclaim(uint32_t smc_fid,
1755 			 bool secure_origin,
1756 			 uint32_t handle_low,
1757 			 uint32_t handle_high,
1758 			 uint32_t mem_flags,
1759 			 uint64_t x4,
1760 			 void *cookie,
1761 			 void *handle,
1762 			 uint64_t flags)
1763 {
1764 	int ret;
1765 	struct spmc_shmem_obj *obj;
1766 	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1767 
1768 	if (secure_origin) {
1769 		WARN("%s: unsupported reclaim direction.\n", __func__);
1770 		return spmc_ffa_error_return(handle,
1771 					     FFA_ERROR_INVALID_PARAMETER);
1772 	}
1773 
1774 	if (mem_flags != 0U) {
1775 		WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
1776 		return spmc_ffa_error_return(handle,
1777 					     FFA_ERROR_INVALID_PARAMETER);
1778 	}
1779 
1780 	spin_lock(&spmc_shmem_obj_state.lock);
1781 
1782 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1783 	if (obj == NULL) {
1784 		ret = FFA_ERROR_INVALID_PARAMETER;
1785 		goto err_unlock;
1786 	}
1787 	if (obj->in_use != 0U) {
1788 		ret = FFA_ERROR_DENIED;
1789 		goto err_unlock;
1790 	}
1791 	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1792 	spin_unlock(&spmc_shmem_obj_state.lock);
1793 
1794 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1795 
1796 err_unlock:
1797 	spin_unlock(&spmc_shmem_obj_state.lock);
1798 	return spmc_ffa_error_return(handle, ret);
1799 }
1800