xref: /rk3399_ARM-atf/services/std_svc/spm/el3_spmc/spmc_shared_mem.c (revision 6dc5979a6cb2121e4c16e7bd62e24030e0f42755)
1 /*
2  * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 #include <assert.h>
7 #include <errno.h>
8 
9 #include <common/debug.h>
10 #include <common/runtime_svc.h>
11 #include <lib/object_pool.h>
12 #include <lib/spinlock.h>
13 #include <lib/xlat_tables/xlat_tables_v2.h>
14 #include <services/ffa_svc.h>
15 #include "spmc.h"
16 #include "spmc_shared_mem.h"
17 
18 #include <platform_def.h>
19 
20 /**
21  * struct spmc_shmem_obj - Shared memory object.
22  * @desc_size:      Size of @desc.
23  * @desc_filled:    Size of @desc already received.
24  * @in_use:         Number of clients that have called ffa_mem_retrieve_req
25  *                  without a matching ffa_mem_relinquish call.
26  * @desc:           FF-A memory region descriptor passed in ffa_mem_share.
27  */
28 struct spmc_shmem_obj {
29 	size_t desc_size;
30 	size_t desc_filled;
31 	size_t in_use;
32 	struct ffa_mtd desc;
33 };
34 
35 /*
36  * Declare our data structure to store the metadata of memory share requests.
37  * The main datastore is allocated on a per platform basis to ensure enough
38  * storage can be made available.
39  * The address of the data store will be populated by the SPMC during its
40  * initialization.
41  */
42 
43 struct spmc_shmem_obj_state spmc_shmem_obj_state = {
44 	/* Set start value for handle so top 32 bits are needed quickly. */
45 	.next_handle = 0xffffffc0U,
46 };
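
/*
 * Editorial sketch (not part of this file): a platform supplies the
 * backing datastore through the EL3 SPMC platform interface, which the
 * SPMC calls at boot to populate .data and .data_size above. The buffer
 * name and size macro below are hypothetical.
 *
 *   static uint8_t plat_datastore[PLAT_SPMC_SHMEM_DATASTORE_SIZE];
 *
 *   int plat_spmc_shmem_datastore_get(uint8_t **datastore, size_t *size)
 *   {
 *           *datastore = plat_datastore;
 *           *size = sizeof(plat_datastore);
 *           return 0;
 *   }
 */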
47 
48 /**
49  * spmc_shmem_obj_size - Convert from descriptor size to object size.
50  * @desc_size:  Size of struct ffa_memory_region_descriptor object.
51  *
52  * Return: Size of struct spmc_shmem_obj object.
53  */
54 static size_t spmc_shmem_obj_size(size_t desc_size)
55 {
56 	return desc_size + offsetof(struct spmc_shmem_obj, desc);
57 }
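
/*
 * Worked example (illustrative): an object wrapping a 0x100-byte
 * descriptor occupies offsetof(struct spmc_shmem_obj, desc) + 0x100
 * bytes of the datastore, i.e. the descriptor plus the bookkeeping
 * fields declared above it.
 */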
58 
59 /**
60  * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
61  * @state:      Global state.
62  * @desc_size:  Size of struct ffa_memory_region_descriptor object that
63  *              allocated object will hold.
64  *
65  * Return: Pointer to newly allocated object, or %NULL if there is not enough
66  *         space left. The returned pointer is only valid while @state is
67  *         locked; to use it again after unlocking @state,
68  *         spmc_shmem_obj_lookup must be called.
69  */
70 static struct spmc_shmem_obj *
71 spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
72 {
73 	struct spmc_shmem_obj *obj;
74 	size_t free = state->data_size - state->allocated;
75 
76 	if (state->data == NULL) {
77 		ERROR("Missing shmem datastore!\n");
78 		return NULL;
79 	}
80 
81 	if (spmc_shmem_obj_size(desc_size) > free) {
82 		WARN("%s(0x%zx) failed, free 0x%zx\n",
83 		     __func__, desc_size, free);
84 		return NULL;
85 	}
86 	obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
87 	obj->desc = (struct ffa_mtd) {0};
88 	obj->desc_size = desc_size;
89 	obj->desc_filled = 0;
90 	obj->in_use = 0;
91 	state->allocated += spmc_shmem_obj_size(desc_size);
92 	return obj;
93 }
94 
95 /**
96  * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
97  * @state:      Global state.
98  * @obj:        Object to free.
99  *
100  * Release memory used by @obj. Other objects may move, so on return all
101  * pointers to struct spmc_shmem_obj object should be considered invalid, not
102  * just @obj.
103  *
104  * The current implementation always compacts the remaining objects to simplify
105  * the allocator and to avoid fragmentation.
106  */
107 
108 static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
109 				  struct spmc_shmem_obj *obj)
110 {
111 	size_t free_size = spmc_shmem_obj_size(obj->desc_size);
112 	uint8_t *shift_dest = (uint8_t *)obj;
113 	uint8_t *shift_src = shift_dest + free_size;
114 	size_t shift_size = state->allocated - (shift_src - state->data);
115 
116 	if (shift_size != 0U) {
117 		memmove(shift_dest, shift_src, shift_size);
118 	}
119 	state->allocated -= free_size;
120 }
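
/*
 * Worked example (illustrative): with three objects A|B|C packed in
 * @state->data, freeing B moves the bytes of C down over B and shrinks
 * state->allocated by B's object size:
 *
 *   before:  [ A ][ B ][ C ]......   allocated = szA + szB + szC
 *   after:   [ A ][ C ]...........   allocated = szA + szC
 *
 * A pointer previously held to C now points into the middle of the
 * moved data, which is why callers must re-run spmc_shmem_obj_lookup.
 */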
121 
122 /**
123  * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
124  * @state:      Global state.
125  * @handle:     Unique handle of object to return.
126  *
127  * Return: struct spmc_shmem_obj object with handle matching @handle.
128  *         %NULL, if no object in @state->data has a matching handle.
129  */
130 static struct spmc_shmem_obj *
131 spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
132 {
133 	uint8_t *curr = state->data;
134 
135 	while (curr - state->data < state->allocated) {
136 		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
137 
138 		if (obj->desc.handle == handle) {
139 			return obj;
140 		}
141 		curr += spmc_shmem_obj_size(obj->desc_size);
142 	}
143 	return NULL;
144 }
145 
146 /**
147  * spmc_shmem_obj_get_next - Get the next memory object from an offset.
148  * @state:      Global state.
149  * @offset:     Offset used to track which objects have been returned.
150  *
151  * Return: the next struct spmc_shmem_obj object from the provided
152  *	   offset.
153  *	   %NULL, if there are no more objects.
154  */
155 static struct spmc_shmem_obj *
156 spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
157 {
158 	uint8_t *curr = state->data + *offset;
159 
160 	if (curr - state->data < state->allocated) {
161 		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
162 
163 		*offset += spmc_shmem_obj_size(obj->desc_size);
164 
165 		return obj;
166 	}
167 	return NULL;
168 }
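
/*
 * Typical usage (sketch): walk every object in the datastore while
 * holding the state lock, as spmc_shmem_check_state_obj() does further
 * down in this file.
 *
 *   size_t offset = 0;
 *   struct spmc_shmem_obj *cur;
 *
 *   cur = spmc_shmem_obj_get_next(&spmc_shmem_obj_state, &offset);
 *   while (cur != NULL) {
 *           // ... inspect cur->desc ...
 *           cur = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
 *                                         &offset);
 *   }
 */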
169 
170 /*******************************************************************************
171  * FF-A memory descriptor helper functions.
172  ******************************************************************************/
173 /**
174  * spmc_shmem_obj_get_emad - Get the emad from a given index depending on the
175  *                           client's FF-A version.
176  * @desc:         The memory transaction descriptor.
177  * @index:        The index of the emad element to be accessed.
178  * @ffa_version:  FF-A version of the provided structure.
179  * @emad_size:    Will be populated with the size of the returned emad
180  *                descriptor.
181  * Return: A pointer to the requested emad structure.
182  */
183 static void *
184 spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
185 			uint32_t ffa_version, size_t *emad_size)
186 {
187 	uint8_t *emad;
188 	/*
189 	 * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
190 	 * format, otherwise assume it is a v1.1 format.
191 	 */
192 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
193 		/* Cast our descriptor to the v1.0 format. */
194 		struct ffa_mtd_v1_0 *mtd_v1_0 =
195 					(struct ffa_mtd_v1_0 *) desc;
196 		emad = (uint8_t *) &(mtd_v1_0->emad);
197 		*emad_size = sizeof(struct ffa_emad_v1_0);
198 	} else {
199 		if (!is_aligned(desc->emad_offset, 16)) {
200 			WARN("Emad offset is not aligned.\n");
201 			return NULL;
202 		}
203 		emad = ((uint8_t *) desc + desc->emad_offset);
204 		*emad_size = desc->emad_size;
205 	}
206 	return (emad + (*emad_size * index));
207 }
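
/*
 * Layout note (illustrative): for a v1.1 descriptor the emad array
 * lives at desc->emad_offset and is indexed in byte strides of
 * desc->emad_size, e.g. for index 2:
 *
 *   emad2 = (uint8_t *)desc + desc->emad_offset + (2 * desc->emad_size);
 *
 * For v1.0 the array is embedded in struct ffa_mtd_v1_0 itself and the
 * stride is always sizeof(struct ffa_emad_v1_0).
 */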
208 
209 /**
210  * spmc_shmem_obj_get_comp_mrd - Get comp_mrd from a mtd struct based on the
211  *				 FF-A version of the descriptor.
212  * @obj:    Object containing ffa_memory_region_descriptor.
213  *
214  * Return: struct ffa_comp_mrd object corresponding to the composite memory
215  *	   region descriptor.
216  */
217 static struct ffa_comp_mrd *
218 spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
219 {
220 	size_t emad_size;
221 	/*
222 	 * The comp_mrd_offset field of the emad descriptor remains consistent
223 	 * between FF-A versions therefore we can use the v1.0 descriptor here
224 	 * in all cases.
225 	 */
226 	struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
227 							     ffa_version,
228 							     &emad_size);
229 	/* Ensure the emad array was found. */
230 	if (emad == NULL) {
231 		return NULL;
232 	}
233 
234 	/* Ensure the composite descriptor offset is aligned. */
235 	if (!is_aligned(emad->comp_mrd_offset, 8)) {
236 		WARN("Unaligned composite memory region descriptor offset.\n");
237 		return NULL;
238 	}
239 
240 	return (struct ffa_comp_mrd *)
241 	       ((uint8_t *)(&obj->desc) + emad->comp_mrd_offset);
242 }
243 
244 /**
245  * spmc_shmem_obj_ffa_constituent_size - Calculate variable size part of obj.
246  * @obj:    Object containing ffa_memory_region_descriptor.
247  *
248  * Return: Size of ffa_constituent_memory_region_descriptors in @obj.
249  */
250 static size_t
251 spmc_shmem_obj_ffa_constituent_size(struct spmc_shmem_obj *obj,
252 				    uint32_t ffa_version)
253 {
254 	struct ffa_comp_mrd *comp_mrd;
255 
256 	comp_mrd = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);
257 	if (comp_mrd == NULL) {
258 		return 0;
259 	}
260 	return comp_mrd->address_range_count * sizeof(struct ffa_cons_mrd);
261 }
262 
263 /**
264  * spmc_shmem_obj_validate_id - Validate a partition ID is participating in
265  *				a given memory transaction.
266  * @sp_id:      Partition ID to validate.
267  * @desc:       Descriptor of the memory transaction.
268  *
269  * Return: true if ID is valid, else false.
270  */
271 bool spmc_shmem_obj_validate_id(const struct ffa_mtd *desc, uint16_t sp_id)
272 {
273 	bool found = false;
274 
275 	/* Validate the partition is a valid participant. */
276 	for (unsigned int i = 0U; i < desc->emad_count; i++) {
277 		size_t emad_size;
278 		struct ffa_emad_v1_0 *emad;
279 
280 		emad = spmc_shmem_obj_get_emad(desc, i,
281 					       MAKE_FFA_VERSION(1, 1),
282 					       &emad_size);
283 		if ((emad != NULL) && (sp_id == emad->mapd.endpoint_id)) {
284 			found = true;
285 			break;
286 		}
287 	}
288 	return found;
289 }
290 
291 /*
292  * Compare two memory regions to determine if any range overlaps with another
293  * ongoing memory transaction.
294  */
295 static bool
296 overlapping_memory_regions(struct ffa_comp_mrd *region1,
297 			   struct ffa_comp_mrd *region2)
298 {
299 	uint64_t region1_start;
300 	uint64_t region1_size;
301 	uint64_t region1_end;
302 	uint64_t region2_start;
303 	uint64_t region2_size;
304 	uint64_t region2_end;
305 
306 	assert(region1 != NULL);
307 	assert(region2 != NULL);
308 
309 	if (region1 == region2) {
310 		return true;
311 	}
312 
313 	/*
314 	 * Check each memory region in the request against existing
315 	 * transactions.
316 	 */
317 	for (size_t i = 0; i < region1->address_range_count; i++) {
318 
319 		region1_start = region1->address_range_array[i].address;
320 		region1_size =
321 			(uint64_t)region1->address_range_array[i].page_count *
322 			PAGE_SIZE_4KB;
323 		region1_end = region1_start + region1_size;
324 
325 		for (size_t j = 0; j < region2->address_range_count; j++) {
326 
327 			region2_start = region2->address_range_array[j].address;
328 			region2_size =
329 				(uint64_t)region2->address_range_array[j].page_count *
330 				PAGE_SIZE_4KB;
331 			region2_end = region2_start + region2_size;
332 
333 			/* Ranges overlap when each one starts before the */
334 			/* other ends; this covers full containment too. */
335 			if ((region1_start < region2_end) &&
336 			    (region2_start < region1_end)) {
337 				WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
338 				     region1_start, region1_end,
339 				     region2_start, region2_end);
340 				return true;
341 			}
342 		}
343 	}
344 	return false;
345 }
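
/*
 * Example (illustrative addresses): a 2-page range at 0x80000000 spans
 * [0x80000000, 0x80002000) and overlaps a 1-page range at 0x80001000,
 * since each range starts before the other ends. A 1-page range at
 * 0x80002000 does not overlap: half-open ranges that merely touch are
 * disjoint.
 */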
346 
347 /*******************************************************************************
348  * FF-A v1.0 Memory Descriptor Conversion Helpers.
349  ******************************************************************************/
350 /**
351  * spmc_shm_get_v1_1_descriptor_size - Calculate the required size for a v1.1
352  *                                     converted descriptor.
353  * @orig:       The original v1.0 memory transaction descriptor.
354  * @desc_size:  The size of the original v1.0 memory transaction descriptor.
355  *
356  * Return: the size required to store the descriptor in the v1.1 format.
357  */
358 static size_t
359 spmc_shm_get_v1_1_descriptor_size(struct ffa_mtd_v1_0 *orig, size_t desc_size)
360 {
361 	size_t size = 0;
362 	struct ffa_comp_mrd *mrd;
363 	struct ffa_emad_v1_0 *emad_array = orig->emad;
364 
365 	/* Get the size of the v1.1 descriptor. */
366 	size += sizeof(struct ffa_mtd);
367 
368 	/* Add the size of the emad descriptors. */
369 	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);
370 
371 	/* Add the size of the composite mrds. */
372 	size += sizeof(struct ffa_comp_mrd);
373 
374 	/* Add the size of the constituent mrds. */
375 	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
376 	      emad_array[0].comp_mrd_offset);
377 
378 	/* Check the calculated address is within the memory descriptor. */
379 	if ((uintptr_t) mrd >= (uintptr_t)((uint8_t *) orig + desc_size)) {
380 		return 0;
381 	}
382 	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);
383 
384 	return size;
385 }
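
/*
 * Worked example (illustrative counts): converting a v1.0 share with
 * 2 endpoints and 3 constituent ranges requires
 *
 *   sizeof(struct ffa_mtd)                  // v1.1 header
 *   + 2 * sizeof(struct ffa_emad_v1_0)      // endpoint descriptors
 *   + sizeof(struct ffa_comp_mrd)           // composite header
 *   + 3 * sizeof(struct ffa_cons_mrd)       // constituent ranges
 *
 * which is exactly the sum accumulated above.
 */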
386 
387 /**
388  * spmc_shm_get_v1_0_descriptor_size - Calculate the required size for a v1.0
389  *                                     converted descriptor.
390  * @orig:       The original v1.1 memory transaction descriptor.
391  * @desc_size:  The size of the original v1.1 memory transaction descriptor.
392  *
393  * Return: the size required to store the descriptor in the v1.0 format.
394  */
395 static size_t
396 spmc_shm_get_v1_0_descriptor_size(struct ffa_mtd *orig, size_t desc_size)
397 {
398 	size_t size = 0;
399 	struct ffa_comp_mrd *mrd;
400 	struct ffa_emad_v1_0 *emad_array = (struct ffa_emad_v1_0 *)
401 					   ((uint8_t *) orig +
402 					    orig->emad_offset);
403 
404 	/* Get the size of the v1.0 descriptor. */
405 	size += sizeof(struct ffa_mtd_v1_0);
406 
407 	/* Add the size of the v1.0 emad descriptors. */
408 	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);
409 
410 	/* Add the size of the composite mrds. */
411 	size += sizeof(struct ffa_comp_mrd);
412 
413 	/* Add the size of the constituent mrds. */
414 	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
415 	      emad_array[0].comp_mrd_offset);
416 
417 	/* Check the calculated address is within the memory descriptor. */
418 	if ((uintptr_t) mrd >= (uintptr_t)((uint8_t *) orig + desc_size)) {
419 		return 0;
420 	}
421 	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);
422 
423 	return size;
424 }
425 
426 /**
427  * spmc_shm_convert_shmem_obj_from_v1_0 - Converts a given v1.0 memory object.
428  * @out_obj:	The shared memory object to populate the converted descriptor.
429  * @orig:	The shared memory object containing the v1.0 descriptor.
430  *
431  * Return: true if the conversion is successful else false.
432  */
433 static bool
434 spmc_shm_convert_shmem_obj_from_v1_0(struct spmc_shmem_obj *out_obj,
435 				     struct spmc_shmem_obj *orig)
436 {
437 	struct ffa_mtd_v1_0 *mtd_orig = (struct ffa_mtd_v1_0 *) &orig->desc;
438 	struct ffa_mtd *out = &out_obj->desc;
439 	struct ffa_emad_v1_0 *emad_array_in;
440 	struct ffa_emad_v1_0 *emad_array_out;
441 	struct ffa_comp_mrd *mrd_in;
442 	struct ffa_comp_mrd *mrd_out;
443 
444 	size_t mrd_in_offset;
445 	size_t mrd_out_offset;
446 	size_t mrd_size = 0;
447 
448 	/* Populate the new descriptor format from the v1.0 struct. */
449 	out->sender_id = mtd_orig->sender_id;
450 	out->memory_region_attributes = mtd_orig->memory_region_attributes;
451 	out->flags = mtd_orig->flags;
452 	out->handle = mtd_orig->handle;
453 	out->tag = mtd_orig->tag;
454 	out->emad_count = mtd_orig->emad_count;
455 	out->emad_size = sizeof(struct ffa_emad_v1_0);
456 
457 	/*
458 	 * We will locate the emad descriptors directly after the ffa_mtd
459 	 * struct. This will be 8-byte aligned.
460 	 */
461 	out->emad_offset = sizeof(struct ffa_mtd);
462 
463 	emad_array_in = mtd_orig->emad;
464 	emad_array_out = (struct ffa_emad_v1_0 *)
465 			 ((uint8_t *) out + out->emad_offset);
466 
467 	/* Copy across the emad structs. */
468 	for (unsigned int i = 0U; i < out->emad_count; i++) {
469 		memcpy(&emad_array_out[i], &emad_array_in[i],
470 		       sizeof(struct ffa_emad_v1_0));
471 	}
472 
473 	/* Place the mrd descriptors after the end of the emad descriptors.*/
474 	mrd_in_offset = emad_array_in->comp_mrd_offset;
475 	mrd_out_offset = out->emad_offset + (out->emad_size * out->emad_count);
476 	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);
477 
478 	/* Add the size of the composite memory region descriptor. */
479 	mrd_size += sizeof(struct ffa_comp_mrd);
480 
481 	/* Find the mrd descriptor. */
482 	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);
483 
484 	/* Add the size of the constituent memory region descriptors. */
485 	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);
486 
487 	/*
488 	 * Update the offset in the emads by the delta between the input and
489 	 * output addresses.
490 	 */
491 	for (unsigned int i = 0U; i < out->emad_count; i++) {
492 		emad_array_out[i].comp_mrd_offset =
493 			emad_array_in[i].comp_mrd_offset +
494 			(mrd_out_offset - mrd_in_offset);
495 	}
496 
497 	/* Verify that we stay within bound of the memory descriptors. */
498 	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
499 	     (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
500 	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
501 	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
502 		ERROR("%s: Invalid mrd structure.\n", __func__);
503 		return false;
504 	}
505 
506 	/* Copy the mrd descriptors directly. */
507 	memcpy(mrd_out, mrd_in, mrd_size);
508 
509 	return true;
510 }
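
/*
 * Resulting v1.1 layout (sketch): the converted object is packed as
 *
 *   [ struct ffa_mtd ]                      <- emad_offset points just
 *   [ emad[0] .. emad[emad_count - 1] ]        past the header
 *   [ struct ffa_comp_mrd ]                 <- each emad's
 *   [ cons_mrd[0] .. cons_mrd[n - 1] ]         comp_mrd_offset
 *
 * with every comp_mrd_offset rebased by the distance the composite
 * descriptor moved relative to its position in the v1.0 input.
 */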
511 
512 /**
513  * spmc_shm_convert_mtd_to_v1_0 - Converts a given v1.1 memory object to
514  *                                v1.0 memory object.
515  * @out_obj:    The shared memory object to populate the v1.0 descriptor.
516  * @orig:       The shared memory object containing the v1.1 descriptor.
517  *
518  * Return: true if the conversion is successful else false.
519  */
520 static bool
521 spmc_shm_convert_mtd_to_v1_0(struct spmc_shmem_obj *out_obj,
522 			     struct spmc_shmem_obj *orig)
523 {
524 	struct ffa_mtd *mtd_orig = &orig->desc;
525 	struct ffa_mtd_v1_0 *out = (struct ffa_mtd_v1_0 *) &out_obj->desc;
526 	struct ffa_emad_v1_0 *emad_in;
527 	struct ffa_emad_v1_0 *emad_array_in;
528 	struct ffa_emad_v1_0 *emad_array_out;
529 	struct ffa_comp_mrd *mrd_in;
530 	struct ffa_comp_mrd *mrd_out;
531 
532 	size_t mrd_in_offset;
533 	size_t mrd_out_offset;
534 	size_t emad_out_array_size;
535 	size_t mrd_size = 0;
536 
537 	/* Populate the v1.0 descriptor format from the v1.1 struct. */
538 	out->sender_id = mtd_orig->sender_id;
539 	out->memory_region_attributes = mtd_orig->memory_region_attributes;
540 	out->flags = mtd_orig->flags;
541 	out->handle = mtd_orig->handle;
542 	out->tag = mtd_orig->tag;
543 	out->emad_count = mtd_orig->emad_count;
544 
545 	/* Determine the location of the emad array in both descriptors. */
546 	emad_array_in = (struct ffa_emad_v1_0 *)
547 			((uint8_t *) mtd_orig + mtd_orig->emad_offset);
548 	emad_array_out = out->emad;
549 
550 	/* Copy across the emad structs. */
551 	emad_in = emad_array_in;
552 	for (unsigned int i = 0U; i < out->emad_count; i++) {
553 		memcpy(&emad_array_out[i], emad_in,
554 		       sizeof(struct ffa_emad_v1_0));
555 		emad_in = (struct ffa_emad_v1_0 *)((uint8_t *)emad_in +
556 						   mtd_orig->emad_size);
557 	}
558 
559 	/* Place the mrd descriptors after the end of the emad descriptors. */
560 	emad_out_array_size = sizeof(struct ffa_emad_v1_0) * out->emad_count;
561 
562 	mrd_out_offset = (uint8_t *) out->emad - (uint8_t *) out +
563 			  emad_out_array_size;
564 
565 	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);
566 
567 	mrd_in_offset = mtd_orig->emad_offset +
568 			(mtd_orig->emad_size * mtd_orig->emad_count);
569 
570 	/* Add the size of the composite memory region descriptor. */
571 	mrd_size += sizeof(struct ffa_comp_mrd);
572 
573 	/* Find the mrd descriptor. */
574 	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);
575 
576 	/* Add the size of the constituent memory region descriptors. */
577 	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);
578 
579 	/*
580 	 * Update the offset in the emads by the delta between the input and
581 	 * output addresses.
582 	 */
583 	emad_in = emad_array_in;
584 
585 	for (unsigned int i = 0U; i < out->emad_count; i++) {
586 		emad_array_out[i].comp_mrd_offset = emad_in->comp_mrd_offset +
587 			(mrd_out_offset - mrd_in_offset);
588 		emad_in = (struct ffa_emad_v1_0 *)((uint8_t *)emad_in +
589 						   mtd_orig->emad_size);
590 	}
591 
592 	/* Verify that we stay within bound of the memory descriptors. */
593 	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
594 	     (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
595 	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
596 	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
597 		ERROR("%s: Invalid mrd structure.\n", __func__);
598 		return false;
599 	}
600 
601 	/* Copy the mrd descriptors directly. */
602 	memcpy(mrd_out, mrd_in, mrd_size);
603 
604 	return true;
605 }
606 
607 /**
608  * spmc_populate_ffa_v1_0_descriptor - Converts a given v1.1 memory object to
609  *                                     the v1.0 format and populates the
610  *                                     provided buffer.
611  * @dst:	    Buffer to populate v1.0 ffa_memory_region_descriptor.
612  * @orig_obj:	    Object containing v1.1 ffa_memory_region_descriptor.
613  * @buf_size:	    Size of the buffer to populate.
614  * @offset:	    The offset of the converted descriptor to copy.
615  * @copy_size:	    Will be populated with the number of bytes copied.
616  * @v1_0_desc_size: Will be populated with the total size of the v1.0
617  *                  descriptor.
618  *
619  * Return: 0 if conversion and population succeeded.
620  * Note: This function invalidates the reference to @orig_obj, therefore
621  * `spmc_shmem_obj_lookup` must be called if further usage is required.
622  */
623 static uint32_t
624 spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
625 				 size_t buf_size, size_t offset,
626 				 size_t *copy_size, size_t *v1_0_desc_size)
627 {
628 	struct spmc_shmem_obj *v1_0_obj;
629 
630 	/* Calculate the size that the v1.0 descriptor will require. */
631 	*v1_0_desc_size = spmc_shm_get_v1_0_descriptor_size(
632 				&orig_obj->desc, orig_obj->desc_size);
633 
634 	if (*v1_0_desc_size == 0) {
635 		ERROR("%s: cannot determine size of descriptor.\n",
636 		      __func__);
637 		return FFA_ERROR_INVALID_PARAMETER;
638 	}
639 
640 	/* Get a new obj to store the v1.0 descriptor. */
641 	v1_0_obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
642 					*v1_0_desc_size);
643 
644 	if (!v1_0_obj) {
645 		return FFA_ERROR_NO_MEMORY;
646 	}
647 
648 	/* Perform the conversion from v1.1 to v1.0. */
649 	if (!spmc_shm_convert_mtd_to_v1_0(v1_0_obj, orig_obj)) {
650 		spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
651 		return FFA_ERROR_INVALID_PARAMETER;
652 	}
653 
654 	*copy_size = MIN(v1_0_obj->desc_size - offset, buf_size);
655 	memcpy(dst, (uint8_t *) &v1_0_obj->desc + offset, *copy_size);
656 
657 	/*
658 	 * We're finished with the v1.0 descriptor for now so free it.
659 	 * Note that this will invalidate any references to the v1.1
660 	 * descriptor.
661 	 */
662 	spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
663 
664 	return 0;
665 }
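
/*
 * Usage sketch (illustrative): both retrieve paths below call this
 * helper with the caller's RX buffer as @dst; FFA_MEM_RETRIEVE_REQ
 * passes offset 0 while FFA_MEM_FRAG_RX passes the offset to resume
 * from.
 *
 *   ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
 *                                           buf_size, fragment_offset,
 *                                           &copy_size, &out_desc_size);
 */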
666 
667 /**
668  * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
669  * @obj:	  Object containing ffa_memory_region_descriptor.
670  * @ffa_version:  FF-A version of the provided descriptor.
671  *
672  * Return: 0 if object is valid, -EINVAL if constituent_memory_region_descriptor
673  * offset or count is invalid.
674  */
675 static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
676 				uint32_t ffa_version)
677 {
678 	uint32_t comp_mrd_offset = 0;
679 
680 	if (obj->desc.emad_count == 0U) {
681 		WARN("%s: unsupported attribute desc count %u.\n",
682 		     __func__, obj->desc.emad_count);
683 		return -EINVAL;
684 	}
685 
686 	for (size_t emad_num = 0; emad_num < obj->desc.emad_count; emad_num++) {
687 		size_t size;
688 		size_t count;
689 		size_t expected_size;
690 		size_t total_page_count;
691 		size_t emad_size;
692 		size_t desc_size;
693 		size_t header_emad_size;
694 		uint32_t offset;
695 		struct ffa_comp_mrd *comp;
696 		struct ffa_emad_v1_0 *emad;
697 
698 		emad = spmc_shmem_obj_get_emad(&obj->desc, emad_num,
699 					       ffa_version, &emad_size);
700 		if (emad == NULL) {
701 			WARN("%s: invalid emad structure.\n", __func__);
702 			return -EINVAL;
703 		}
704 
705 		/*
706 		 * Validate the calculated emad address resides within the
707 		 * descriptor.
708 		 */
709 		if ((uintptr_t) emad >=
710 		    (uintptr_t)((uint8_t *) &obj->desc + obj->desc_size)) {
711 			WARN("Invalid emad access.\n");
712 			return -EINVAL;
713 		}
714 
715 		offset = emad->comp_mrd_offset;
716 
717 		if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
718 			desc_size =  sizeof(struct ffa_mtd_v1_0);
719 		} else {
720 			desc_size =  sizeof(struct ffa_mtd);
721 		}
722 
723 		header_emad_size = desc_size +
724 			(obj->desc.emad_count * emad_size);
725 
726 		if (offset < header_emad_size) {
727 			WARN("%s: invalid object, offset %u < header + emad %zu\n",
728 			     __func__, offset, header_emad_size);
729 			return -EINVAL;
730 		}
731 
732 		size = obj->desc_size;
733 
734 		if (offset > size) {
735 			WARN("%s: invalid object, offset %u > total size %zu\n",
736 			     __func__, offset, obj->desc_size);
737 			return -EINVAL;
738 		}
739 		size -= offset;
740 
741 		if (size < sizeof(struct ffa_comp_mrd)) {
742 			WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
743 			     __func__, offset, obj->desc_size);
744 			return -EINVAL;
745 		}
746 		size -= sizeof(struct ffa_comp_mrd);
747 
748 		count = size / sizeof(struct ffa_cons_mrd);
749 
750 		comp = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);
751 
752 		if (comp == NULL) {
753 			WARN("%s: invalid comp_mrd offset\n", __func__);
754 			return -EINVAL;
755 		}
756 
757 		if (comp->address_range_count != count) {
758 			WARN("%s: invalid object, desc count %u != %zu\n",
759 			     __func__, comp->address_range_count, count);
760 			return -EINVAL;
761 		}
762 
763 		expected_size = offset + sizeof(*comp) +
764 				spmc_shmem_obj_ffa_constituent_size(obj,
765 								    ffa_version);
766 
767 		if (expected_size != obj->desc_size) {
768 			WARN("%s: invalid object, computed size %zu != size %zu\n",
769 			       __func__, expected_size, obj->desc_size);
770 			return -EINVAL;
771 		}
772 
773 		if (obj->desc_filled < obj->desc_size) {
774 			/*
775 			 * The whole descriptor has not yet been received.
776 			 * Skip final checks.
777 			 */
778 			return 0;
779 		}
780 
781 		/*
782 		 * The offset provided to the composite memory region descriptor
783 		 * should be consistent across endpoint descriptors. Store the
784 		 * first entry and compare against subsequent entries.
785 		 */
786 		if (comp_mrd_offset == 0) {
787 			comp_mrd_offset = offset;
788 		} else {
789 			if (comp_mrd_offset != offset) {
790 				ERROR("%s: mismatching offsets provided, %u != %u\n",
791 				       __func__, offset, comp_mrd_offset);
792 				return -EINVAL;
793 			}
794 		}
795 
796 		total_page_count = 0;
797 
798 		for (size_t i = 0; i < count; i++) {
799 			total_page_count +=
800 				comp->address_range_array[i].page_count;
801 		}
802 		if (comp->total_page_count != total_page_count) {
803 			WARN("%s: invalid object, desc total_page_count %u != %zu\n",
804 			     __func__, comp->total_page_count,
805 			     total_page_count);
806 			return -EINVAL;
807 		}
808 	}
809 	return 0;
810 }
811 
812 /**
813  * spmc_shmem_check_state_obj - Check if the descriptor describes memory
814  *				regions that are currently involved in an
815  *				existing memory transaction. This implies that
816  *				the memory is not in a valid state for lending.
817  * @obj:    Object containing ffa_memory_region_descriptor.
818  *
819  * Return: 0 if object is valid, -EINVAL if invalid memory state.
820  */
821 static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj,
822 				      uint32_t ffa_version)
823 {
824 	size_t obj_offset = 0;
825 	struct spmc_shmem_obj *inflight_obj;
826 
827 	struct ffa_comp_mrd *other_mrd;
828 	struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj,
829 								  ffa_version);
830 
831 	if (requested_mrd == NULL) {
832 		return -EINVAL;
833 	}
834 
835 	inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
836 					       &obj_offset);
837 
838 	while (inflight_obj != NULL) {
839 		/*
840 		 * Don't compare the transaction to itself or to partially
841 		 * transmitted descriptors.
842 		 */
843 		if ((obj->desc.handle != inflight_obj->desc.handle) &&
844 		    (obj->desc_size == obj->desc_filled)) {
845 			other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj,
846 							  FFA_VERSION_COMPILED);
847 			if (other_mrd == NULL) {
848 				return -EINVAL;
849 			}
850 			if (overlapping_memory_regions(requested_mrd,
851 						       other_mrd)) {
852 				return -EINVAL;
853 			}
854 		}
855 
856 		inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
857 						       &obj_offset);
858 	}
859 	return 0;
860 }
861 
862 static long spmc_ffa_fill_desc(struct mailbox *mbox,
863 			       struct spmc_shmem_obj *obj,
864 			       uint32_t fragment_length,
865 			       ffa_mtd_flag32_t mtd_flag,
866 			       uint32_t ffa_version,
867 			       void *smc_handle)
868 {
869 	int ret;
870 	size_t emad_size;
871 	uint32_t handle_low;
872 	uint32_t handle_high;
873 	struct ffa_emad_v1_0 *emad;
874 	struct ffa_emad_v1_0 *other_emad;
875 
876 	if (mbox->rxtx_page_count == 0U) {
877 		WARN("%s: buffer pair not registered.\n", __func__);
878 		ret = FFA_ERROR_INVALID_PARAMETER;
879 		goto err_arg;
880 	}
881 
882 	if (fragment_length > mbox->rxtx_page_count * PAGE_SIZE_4KB) {
883 		WARN("%s: bad fragment size %u > %u buffer size\n", __func__,
884 		     fragment_length, mbox->rxtx_page_count * PAGE_SIZE_4KB);
885 		ret = FFA_ERROR_INVALID_PARAMETER;
886 		goto err_arg;
887 	}
888 
889 	if (fragment_length > obj->desc_size - obj->desc_filled) {
890 		WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
891 		     fragment_length, obj->desc_size - obj->desc_filled);
892 		ret = FFA_ERROR_INVALID_PARAMETER;
893 		goto err_arg;
894 	}
895 
896 	memcpy((uint8_t *)&obj->desc + obj->desc_filled,
897 	       (uint8_t *) mbox->tx_buffer, fragment_length);
898 
899 	/* Ensure that the sender ID resides in the normal world. */
900 	if (ffa_is_secure_world_id(obj->desc.sender_id)) {
901 		WARN("%s: Invalid sender ID 0x%x.\n",
902 		     __func__, obj->desc.sender_id);
903 		ret = FFA_ERROR_DENIED;
904 		goto err_arg;
905 	}
906 
907 	/* Ensure the NS bit is set to 0. */
908 	if ((obj->desc.memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
909 		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
910 		ret = FFA_ERROR_INVALID_PARAMETER;
911 		goto err_arg;
912 	}
913 
914 	/*
915 	 * We don't currently support any optional flags so ensure none are
916 	 * requested.
917 	 */
918 	if (obj->desc.flags != 0U && mtd_flag != 0U &&
919 	    (obj->desc.flags != mtd_flag)) {
920 		WARN("%s: invalid memory transaction flags %u != %u\n",
921 		     __func__, obj->desc.flags, mtd_flag);
922 		ret = FFA_ERROR_INVALID_PARAMETER;
923 		goto err_arg;
924 	}
925 
926 	if (obj->desc_filled == 0U) {
927 		/* First fragment, descriptor header has been copied */
928 		obj->desc.handle = spmc_shmem_obj_state.next_handle++;
929 		obj->desc.flags |= mtd_flag;
930 	}
931 
932 	obj->desc_filled += fragment_length;
933 	ret = spmc_shmem_check_obj(obj, ffa_version);
934 	if (ret != 0) {
935 		ret = FFA_ERROR_INVALID_PARAMETER;
936 		goto err_bad_desc;
937 	}
938 
939 	handle_low = (uint32_t)obj->desc.handle;
940 	handle_high = obj->desc.handle >> 32;
941 
942 	if (obj->desc_filled != obj->desc_size) {
943 		SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
944 			 handle_high, obj->desc_filled,
945 			 (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
946 	}
947 
948 	/* The full descriptor has been received, perform any final checks. */
949 
950 	/*
951 	 * If a partition ID resides in the secure world validate that the
952 	 * partition ID is for a known partition. Ignore any partition ID
953 	 * belonging to the normal world as it is assumed the Hypervisor will
954 	 * have validated these.
955 	 */
956 	for (size_t i = 0; i < obj->desc.emad_count; i++) {
957 		emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
958 					       &emad_size);
959 		if (emad == NULL) {
960 			ret = FFA_ERROR_INVALID_PARAMETER;
961 			goto err_bad_desc;
962 		}
963 
964 		ffa_endpoint_id16_t ep_id = emad->mapd.endpoint_id;
965 
966 		if (ffa_is_secure_world_id(ep_id)) {
967 			if (spmc_get_sp_ctx(ep_id) == NULL) {
968 				WARN("%s: Invalid receiver id 0x%x\n",
969 				     __func__, ep_id);
970 				ret = FFA_ERROR_INVALID_PARAMETER;
971 				goto err_bad_desc;
972 			}
973 		}
974 	}
975 
976 	/* Ensure partition IDs are not duplicated. */
977 	for (size_t i = 0; i < obj->desc.emad_count; i++) {
978 		emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
979 					       &emad_size);
980 		if (emad == NULL) {
981 			ret = FFA_ERROR_INVALID_PARAMETER;
982 			goto err_bad_desc;
983 		}
984 		for (size_t j = i + 1; j < obj->desc.emad_count; j++) {
985 			other_emad = spmc_shmem_obj_get_emad(&obj->desc, j,
986 							     ffa_version,
987 							     &emad_size);
988 			if (other_emad == NULL) {
989 				ret = FFA_ERROR_INVALID_PARAMETER;
990 				goto err_bad_desc;
991 			}
992 
993 			if (emad->mapd.endpoint_id ==
994 				other_emad->mapd.endpoint_id) {
995 				WARN("%s: Duplicated endpoint id 0x%x\n",
996 				     __func__, emad->mapd.endpoint_id);
997 				ret = FFA_ERROR_INVALID_PARAMETER;
998 				goto err_bad_desc;
999 			}
1000 		}
1001 	}
1002 
1003 	ret = spmc_shmem_check_state_obj(obj, ffa_version);
1004 	if (ret) {
1005 		ERROR("%s: invalid memory region descriptor.\n", __func__);
1006 		ret = FFA_ERROR_INVALID_PARAMETER;
1007 		goto err_bad_desc;
1008 	}
1009 
1010 	/*
1011 	 * Everything checks out, if the sender was using FF-A v1.0, convert
1012 	 * the descriptor format to use the v1.1 structures.
1013 	 */
1014 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1015 		struct spmc_shmem_obj *v1_1_obj;
1016 		uint64_t mem_handle;
1017 
1018 		/* Calculate the size that the v1.1 descriptor will require. */
1019 		size_t v1_1_desc_size =
1020 		    spmc_shm_get_v1_1_descriptor_size((void *) &obj->desc,
1021 						      fragment_length);
1022 		if (v1_1_desc_size == 0U) {
1023 			ERROR("%s: cannot determine size of descriptor.\n",
1024 			      __func__);
1025 			ret = FFA_ERROR_INVALID_PARAMETER;
1026 			goto err_arg;
1027 		}
1028 
1029 		/* Get a new obj to store the v1.1 descriptor. */
1030 		v1_1_obj =
1031 		    spmc_shmem_obj_alloc(&spmc_shmem_obj_state, v1_1_desc_size);
1032 
1033 		if (!v1_1_obj) {
1034 			ret = FFA_ERROR_NO_MEMORY;
1035 			goto err_arg;
1036 		}
1037 
1038 		/* Perform the conversion from v1.0 to v1.1. */
1039 		v1_1_obj->desc_size = v1_1_desc_size;
1040 		v1_1_obj->desc_filled = v1_1_desc_size;
1041 		if (!spmc_shm_convert_shmem_obj_from_v1_0(v1_1_obj, obj)) {
1042 			ERROR("%s: Could not convert mtd!\n", __func__);
1043 			ret = FFA_ERROR_INVALID_PARAMETER;
1044 			spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_1_obj);
1045 			goto err_arg;
1046 		}
1047 		/*
1048 		 * We're finished with the v1.0 descriptor so free it
1049 		 * and continue our checks with the new v1.1 descriptor.
1050 		 */
1051 		mem_handle = obj->desc.handle;
1052 		spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1053 		obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1054 		if (obj == NULL) {
1055 			ERROR("%s: Failed to find converted descriptor.\n",
1056 			     __func__);
1057 			ret = FFA_ERROR_INVALID_PARAMETER;
1058 			return spmc_ffa_error_return(smc_handle, ret);
1059 		}
1060 	}
1061 
1062 	/* Allow for platform specific operations to be performed. */
1063 	ret = plat_spmc_shmem_begin(&obj->desc);
1064 	if (ret != 0) {
1065 		goto err_arg;
1066 	}
1067 
1068 	SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
1069 		 0, 0, 0);
1070 
1071 err_bad_desc:
1072 err_arg:
1073 	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1074 	return spmc_ffa_error_return(smc_handle, ret);
1075 }
1076 
1077 /**
1078  * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
1079  * @client:             Client state.
1080  * @total_length:       Total length of shared memory descriptor.
1081  * @fragment_length:    Length of fragment of shared memory descriptor passed in
1082  *                      this call.
1083  * @address:            Not supported, must be 0.
1084  * @page_count:         Not supported, must be 0.
1085  * @smc_handle:         Handle passed to smc call. Used to return
1086  *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
1087  *
1088  * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
1089  * to share or lend memory from non-secure os to secure os (with no stream
1090  * endpoints).
1091  *
1092  * Return: 0 on success, error code on failure.
1093  */
1094 long spmc_ffa_mem_send(uint32_t smc_fid,
1095 			bool secure_origin,
1096 			uint64_t total_length,
1097 			uint32_t fragment_length,
1098 			uint64_t address,
1099 			uint32_t page_count,
1100 			void *cookie,
1101 			void *handle,
1102 			uint64_t flags)
1103 
1104 {
1105 	long ret;
1106 	struct spmc_shmem_obj *obj;
1107 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1108 	ffa_mtd_flag32_t mtd_flag;
1109 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1110 
1111 	if (address != 0U || page_count != 0U) {
1112 		WARN("%s: custom memory region for message not supported.\n",
1113 		     __func__);
1114 		return spmc_ffa_error_return(handle,
1115 					     FFA_ERROR_INVALID_PARAMETER);
1116 	}
1117 
1118 	if (secure_origin) {
1119 		WARN("%s: unsupported share direction.\n", __func__);
1120 		return spmc_ffa_error_return(handle,
1121 					     FFA_ERROR_INVALID_PARAMETER);
1122 	}
1123 
1124 	/*
1125 	 * Check if the descriptor is smaller than the v1.0 descriptor. The
1126 	 * descriptor cannot be smaller than this structure.
1127 	 */
1128 	if (fragment_length < sizeof(struct ffa_mtd_v1_0)) {
1129 		WARN("%s: bad first fragment size %u < %zu\n",
1130 		     __func__, fragment_length, sizeof(struct ffa_mtd_v1_0));
1131 		return spmc_ffa_error_return(handle,
1132 					     FFA_ERROR_INVALID_PARAMETER);
1133 	}
1134 
1135 	if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
1136 		mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
1137 	} else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
1138 		mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
1139 	} else {
1140 		WARN("%s: invalid memory management operation.\n", __func__);
1141 		return spmc_ffa_error_return(handle,
1142 					     FFA_ERROR_INVALID_PARAMETER);
1143 	}
1144 
1145 	spin_lock(&spmc_shmem_obj_state.lock);
1146 	obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
1147 	if (obj == NULL) {
1148 		ret = FFA_ERROR_NO_MEMORY;
1149 		goto err_unlock;
1150 	}
1151 
1152 	spin_lock(&mbox->lock);
1153 	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag,
1154 				 ffa_version, handle);
1155 	spin_unlock(&mbox->lock);
1156 
1157 	spin_unlock(&spmc_shmem_obj_state.lock);
1158 	return ret;
1159 
1160 err_unlock:
1161 	spin_unlock(&spmc_shmem_obj_state.lock);
1162 	return spmc_ffa_error_return(handle, ret);
1163 }
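
/*
 * Illustrative call flow (hypothetical lengths): a descriptor that fits
 * in the TX buffer completes in one call, larger ones are fragmented.
 *
 *   FFA_MEM_SHARE(total_length=0x200, fragment_length=0x200)
 *     -> FFA_SUCCESS, handle in w2/w3
 *
 *   FFA_MEM_SHARE(total_length=0x3000, fragment_length=0x1000)
 *     -> FFA_MEM_FRAG_RX(handle, 0x1000)
 *   FFA_MEM_FRAG_TX(handle, fragment_length=0x1000)
 *     -> FFA_MEM_FRAG_RX(handle, 0x2000)
 *   FFA_MEM_FRAG_TX(handle, fragment_length=0x1000)
 *     -> FFA_SUCCESS, handle in w2/w3
 */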
1164 
1165 /**
1166  * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
1167  * @client:             Client state.
1168  * @handle_low:         Handle_low value returned from FFA_MEM_FRAG_RX.
1169  * @handle_high:        Handle_high value returned from FFA_MEM_FRAG_RX.
1170  * @fragment_length:    Length of fragments transmitted.
1171  * @sender_id:          Vmid of sender in bits [31:16]
1172  * @smc_handle:         Handle passed to smc call. Used to return
1173  *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
1174  *
1175  * Return: @smc_handle on success, error code on failure.
1176  */
1177 long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
1178 			  bool secure_origin,
1179 			  uint64_t handle_low,
1180 			  uint64_t handle_high,
1181 			  uint32_t fragment_length,
1182 			  uint32_t sender_id,
1183 			  void *cookie,
1184 			  void *handle,
1185 			  uint64_t flags)
1186 {
1187 	long ret;
1188 	uint32_t desc_sender_id;
1189 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1190 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1191 
1192 	struct spmc_shmem_obj *obj;
1193 	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1194 
1195 	spin_lock(&spmc_shmem_obj_state.lock);
1196 
1197 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1198 	if (obj == NULL) {
1199 		WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
1200 		     __func__, mem_handle);
1201 		ret = FFA_ERROR_INVALID_PARAMETER;
1202 		goto err_unlock;
1203 	}
1204 
1205 	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
1206 	if (sender_id != desc_sender_id) {
1207 		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
1208 		     sender_id, desc_sender_id);
1209 		ret = FFA_ERROR_INVALID_PARAMETER;
1210 		goto err_unlock;
1211 	}
1212 
1213 	if (obj->desc_filled == obj->desc_size) {
1214 		WARN("%s: object desc already filled, %zu\n", __func__,
1215 		     obj->desc_filled);
1216 		ret = FFA_ERROR_INVALID_PARAMETER;
1217 		goto err_unlock;
1218 	}
1219 
1220 	spin_lock(&mbox->lock);
1221 	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, ffa_version,
1222 				 handle);
1223 	spin_unlock(&mbox->lock);
1224 
1225 	spin_unlock(&spmc_shmem_obj_state.lock);
1226 	return ret;
1227 
1228 err_unlock:
1229 	spin_unlock(&spmc_shmem_obj_state.lock);
1230 	return spmc_ffa_error_return(handle, ret);
1231 }
1232 
1233 /**
1234  * spmc_ffa_mem_retrieve_set_ns_bit - Set the NS bit in the response descriptor
1235  *				      if the caller implements a version greater
1236  *				      than FF-A 1.0 or if they have requested
1237  *				      the functionality.
1238  *				      TODO: We are assuming that the caller is
1239  *				      an SP. To support retrieval from the
1240  *				      normal world this function will need to be
1241  *				      expanded accordingly.
1242  * @resp:       Descriptor populated in callers RX buffer.
1243  * @sp_ctx:     Context of the calling SP.
1244  */
1245 void spmc_ffa_mem_retrieve_set_ns_bit(struct ffa_mtd *resp,
1246 			 struct secure_partition_desc *sp_ctx)
1247 {
1248 	if (sp_ctx->ffa_version > MAKE_FFA_VERSION(1, 0) ||
1249 	    sp_ctx->ns_bit_requested) {
1250 		/*
1251 		 * Currently memory senders must reside in the normal
1252 		 * world, and we do not have the functionality to change
1253 		 * the state of memory dynamically. Therefore we can always set
1254 		 * the NS bit to 1.
1255 		 */
1256 		resp->memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
1257 	}
1258 }
1259 
1260 /**
1261  * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
1262  * @smc_fid:            FID of SMC
1263  * @total_length:       Total length of retrieve request descriptor if this is
1264  *                      the first call. Otherwise (unsupported) must be 0.
1265  * @fragment_length:    Length of fragment of retrieve request descriptor passed
1266  *                      in this call. Only @fragment_length == @length is
1267  *                      supported by this implementation.
1268  * @address:            Not supported, must be 0.
1269  * @page_count:         Not supported, must be 0.
1270  * @smc_handle:         Handle passed to smc call. Used to return
1271  *                      FFA_MEM_RETRIEVE_RESP.
1272  *
1273  * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
1274  * Used by secure os to retrieve memory already shared by non-secure os.
1275  * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
1276  * the client must call FFA_MEM_FRAG_RX until the full response has been
1277  * received.
1278  *
1279  * Return: @handle on success, error code on failure.
1280  */
1281 long
1282 spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
1283 			  bool secure_origin,
1284 			  uint32_t total_length,
1285 			  uint32_t fragment_length,
1286 			  uint64_t address,
1287 			  uint32_t page_count,
1288 			  void *cookie,
1289 			  void *handle,
1290 			  uint64_t flags)
1291 {
1292 	int ret;
1293 	size_t buf_size;
1294 	size_t copy_size = 0;
1295 	size_t min_desc_size;
1296 	size_t out_desc_size = 0;
1297 
1298 	/*
1299 	 * Currently we are only accessing fields that are the same in both the
1300 	 * v1.0 and v1.1 mtd struct therefore we can use a v1.1 struct directly
1301 	 * here. We only need to validate against the appropriate struct size.
1302 	 */
1303 	struct ffa_mtd *resp;
1304 	const struct ffa_mtd *req;
1305 	struct spmc_shmem_obj *obj = NULL;
1306 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1307 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1308 	struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
1309 
1310 	if (!secure_origin) {
1311 		WARN("%s: unsupported retrieve req direction.\n", __func__);
1312 		return spmc_ffa_error_return(handle,
1313 					     FFA_ERROR_INVALID_PARAMETER);
1314 	}
1315 
1316 	if (address != 0U || page_count != 0U) {
1317 		WARN("%s: custom memory region not supported.\n", __func__);
1318 		return spmc_ffa_error_return(handle,
1319 					     FFA_ERROR_INVALID_PARAMETER);
1320 	}
1321 
1322 	spin_lock(&mbox->lock);
1323 
1324 	req = mbox->tx_buffer;
1325 	resp = mbox->rx_buffer;
1326 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1327 
1328 	if (mbox->rxtx_page_count == 0U) {
1329 		WARN("%s: buffer pair not registered.\n", __func__);
1330 		ret = FFA_ERROR_INVALID_PARAMETER;
1331 		goto err_unlock_mailbox;
1332 	}
1333 
1334 	if (mbox->state != MAILBOX_STATE_EMPTY) {
1335 		WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
1336 		ret = FFA_ERROR_DENIED;
1337 		goto err_unlock_mailbox;
1338 	}
1339 
1340 	if (fragment_length != total_length) {
1341 		WARN("%s: fragmented retrieve request not supported.\n",
1342 		     __func__);
1343 		ret = FFA_ERROR_INVALID_PARAMETER;
1344 		goto err_unlock_mailbox;
1345 	}
1346 
1347 	if (req->emad_count == 0U) {
1348 		WARN("%s: unsupported emad count %u.\n", __func__, req->emad_count);
1349 		ret = FFA_ERROR_INVALID_PARAMETER;
1350 		goto err_unlock_mailbox;
1351 	}
1352 
1353 	/* Determine the appropriate minimum descriptor size. */
1354 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1355 		min_desc_size = sizeof(struct ffa_mtd_v1_0);
1356 	} else {
1357 		min_desc_size = sizeof(struct ffa_mtd);
1358 	}
1359 	if (total_length < min_desc_size) {
1360 		WARN("%s: invalid length %u < %zu\n", __func__, total_length,
1361 		     min_desc_size);
1362 		ret = FFA_ERROR_INVALID_PARAMETER;
1363 		goto err_unlock_mailbox;
1364 	}
1365 
1366 	spin_lock(&spmc_shmem_obj_state.lock);
1367 
1368 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1369 	if (obj == NULL) {
1370 		ret = FFA_ERROR_INVALID_PARAMETER;
1371 		goto err_unlock_all;
1372 	}
1373 
1374 	if (obj->desc_filled != obj->desc_size) {
1375 		WARN("%s: incomplete object desc filled %zu < size %zu\n",
1376 		     __func__, obj->desc_filled, obj->desc_size);
1377 		ret = FFA_ERROR_INVALID_PARAMETER;
1378 		goto err_unlock_all;
1379 	}
1380 
1381 	if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
1382 		WARN("%s: wrong sender id 0x%x != 0x%x\n",
1383 		     __func__, req->sender_id, obj->desc.sender_id);
1384 		ret = FFA_ERROR_INVALID_PARAMETER;
1385 		goto err_unlock_all;
1386 	}
1387 
1388 	if (req->emad_count != 0U && req->tag != obj->desc.tag) {
1389 		WARN("%s: wrong tag 0x%lx != 0x%lx\n",
1390 		     __func__, req->tag, obj->desc.tag);
1391 		ret = FFA_ERROR_INVALID_PARAMETER;
1392 		goto err_unlock_all;
1393 	}
1394 
1395 	if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
1396 		WARN("%s: mistmatch of endpoint counts %u != %u\n",
1397 		     __func__, req->emad_count, obj->desc.emad_count);
1398 		ret = FFA_ERROR_INVALID_PARAMETER;
1399 		goto err_unlock_all;
1400 	}
1401 
1402 	/* Ensure the NS bit is set to 0 in the request. */
1403 	if ((req->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
1404 		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
1405 		ret = FFA_ERROR_INVALID_PARAMETER;
1406 		goto err_unlock_all;
1407 	}
1408 
1409 	if (req->flags != 0U) {
1410 		if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
1411 		    (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
1412 			/*
1413 			 * If the retrieve request specifies the memory
1414 			 * transaction type, ensure it matches what we expect.
1415 			 */
1416 			WARN("%s: wrong mem transaction flags %x != %x\n",
1417 			__func__, req->flags, obj->desc.flags);
1418 			ret = FFA_ERROR_INVALID_PARAMETER;
1419 			goto err_unlock_all;
1420 		}
1421 
1422 		if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
1423 		    req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
1424 			/*
1425 			 * Current implementation does not support donate and
1426 			 * it supports no other flags.
1427 			 */
1428 			WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
1429 			ret = FFA_ERROR_INVALID_PARAMETER;
1430 			goto err_unlock_all;
1431 		}
1432 	}
1433 
1434 	/* Validate the caller is a valid participant. */
1435 	if (!spmc_shmem_obj_validate_id(&obj->desc, sp_ctx->sp_id)) {
1436 		WARN("%s: Invalid endpoint ID (0x%x).\n",
1437 			__func__, sp_ctx->sp_id);
1438 		ret = FFA_ERROR_INVALID_PARAMETER;
1439 		goto err_unlock_all;
1440 	}
1441 
1442 	/* Validate that the provided emad offset and structure is valid.*/
1443 	for (size_t i = 0; i < req->emad_count; i++) {
1444 		size_t emad_size;
1445 		struct ffa_emad_v1_0 *emad;
1446 
1447 		emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
1448 					       &emad_size);
1449 		if (emad == NULL) {
1450 			WARN("%s: invalid emad structure.\n", __func__);
1451 			ret = FFA_ERROR_INVALID_PARAMETER;
1452 			goto err_unlock_all;
1453 		}
1454 
1455 		if ((uintptr_t) emad >= (uintptr_t)
1456 					((uint8_t *) req + total_length)) {
1457 			WARN("Invalid emad access.\n");
1458 			ret = FFA_ERROR_INVALID_PARAMETER;
1459 			goto err_unlock_all;
1460 		}
1461 	}
1462 
1463 	/*
1464 	 * Validate all the endpoints match in the case of multiple
1465 	 * borrowers. We don't mandate that the order of the borrowers
1466 	 * must match in the descriptors therefore check to see if the
1467 	 * endpoints match in any order.
1468 	 */
1469 	for (size_t i = 0; i < req->emad_count; i++) {
1470 		bool found = false;
1471 		size_t emad_size;
1472 		struct ffa_emad_v1_0 *emad;
1473 		struct ffa_emad_v1_0 *other_emad;
1474 
1475 		emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
1476 					       &emad_size);
1477 		if (emad == NULL) {
1478 			ret = FFA_ERROR_INVALID_PARAMETER;
1479 			goto err_unlock_all;
1480 		}
1481 
1482 		for (size_t j = 0; j < obj->desc.emad_count; j++) {
1483 			other_emad = spmc_shmem_obj_get_emad(
1484 					&obj->desc, j, MAKE_FFA_VERSION(1, 1),
1485 					&emad_size);
1486 
1487 			if (other_emad == NULL) {
1488 				ret = FFA_ERROR_INVALID_PARAMETER;
1489 				goto err_unlock_all;
1490 			}
1491 
1492 			if (req->emad_count &&
1493 			    emad->mapd.endpoint_id ==
1494 			    other_emad->mapd.endpoint_id) {
1495 				found = true;
1496 				break;
1497 			}
1498 		}
1499 
1500 		if (!found) {
1501 			WARN("%s: invalid receiver id (0x%x).\n",
1502 			     __func__, emad->mapd.endpoint_id);
1503 			ret = FFA_ERROR_INVALID_PARAMETER;
1504 			goto err_unlock_all;
1505 		}
1506 	}
1507 
1508 	mbox->state = MAILBOX_STATE_FULL;
1509 
1510 	if (req->emad_count != 0U) {
1511 		obj->in_use++;
1512 	}
1513 
1514 	/*
1515 	 * If the caller is v1.0 convert the descriptor, otherwise copy
1516 	 * directly.
1517 	 */
1518 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1519 		ret = spmc_populate_ffa_v1_0_descriptor(resp, obj, buf_size, 0,
1520 							&copy_size,
1521 							&out_desc_size);
1522 		if (ret != 0U) {
1523 			ERROR("%s: Failed to process descriptor.\n", __func__);
1524 			goto err_unlock_all;
1525 		}
1526 	} else {
1527 		copy_size = MIN(obj->desc_size, buf_size);
1528 		out_desc_size = obj->desc_size;
1529 
1530 		memcpy(resp, &obj->desc, copy_size);
1531 	}
1532 
1533 	/* Set the NS bit in the response if applicable. */
1534 	spmc_ffa_mem_retrieve_set_ns_bit(resp, sp_ctx);
1535 
1536 	spin_unlock(&spmc_shmem_obj_state.lock);
1537 	spin_unlock(&mbox->lock);
1538 
1539 	SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, out_desc_size,
1540 		 copy_size, 0, 0, 0, 0, 0);
1541 
1542 err_unlock_all:
1543 	spin_unlock(&spmc_shmem_obj_state.lock);
1544 err_unlock_mailbox:
1545 	spin_unlock(&mbox->lock);
1546 	return spmc_ffa_error_return(handle, ret);
1547 }
1548 
1549 /**
1550  * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
1551  * @client:             Client state.
1552  * @handle_low:         Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
1553  * @handle_high:        Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
1554  * @fragment_offset:    Byte offset in descriptor to resume at.
1555  * @sender_id:          Bit[31:16]: Endpoint id of sender if client is a
1556  *                      hypervisor. 0 otherwise.
1557  * @smc_handle:         Handle passed to smc call. Used to return
1558  *                      FFA_MEM_FRAG_TX.
1559  *
1560  * Return: @smc_handle on success, error code on failure.
1561  */
1562 long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
1563 			  bool secure_origin,
1564 			  uint32_t handle_low,
1565 			  uint32_t handle_high,
1566 			  uint32_t fragment_offset,
1567 			  uint32_t sender_id,
1568 			  void *cookie,
1569 			  void *handle,
1570 			  uint64_t flags)
1571 {
1572 	int ret;
1573 	uint8_t *src;
1574 	size_t buf_size;
1575 	size_t copy_size;
1576 	size_t full_copy_size;
1577 	uint32_t desc_sender_id;
1578 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1579 	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1580 	struct spmc_shmem_obj *obj;
1581 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1582 
1583 	if (!secure_origin) {
1584 		WARN("%s: can only be called from swld.\n",
1585 		     __func__);
1586 		return spmc_ffa_error_return(handle,
1587 					     FFA_ERROR_INVALID_PARAMETER);
1588 	}
1589 
1590 	spin_lock(&spmc_shmem_obj_state.lock);
1591 
1592 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1593 	if (obj == NULL) {
1594 		WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
1595 		     __func__, mem_handle);
1596 		ret = FFA_ERROR_INVALID_PARAMETER;
1597 		goto err_unlock_shmem;
1598 	}
1599 
1600 	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
1601 	if (sender_id != 0U && sender_id != desc_sender_id) {
1602 		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
1603 		     sender_id, desc_sender_id);
1604 		ret = FFA_ERROR_INVALID_PARAMETER;
1605 		goto err_unlock_shmem;
1606 	}
1607 
1608 	if (fragment_offset >= obj->desc_size) {
1609 		WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
1610 		     __func__, fragment_offset, obj->desc_size);
1611 		ret = FFA_ERROR_INVALID_PARAMETER;
1612 		goto err_unlock_shmem;
1613 	}
1614 
1615 	spin_lock(&mbox->lock);
1616 
1617 	if (mbox->rxtx_page_count == 0U) {
1618 		WARN("%s: buffer pair not registered.\n", __func__);
1619 		ret = FFA_ERROR_INVALID_PARAMETER;
1620 		goto err_unlock_all;
1621 	}
1622 
1623 	if (mbox->state != MAILBOX_STATE_EMPTY) {
1624 		WARN("%s: RX Buffer is full!\n", __func__);
1625 		ret = FFA_ERROR_DENIED;
1626 		goto err_unlock_all;
1627 	}
1628 
1629 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1630 
1631 	mbox->state = MAILBOX_STATE_FULL;
1632 
1633 	/*
1634 	 * If the caller is v1.0 convert the descriptor, otherwise copy
1635 	 * directly.
1636 	 */
1637 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1638 		size_t out_desc_size;
1639 
1640 		ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
1641 							buf_size,
1642 							fragment_offset,
1643 							&copy_size,
1644 							&out_desc_size);
1645 		if (ret != 0U) {
1646 			ERROR("%s: Failed to process descriptor.\n", __func__);
1647 			goto err_unlock_all;
1648 		}
1649 	} else {
1650 		full_copy_size = obj->desc_size - fragment_offset;
1651 		copy_size = MIN(full_copy_size, buf_size);
1652 
1653 		src = (uint8_t *)&obj->desc;
1654 
1655 		memcpy(mbox->rx_buffer, src + fragment_offset, copy_size);
1656 	}
1657 
1658 	spin_unlock(&mbox->lock);
1659 	spin_unlock(&spmc_shmem_obj_state.lock);
1660 
1661 	SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
1662 		 copy_size, sender_id, 0, 0, 0);
1663 
1664 err_unlock_all:
1665 	spin_unlock(&mbox->lock);
1666 err_unlock_shmem:
1667 	spin_unlock(&spmc_shmem_obj_state.lock);
1668 	return spmc_ffa_error_return(handle, ret);
1669 }
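
/*
 * Retrieval loop sketch (hypothetical lengths): an SP whose RX buffer
 * is smaller than the 0x3000-byte descriptor resumes with increasing
 * offsets until the full descriptor has been received.
 *
 *   FFA_MEM_RETRIEVE_REQ(...)
 *     -> FFA_MEM_RETRIEVE_RESP(total=0x3000, copied=0x1000)
 *   FFA_MEM_FRAG_RX(handle, fragment_offset=0x1000)
 *     -> FFA_MEM_FRAG_TX(handle, copied=0x1000)
 *   FFA_MEM_FRAG_RX(handle, fragment_offset=0x2000)
 *     -> FFA_MEM_FRAG_TX(handle, copied=0x1000)
 */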
1670 
1671 /**
1672  * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
1673  * @client:             Client state.
1674  *
1675  * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
1676  * Used by secure os to release previously shared memory to non-secure os.
1677  *
1678  * The handle to release must be in the client's (secure os's) transmit buffer.
1679  *
1680  * Return: 0 on success, error code on failure.
1681  */
1682 int spmc_ffa_mem_relinquish(uint32_t smc_fid,
1683 			    bool secure_origin,
1684 			    uint32_t handle_low,
1685 			    uint32_t handle_high,
1686 			    uint32_t fragment_offset,
1687 			    uint32_t sender_id,
1688 			    void *cookie,
1689 			    void *handle,
1690 			    uint64_t flags)
1691 {
1692 	int ret;
1693 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1694 	struct spmc_shmem_obj *obj;
1695 	const struct ffa_mem_relinquish_descriptor *req;
1696 	struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
1697 
1698 	if (!secure_origin) {
1699 		WARN("%s: unsupported relinquish direction.\n", __func__);
1700 		return spmc_ffa_error_return(handle,
1701 					     FFA_ERROR_INVALID_PARAMETER);
1702 	}
1703 
1704 	spin_lock(&mbox->lock);
1705 
1706 	if (mbox->rxtx_page_count == 0U) {
1707 		WARN("%s: buffer pair not registered.\n", __func__);
1708 		ret = FFA_ERROR_INVALID_PARAMETER;
1709 		goto err_unlock_mailbox;
1710 	}
1711 
1712 	req = mbox->tx_buffer;
1713 
1714 	if (req->flags != 0U) {
1715 		WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
1716 		ret = FFA_ERROR_INVALID_PARAMETER;
1717 		goto err_unlock_mailbox;
1718 	}
1719 
1720 	if (req->endpoint_count == 0) {
1721 		WARN("%s: endpoint count cannot be 0.\n", __func__);
1722 		ret = FFA_ERROR_INVALID_PARAMETER;
1723 		goto err_unlock_mailbox;
1724 	}
1725 
1726 	spin_lock(&spmc_shmem_obj_state.lock);
1727 
1728 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1729 	if (obj == NULL) {
1730 		ret = FFA_ERROR_INVALID_PARAMETER;
1731 		goto err_unlock_all;
1732 	}
1733 
1734 	/*
1735 	 * Validate the endpoint ID was populated correctly. We don't currently
1736 	 * support proxy endpoints so the endpoint count should always be 1.
1737 	 */
1738 	if (req->endpoint_count != 1U) {
1739 		WARN("%s: unsupported endpoint count %u != 1\n", __func__,
1740 		     req->endpoint_count);
1741 		ret = FFA_ERROR_INVALID_PARAMETER;
1742 		goto err_unlock_all;
1743 	}
1744 
1745 	/* Validate provided endpoint ID matches the partition ID. */
1746 	if (req->endpoint_array[0] != sp_ctx->sp_id) {
1747 		WARN("%s: invalid endpoint ID %u != %u\n", __func__,
1748 		     req->endpoint_array[0], sp_ctx->sp_id);
1749 		ret = FFA_ERROR_INVALID_PARAMETER;
1750 		goto err_unlock_all;
1751 	}
1752 
1753 	/* Validate the caller is a valid participant. */
1754 	if (!spmc_shmem_obj_validate_id(&obj->desc, sp_ctx->sp_id)) {
1755 		WARN("%s: Invalid endpoint ID (0x%x).\n",
1756 			__func__, req->endpoint_array[0]);
1757 		ret = FFA_ERROR_INVALID_PARAMETER;
1758 		goto err_unlock_all;
1759 	}
1760 
1761 	if (obj->in_use == 0U) {
1762 		ret = FFA_ERROR_INVALID_PARAMETER;
1763 		goto err_unlock_all;
1764 	}
1765 	obj->in_use--;
1766 
1767 	spin_unlock(&spmc_shmem_obj_state.lock);
1768 	spin_unlock(&mbox->lock);
1769 
1770 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1771 
1772 err_unlock_all:
1773 	spin_unlock(&spmc_shmem_obj_state.lock);
1774 err_unlock_mailbox:
1775 	spin_unlock(&mbox->lock);
1776 	return spmc_ffa_error_return(handle, ret);
1777 }
1778 
1779 /**
1780  * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
1781  * @client:         Client state.
1782  * @handle_low:     Unique handle of shared memory object to reclaim. Bit[31:0].
1783  * @handle_high:    Unique handle of shared memory object to reclaim.
1784  *                  Bit[63:32].
1785  * @flags:          Unsupported, ignored.
1786  *
1787  * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
1788  * Used by non-secure os to reclaim memory previously shared with secure os.
1789  *
1790  * Return: 0 on success, error code on failure.
1791  */
1792 int spmc_ffa_mem_reclaim(uint32_t smc_fid,
1793 			 bool secure_origin,
1794 			 uint32_t handle_low,
1795 			 uint32_t handle_high,
1796 			 uint32_t mem_flags,
1797 			 uint64_t x4,
1798 			 void *cookie,
1799 			 void *handle,
1800 			 uint64_t flags)
1801 {
1802 	int ret;
1803 	struct spmc_shmem_obj *obj;
1804 	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1805 
1806 	if (secure_origin) {
1807 		WARN("%s: unsupported reclaim direction.\n", __func__);
1808 		return spmc_ffa_error_return(handle,
1809 					     FFA_ERROR_INVALID_PARAMETER);
1810 	}
1811 
1812 	if (mem_flags != 0U) {
1813 		WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
1814 		return spmc_ffa_error_return(handle,
1815 					     FFA_ERROR_INVALID_PARAMETER);
1816 	}
1817 
1818 	spin_lock(&spmc_shmem_obj_state.lock);
1819 
1820 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1821 	if (obj == NULL) {
1822 		ret = FFA_ERROR_INVALID_PARAMETER;
1823 		goto err_unlock;
1824 	}
1825 	if (obj->in_use != 0U) {
1826 		ret = FFA_ERROR_DENIED;
1827 		goto err_unlock;
1828 	}
1829 
1830 	/* Allow for platform specific operations to be performed. */
1831 	ret = plat_spmc_shmem_reclaim(&obj->desc);
1832 	if (ret != 0) {
1833 		goto err_unlock;
1834 	}
1835 
1836 	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1837 	spin_unlock(&spmc_shmem_obj_state.lock);
1838 
1839 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1840 
1841 err_unlock:
1842 	spin_unlock(&spmc_shmem_obj_state.lock);
1843 	return spmc_ffa_error_return(handle, ret);
1844 }
1845
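/*
 * End-to-end lifecycle (sketch): the handle returned by FFA_MEM_SHARE
 * or FFA_MEM_LEND ties the calls in this file together.
 *
 *   NWd: FFA_MEM_SHARE        -> obj allocated, in_use == 0
 *   SP:  FFA_MEM_RETRIEVE_REQ -> in_use incremented
 *   SP:  FFA_MEM_RELINQUISH   -> in_use decremented
 *   NWd: FFA_MEM_RECLAIM      -> obj freed (denied while in_use != 0)
 */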