xref: /rk3399_ARM-atf/services/std_svc/spm/el3_spmc/spmc_shared_mem.c (revision 31df06328112b8f2f1e94709a77dbb195dd92c34)
1 /*
2  * Copyright (c) 2022-2023, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 #include <assert.h>
7 #include <errno.h>
8 #include <inttypes.h>
9 
10 #include <common/debug.h>
11 #include <common/runtime_svc.h>
12 #include <lib/object_pool.h>
13 #include <lib/spinlock.h>
14 #include <lib/xlat_tables/xlat_tables_v2.h>
15 #include <services/ffa_svc.h>
16 #include "spmc.h"
17 #include "spmc_shared_mem.h"
18 
19 #include <platform_def.h>
20 
21 /**
22  * struct spmc_shmem_obj - Shared memory object.
23  * @desc_size:      Size of @desc.
24  * @desc_filled:    Size of @desc already received.
25  * @in_use:         Number of clients that have called ffa_mem_retrieve_req
26  *                  without a matching ffa_mem_relinquish call.
27  * @desc:           FF-A memory region descriptor passed in ffa_mem_share.
28  */
29 struct spmc_shmem_obj {
30 	size_t desc_size;
31 	size_t desc_filled;
32 	size_t in_use;
33 	struct ffa_mtd desc;
34 };
35 
36 /*
37  * Declare our data structure to store the metadata of memory share requests.
38  * The main datastore is allocated on a per-platform basis to ensure enough
39  * storage can be made available.
40  * The address of the datastore will be populated by the SPMC during its
41  * initialization.
42  */
43 
44 struct spmc_shmem_obj_state spmc_shmem_obj_state = {
45 	/* Start the handle value high so the top 32 bits are exercised quickly. */
46 	.next_handle = 0xffffffc0U,
47 };
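/*
 * Illustrative sketch, not part of this file: the SPMC is expected to wire
 * up the datastore during its initialization, broadly as below. The hook
 * name follows the TF-A EL3 SPMC porting interface; treat the exact
 * signature and call site as assumptions here.
 *
 *	uint8_t *datastore;
 *	size_t datastore_size;
 *
 *	if (plat_spmc_shmem_datastore_get(&datastore, &datastore_size) != 0) {
 *		panic();
 *	}
 *	spmc_shmem_obj_state.data = datastore;
 *	spmc_shmem_obj_state.data_size = datastore_size;
 */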
48 
49 /**
50  * spmc_shmem_obj_size - Convert from descriptor size to object size.
51  * @desc_size:  Size of struct ffa_memory_region_descriptor object.
52  *
53  * Return: Size of struct spmc_shmem_obj object.
54  */
55 static size_t spmc_shmem_obj_size(size_t desc_size)
56 {
57 	return desc_size + offsetof(struct spmc_shmem_obj, desc);
58 }
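/*
 * Worked example: on AArch64 the three size_t fields place @desc at offset
 * 24 (assuming 8-byte alignment of struct ffa_mtd), so a 0x90-byte
 * descriptor consumes 24 + 0x90 = 168 bytes of the datastore, and the next
 * object starts immediately after it.
 */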
59 
60 /**
61  * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
62  * @state:      Global state.
63  * @desc_size:  Size of struct ffa_memory_region_descriptor object that
64  *              allocated object will hold.
65  *
66  * Return: Pointer to newly allocated object, or %NULL if there is not
67  *         enough space left. The returned pointer is only valid while
68  *         @state is locked; to use it again after unlocking @state,
69  *         spmc_shmem_obj_lookup must be called.
70  */
71 static struct spmc_shmem_obj *
72 spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
73 {
74 	struct spmc_shmem_obj *obj;
75 	size_t free = state->data_size - state->allocated;
76 	size_t obj_size;
77 
78 	if (state->data == NULL) {
79 		ERROR("Missing shmem datastore!\n");
80 		return NULL;
81 	}
82 
83 	/* Ensure that the descriptor size is 16-byte aligned. */
84 	if (!is_aligned(desc_size, 16)) {
85 		WARN("%s(0x%zx) desc_size not 16-byte aligned\n",
86 		     __func__, desc_size);
87 		return NULL;
88 	}
89 
90 	obj_size = spmc_shmem_obj_size(desc_size);
91 
92 	/* Ensure the obj size has not overflowed. */
93 	if (obj_size < desc_size) {
94 		WARN("%s(0x%zx) desc_size overflow\n",
95 		     __func__, desc_size);
96 		return NULL;
97 	}
98 
99 	if (obj_size > free) {
100 		WARN("%s(0x%zx) failed, free 0x%zx\n",
101 		     __func__, desc_size, free);
102 		return NULL;
103 	}
104 	obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
105 	obj->desc = (struct ffa_mtd) {0};
106 	obj->desc_size = desc_size;
107 	obj->desc_filled = 0;
108 	obj->in_use = 0;
109 	state->allocated += obj_size;
110 	return obj;
111 }
112 
113 /**
114  * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
115  * @state:      Global state.
116  * @obj:        Object to free.
117  *
118  * Release memory used by @obj. Other objects may move, so on return all
119  * pointers to struct spmc_shmem_obj objects should be considered invalid, not
120  * just @obj.
121  *
122  * The current implementation always compacts the remaining objects to simplify
123  * the allocator and to avoid fragmentation.
124  */
125 
126 static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
127 				  struct spmc_shmem_obj *obj)
128 {
129 	size_t free_size = spmc_shmem_obj_size(obj->desc_size);
130 	uint8_t *shift_dest = (uint8_t *)obj;
131 	uint8_t *shift_src = shift_dest + free_size;
132 	size_t shift_size = state->allocated - (shift_src - state->data);
133 
134 	if (shift_size != 0U) {
135 		memmove(shift_dest, shift_src, shift_size);
136 	}
137 	state->allocated -= free_size;
138 }
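/*
 * Compaction, illustrated: freeing B from a datastore laid out as
 *
 *	| A | B | C |   (allocated = size(A) + size(B) + size(C))
 *
 * memmove()s C down over B, leaving
 *
 *	| A | C |       (allocated = size(A) + size(C))
 *
 * which is why every outstanding struct spmc_shmem_obj pointer, not just
 * the one passed to spmc_shmem_obj_free(), must be re-looked-up afterwards.
 */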
139 
140 /**
141  * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
142  * @state:      Global state.
143  * @handle:     Unique handle of object to return.
144  *
145  * Return: struct spmc_shmem_obj object with handle matching @handle.
146  *         %NULL, if no object in @state->data has a matching handle.
147  */
148 static struct spmc_shmem_obj *
149 spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
150 {
151 	uint8_t *curr = state->data;
152 
153 	while (curr - state->data < state->allocated) {
154 		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
155 
156 		if (obj->desc.handle == handle) {
157 			return obj;
158 		}
159 		curr += spmc_shmem_obj_size(obj->desc_size);
160 	}
161 	return NULL;
162 }
163 
164 /**
165  * spmc_shmem_obj_get_next - Get the next memory object from an offset.
166  * @state:      Global state.
167  * @offset:     Offset used to track which objects have previously been
168  *              returned.
169  *
170  * Return: the next struct spmc_shmem_obj object from the provided offset.
171  *	   %NULL, if there are no more objects.
172  */
173 static struct spmc_shmem_obj *
174 spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
175 {
176 	uint8_t *curr = state->data + *offset;
177 
178 	if (curr - state->data < state->allocated) {
179 		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
180 
181 		*offset += spmc_shmem_obj_size(obj->desc_size);
182 
183 		return obj;
184 	}
185 	return NULL;
186 }
187 
188 /*******************************************************************************
189  * FF-A memory descriptor helper functions.
190  ******************************************************************************/
191 /**
192  * spmc_shmem_obj_get_emad - Get the emad from a given index depending on the
193  *                           client's FF-A version.
194  * @desc:         The memory transaction descriptor.
195  * @index:        The index of the emad element to be accessed.
196  * @ffa_version:  FF-A version of the provided structure.
197  * @emad_size:    Will be populated with the size of the returned emad
198  *                descriptor.
199  * Return: A pointer to the requested emad structure.
200  */
201 static void *
202 spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
203 			uint32_t ffa_version, size_t *emad_size)
204 {
205 	uint8_t *emad;
206 
207 	assert(index < desc->emad_count);
208 
209 	/*
210 	 * If the caller is using FF-A v1.0, interpret the descriptor as a v1.0
211 	 * format, otherwise assume it is a v1.1 format.
212 	 */
213 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
214 		emad = (uint8_t *)desc + offsetof(struct ffa_mtd_v1_0, emad);
215 		*emad_size = sizeof(struct ffa_emad_v1_0);
216 	} else {
217 		assert(is_aligned(desc->emad_offset, 16));
218 		emad = ((uint8_t *) desc + desc->emad_offset);
219 		*emad_size = desc->emad_size;
220 	}
221 
222 	assert(((uint64_t)index * (uint64_t)*emad_size) <= UINT32_MAX);
223 	return (emad + (*emad_size * index));
224 }
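/*
 * Illustrative use, a sketch only: walking every emad of a descriptor.
 * The stride written to *emad_size is sizeof(struct ffa_emad_v1_0) for
 * v1.0 callers but the sender-chosen desc->emad_size for v1.1 callers,
 * so consumers must advance by the returned stride, never by sizeof().
 *
 *	size_t stride;
 *
 *	for (uint32_t i = 0U; i < desc->emad_count; i++) {
 *		struct ffa_emad_v1_0 *e =
 *			spmc_shmem_obj_get_emad(desc, i, ffa_version, &stride);
 *		// validate or copy *e here
 *	}
 */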
225 
226 /**
227  * spmc_shmem_obj_get_comp_mrd - Get comp_mrd from an mtd struct based on the
228  *				 FF-A version of the descriptor.
229  * @obj:          Object containing ffa_memory_region_descriptor.
230  * @ffa_version:  FF-A version of the provided descriptor.
231  * Return: struct ffa_comp_mrd object corresponding to the composite memory
232  *	   region descriptor, or %NULL if its offset is not 8-byte aligned.
233  */
234 static struct ffa_comp_mrd *
235 spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
236 {
237 	size_t emad_size;
238 	/*
239 	 * The comp_mrd_offset field of the emad descriptor remains consistent
240 	 * between FF-A versions, therefore we can use the v1.0 descriptor here
241 	 * in all cases.
242 	 */
243 	struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
244 							     ffa_version,
245 							     &emad_size);
246 
247 	/* Ensure the composite descriptor offset is aligned. */
248 	if (!is_aligned(emad->comp_mrd_offset, 8)) {
249 		WARN("Unaligned composite memory region descriptor offset.\n");
250 		return NULL;
251 	}
252 
253 	return (struct ffa_comp_mrd *)
254 	       ((uint8_t *)(&obj->desc) + emad->comp_mrd_offset);
255 }
256 
257 /**
258  * spmc_shmem_obj_validate_id - Validate a partition ID is participating in
259  *				a given memory transaction.
260  * @obj:        The shared memory object containing the descriptor
261  *              of the memory transaction.
262  * @sp_id:      Partition ID to validate.
263  * Return: true if ID is valid, else false.
264  */
265 bool spmc_shmem_obj_validate_id(struct spmc_shmem_obj *obj, uint16_t sp_id)
266 {
267 	bool found = false;
268 	struct ffa_mtd *desc = &obj->desc;
269 	size_t desc_size = obj->desc_size;
270 
271 	/* Validate the partition is a valid participant. */
272 	for (unsigned int i = 0U; i < desc->emad_count; i++) {
273 		size_t emad_size;
274 		struct ffa_emad_v1_0 *emad;
275 
276 		emad = spmc_shmem_obj_get_emad(desc, i,
277 					       MAKE_FFA_VERSION(1, 1),
278 					       &emad_size);
279 		/*
280 		 * Validate the calculated emad address resides within the
281 		 * descriptor.
282 		 */
283 		if ((emad == NULL) || (uintptr_t) emad >=
284 		    (uintptr_t)((uint8_t *) desc + desc_size)) {
285 			VERBOSE("Invalid emad.\n");
286 			break;
287 		}
288 		if (sp_id == emad->mapd.endpoint_id) {
289 			found = true;
290 			break;
291 		}
292 	}
293 	return found;
294 }
295 
296 /*
297  * Compare two memory regions to determine if any range overlaps with another
298  * ongoing memory transaction.
299  */
300 static bool
301 overlapping_memory_regions(struct ffa_comp_mrd *region1,
302 			   struct ffa_comp_mrd *region2)
303 {
304 	uint64_t region1_start;
305 	uint64_t region1_size;
306 	uint64_t region1_end;
307 	uint64_t region2_start;
308 	uint64_t region2_size;
309 	uint64_t region2_end;
310 
311 	assert(region1 != NULL);
312 	assert(region2 != NULL);
313 
314 	if (region1 == region2) {
315 		return true;
316 	}
317 
318 	/*
319 	 * Check each memory region in the request against existing
320 	 * transactions.
321 	 */
322 	for (size_t i = 0; i < region1->address_range_count; i++) {
323 
324 		region1_start = region1->address_range_array[i].address;
325 		region1_size =
326 			region1->address_range_array[i].page_count *
327 			PAGE_SIZE_4KB;
328 		region1_end = region1_start + region1_size;
329 
330 		for (size_t j = 0; j < region2->address_range_count; j++) {
331 
332 			region2_start = region2->address_range_array[j].address;
333 			region2_size =
334 				region2->address_range_array[j].page_count *
335 				PAGE_SIZE_4KB;
336 			region2_end = region2_start + region2_size;
337 
338 			/* Check if regions are not overlapping. */
339 			if (!((region2_end <= region1_start) ||
340 			      (region1_end <= region2_start))) {
341 				WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
342 				     region1_start, region1_end,
343 				     region2_start, region2_end);
344 				return true;
345 			}
346 		}
347 	}
348 	return false;
349 }
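/*
 * Worked example of the test above: [0x8000, 0xA000) and [0x9000, 0xB000)
 * overlap, since neither 0xA000 <= 0x9000 nor 0xB000 <= 0x8000 holds;
 * [0x8000, 0x9000) and [0x9000, 0xA000) do not, because the first range
 * ends exactly where the second begins.
 */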
350 
351 /*******************************************************************************
352  * FF-A v1.0 Memory Descriptor Conversion Helpers.
353  ******************************************************************************/
354 /**
355  * spmc_shm_get_v1_1_descriptor_size - Calculate the required size for a v1.1
356  *                                     converted descriptor.
357  * @orig:       The original v1.0 memory transaction descriptor.
358  * @desc_size:  The size of the original v1.0 memory transaction descriptor.
359  *
360  * Return: the size required to store the descriptor in the v1.1 format.
361  */
362 static uint64_t
363 spmc_shm_get_v1_1_descriptor_size(struct ffa_mtd_v1_0 *orig, size_t desc_size)
364 {
365 	uint64_t size = 0;
366 	struct ffa_comp_mrd *mrd;
367 	struct ffa_emad_v1_0 *emad_array = orig->emad;
368 
369 	/* Get the size of the v1.1 descriptor. */
370 	size += sizeof(struct ffa_mtd);
371 
372 	/* Add the size of the emad descriptors. */
373 	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);
374 
375 	/* Add the size of the composite mrds. */
376 	size += sizeof(struct ffa_comp_mrd);
377 
378 	/* Add the size of the constituent mrds. */
379 	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
380 	      emad_array[0].comp_mrd_offset);
381 
382 	/* Add the size of the memory region descriptors. */
383 	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);
384 
385 	return size;
386 }
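/*
 * Worked example, assuming the sizes defined by the FF-A headers
 * (48-byte struct ffa_mtd, 16-byte struct ffa_emad_v1_0, 16-byte
 * struct ffa_comp_mrd, 16-byte struct ffa_cons_mrd): a v1.0 share with
 * two borrowers and three address ranges converts to
 * 48 + (2 * 16) + 16 + (3 * 16) = 144 bytes in the v1.1 layout.
 */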
387 
388 /**
389  * spmc_shm_get_v1_0_descriptor_size - Calculate the required size for a v1.0
390  *                                     converted descriptor.
391  * @orig:       The original v1.1 memory transaction descriptor.
392  * @desc_size:  The size of the original v1.1 memory transaction descriptor.
393  *
394  * Return: the size required to store the descriptor in the v1.0 format.
395  */
396 static size_t
397 spmc_shm_get_v1_0_descriptor_size(struct ffa_mtd *orig, size_t desc_size)
398 {
399 	size_t size = 0;
400 	struct ffa_comp_mrd *mrd;
401 	struct ffa_emad_v1_0 *emad_array = (struct ffa_emad_v1_0 *)
402 					   ((uint8_t *) orig +
403 					    orig->emad_offset);
404 
405 	/* Get the size of the v1.0 descriptor. */
406 	size += sizeof(struct ffa_mtd_v1_0);
407 
408 	/* Add the size of the v1.0 emad descriptors. */
409 	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);
410 
411 	/* Add the size of the composite mrds. */
412 	size += sizeof(struct ffa_comp_mrd);
413 
414 	/* Add the size of the constituent mrds. */
415 	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
416 	      emad_array[0].comp_mrd_offset);
417 
418 	/* Check the calculated address is within the memory descriptor. */
419 	if (((uintptr_t) mrd + sizeof(struct ffa_comp_mrd)) >
420 	    (uintptr_t)((uint8_t *) orig + desc_size)) {
421 		return 0;
422 	}
423 	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);
424 
425 	return size;
426 }
427 
428 /**
429  * spmc_shm_convert_shmem_obj_from_v1_0 - Converts a given v1.0 memory object.
430  * @out_obj:	The shared memory object to populate with the converted descriptor.
431  * @orig:	The shared memory object containing the v1.0 descriptor.
432  *
433  * Return: true if the conversion is successful, else false.
434  */
435 static bool
436 spmc_shm_convert_shmem_obj_from_v1_0(struct spmc_shmem_obj *out_obj,
437 				     struct spmc_shmem_obj *orig)
438 {
439 	struct ffa_mtd_v1_0 *mtd_orig = (struct ffa_mtd_v1_0 *) &orig->desc;
440 	struct ffa_mtd *out = &out_obj->desc;
441 	struct ffa_emad_v1_0 *emad_array_in;
442 	struct ffa_emad_v1_0 *emad_array_out;
443 	struct ffa_comp_mrd *mrd_in;
444 	struct ffa_comp_mrd *mrd_out;
445 
446 	size_t mrd_in_offset;
447 	size_t mrd_out_offset;
448 	size_t mrd_size = 0;
449 
450 	/* Populate the new descriptor format from the v1.0 struct. */
451 	out->sender_id = mtd_orig->sender_id;
452 	out->memory_region_attributes = mtd_orig->memory_region_attributes;
453 	out->flags = mtd_orig->flags;
454 	out->handle = mtd_orig->handle;
455 	out->tag = mtd_orig->tag;
456 	out->emad_count = mtd_orig->emad_count;
457 	out->emad_size = sizeof(struct ffa_emad_v1_0);
458 
459 	/*
460 	 * We will locate the emad descriptors directly after the ffa_mtd
461 	 * struct. This will be 8-byte aligned.
462 	 */
463 	out->emad_offset = sizeof(struct ffa_mtd);
464 
465 	emad_array_in = mtd_orig->emad;
466 	emad_array_out = (struct ffa_emad_v1_0 *)
467 			 ((uint8_t *) out + out->emad_offset);
468 
469 	/* Copy across the emad structs. */
470 	for (unsigned int i = 0U; i < out->emad_count; i++) {
471 		/* Bound check for emad array. */
472 		if (((uint8_t *)&emad_array_in[i] + sizeof(struct ffa_emad_v1_0)) >
473 		    ((uint8_t *) mtd_orig + orig->desc_size)) {
474 			VERBOSE("%s: Invalid mtd structure.\n", __func__);
475 			return false;
476 		}
477 		memcpy(&emad_array_out[i], &emad_array_in[i],
478 		       sizeof(struct ffa_emad_v1_0));
479 	}
480 
481 	/* Place the mrd descriptors after the end of the emad descriptors.*/
482 	mrd_in_offset = emad_array_in->comp_mrd_offset;
483 	mrd_out_offset = out->emad_offset + (out->emad_size * out->emad_count);
484 	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);
485 
486 	/* Add the size of the composite memory region descriptor. */
487 	mrd_size += sizeof(struct ffa_comp_mrd);
488 
489 	/* Find the mrd descriptor. */
490 	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);
491 
492 	/* Add the size of the constituent memory region descriptors. */
493 	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);
494 
495 	/*
496 	 * Update the offset in the emads by the delta between the input and
497 	 * output addresses.
498 	 */
499 	for (unsigned int i = 0U; i < out->emad_count; i++) {
500 		emad_array_out[i].comp_mrd_offset =
501 			emad_array_in[i].comp_mrd_offset +
502 			(mrd_out_offset - mrd_in_offset);
503 	}
504 
505 	/* Verify that we stay within bound of the memory descriptors. */
506 	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
507 	     (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
508 	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
509 	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
510 		ERROR("%s: Invalid mrd structure.\n", __func__);
511 		return false;
512 	}
513 
514 	/* Copy the mrd descriptors directly. */
515 	memcpy(mrd_out, mrd_in, mrd_size);
516 
517 	return true;
518 }
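/*
 * Resulting v1.1 layout produced by the conversion above, with offsets
 * assuming a 48-byte struct ffa_mtd:
 *
 *	0                48                 48 + N*16
 *	| struct ffa_mtd | N emads (16 B)   | comp_mrd | cons_mrd array |
 *
 * Every emad's comp_mrd_offset is rebased by (mrd_out_offset -
 * mrd_in_offset) so it points at the relocated composite descriptor.
 */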
519 
520 /**
521  * spmc_shm_convert_mtd_to_v1_0 - Converts a given v1.1 memory object to
522  *                                v1.0 memory object.
523  * @out_obj:    The shared memory object to populate with the v1.0 descriptor.
524  * @orig:       The shared memory object containing the v1.1 descriptor.
525  *
526  * Return: true if the conversion is successful, else false.
527  */
528 static bool
529 spmc_shm_convert_mtd_to_v1_0(struct spmc_shmem_obj *out_obj,
530 			     struct spmc_shmem_obj *orig)
531 {
532 	struct ffa_mtd *mtd_orig = &orig->desc;
533 	struct ffa_mtd_v1_0 *out = (struct ffa_mtd_v1_0 *) &out_obj->desc;
534 	struct ffa_emad_v1_0 *emad_in;
535 	struct ffa_emad_v1_0 *emad_array_in;
536 	struct ffa_emad_v1_0 *emad_array_out;
537 	struct ffa_comp_mrd *mrd_in;
538 	struct ffa_comp_mrd *mrd_out;
539 
540 	size_t mrd_in_offset;
541 	size_t mrd_out_offset;
542 	size_t emad_out_array_size;
543 	size_t mrd_size = 0;
544 	size_t orig_desc_size = orig->desc_size;
545 
546 	/* Populate the v1.0 descriptor format from the v1.1 struct. */
547 	out->sender_id = mtd_orig->sender_id;
548 	out->memory_region_attributes = mtd_orig->memory_region_attributes;
549 	out->flags = mtd_orig->flags;
550 	out->handle = mtd_orig->handle;
551 	out->tag = mtd_orig->tag;
552 	out->emad_count = mtd_orig->emad_count;
553 
554 	/* Determine the location of the emad array in both descriptors. */
555 	emad_array_in = (struct ffa_emad_v1_0 *)
556 			((uint8_t *) mtd_orig + mtd_orig->emad_offset);
557 	emad_array_out = out->emad;
558 
559 	/* Copy across the emad structs. */
560 	emad_in = emad_array_in;
561 	for (unsigned int i = 0U; i < out->emad_count; i++) {
562 		/* Bound check for emad array. */
563 		if (((uint8_t *)emad_in + sizeof(struct ffa_emad_v1_0)) >
564 				((uint8_t *) mtd_orig + orig_desc_size)) {
565 			VERBOSE("%s: Invalid mtd structure.\n", __func__);
566 			return false;
567 		}
568 		memcpy(&emad_array_out[i], emad_in,
569 		       sizeof(struct ffa_emad_v1_0));
570 		emad_in = (struct ffa_emad_v1_0 *)
571 			  ((uint8_t *)emad_in + mtd_orig->emad_size);
572 	}
573 
574 	/* Place the mrd descriptors after the end of the emad descriptors. */
575 	emad_out_array_size = sizeof(struct ffa_emad_v1_0) * out->emad_count;
576 
577 	mrd_out_offset =  (uint8_t *) out->emad - (uint8_t *) out +
578 			  emad_out_array_size;
579 
580 	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);
581 
582 	mrd_in_offset = mtd_orig->emad_offset +
583 			(mtd_orig->emad_size * mtd_orig->emad_count);
584 
585 	/* Add the size of the composite memory region descriptor. */
586 	mrd_size += sizeof(struct ffa_comp_mrd);
587 
588 	/* Find the mrd descriptor. */
589 	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);
590 
591 	/* Add the size of the constituent memory region descriptors. */
592 	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);
593 
594 	/*
595 	 * Update the offset in the emads by the delta between the input and
596 	 * output addresses.
597 	 */
598 	emad_in = emad_array_in;
599 
600 	for (unsigned int i = 0U; i < out->emad_count; i++) {
601 		emad_array_out[i].comp_mrd_offset = emad_in->comp_mrd_offset +
602 				(mrd_out_offset - mrd_in_offset);
603 		emad_in = (struct ffa_emad_v1_0 *)
604 			  ((uint8_t *)emad_in + mtd_orig->emad_size);
605 	}
606 
607 	/* Verify that we stay within bound of the memory descriptors. */
608 	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
609 	     (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
610 	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
611 	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
612 		ERROR("%s: Invalid mrd structure.\n", __func__);
613 		return false;
614 	}
615 
616 	/* Copy the mrd descriptors directly. */
617 	memcpy(mrd_out, mrd_in, mrd_size);
618 
619 	return true;
620 }
621 
622 /**
623  * spmc_populate_ffa_v1_0_descriptor - Converts a given v1.1 memory object to
624  *                                     the v1.0 format and populates the
625  *                                     provided buffer.
626  * @dst:	    Buffer to populate v1.0 ffa_memory_region_descriptor.
627  * @orig_obj:	    Object containing v1.1 ffa_memory_region_descriptor.
628  * @buf_size:	    Size of the buffer to populate.
629  * @offset:	    The offset of the converted descriptor to copy.
630  * @copy_size:	    Will be populated with the number of bytes copied.
631  * @v1_0_desc_size: Will be populated with the total size of the v1.0
632  *                  descriptor.
633  *
634  * Return: 0 if conversion and population succeeded.
635  * Note: This function invalidates the reference to @orig_obj, therefore
636  * `spmc_shmem_obj_lookup` must be called if further usage is required.
637  */
638 static uint32_t
639 spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
640 				 size_t buf_size, size_t offset,
641 				 size_t *copy_size, size_t *v1_0_desc_size)
642 {
643 	struct spmc_shmem_obj *v1_0_obj;
644 
645 	/* Calculate the size that the v1.0 descriptor will require. */
646 	*v1_0_desc_size = spmc_shm_get_v1_0_descriptor_size(
647 				&orig_obj->desc, orig_obj->desc_size);
648 
649 	if (*v1_0_desc_size == 0) {
650 		ERROR("%s: cannot determine size of descriptor.\n",
651 		      __func__);
652 		return FFA_ERROR_INVALID_PARAMETER;
653 	}
654 
655 	/* Get a new obj to store the v1.0 descriptor. */
656 	v1_0_obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
657 					*v1_0_desc_size);
658 
659 	if (!v1_0_obj) {
660 		return FFA_ERROR_NO_MEMORY;
661 	}
662 
663 	/* Perform the conversion from v1.1 to v1.0. */
664 	if (!spmc_shm_convert_mtd_to_v1_0(v1_0_obj, orig_obj)) {
665 		spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
666 		return FFA_ERROR_INVALID_PARAMETER;
667 	}
668 
669 	*copy_size = MIN(v1_0_obj->desc_size - offset, buf_size);
670 	memcpy(dst, (uint8_t *) &v1_0_obj->desc + offset, *copy_size);
671 
672 	/*
673 	 * We're finished with the v1.0 descriptor for now so free it.
674 	 * Note that this will invalidate any references to the v1.1
675 	 * descriptor.
676 	 */
677 	spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
678 
679 	return 0;
680 }
681 
682 static int
683 spmc_validate_mtd_start(struct ffa_mtd *desc, uint32_t ffa_version,
684 			size_t fragment_length, size_t total_length)
685 {
686 	unsigned long long emad_end;
687 	unsigned long long emad_size;
688 	unsigned long long emad_offset;
689 	unsigned int min_desc_size;
690 
691 	/* Determine the appropriate minimum descriptor size. */
692 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
693 		min_desc_size = sizeof(struct ffa_mtd_v1_0);
694 	} else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
695 		min_desc_size = sizeof(struct ffa_mtd);
696 	} else {
697 		return FFA_ERROR_INVALID_PARAMETER;
698 	}
699 	if (fragment_length < min_desc_size) {
700 		WARN("%s: invalid length %zu < %u\n", __func__, fragment_length,
701 		     min_desc_size);
702 		return FFA_ERROR_INVALID_PARAMETER;
703 	}
704 
705 	if (desc->emad_count == 0U) {
706 		WARN("%s: unsupported attribute desc count %u.\n",
707 		     __func__, desc->emad_count);
708 		return FFA_ERROR_INVALID_PARAMETER;
709 	}
710 
711 	/*
712 	 * If the caller is using FF-A v1.0, interpret the descriptor as a v1.0
713 	 * format, otherwise assume it is a v1.1 format.
714 	 */
715 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
716 		emad_offset = emad_size = sizeof(struct ffa_emad_v1_0);
717 	} else {
718 		if (!is_aligned(desc->emad_offset, 16)) {
719 			WARN("%s: Emad offset %" PRIx32 " is not 16-byte aligned.\n",
720 			     __func__, desc->emad_offset);
721 			return FFA_ERROR_INVALID_PARAMETER;
722 		}
723 		if (desc->emad_offset < sizeof(struct ffa_mtd)) {
724 			WARN("%s: Emad offset too small: 0x%" PRIx32 " < 0x%zx.\n",
725 			     __func__, desc->emad_offset,
726 			     sizeof(struct ffa_mtd));
727 			return FFA_ERROR_INVALID_PARAMETER;
728 		}
729 		emad_offset = desc->emad_offset;
730 		if (desc->emad_size < sizeof(struct ffa_emad_v1_0)) {
731 			WARN("%s: Bad emad size (%" PRIu32 " < %zu).\n", __func__,
732 			     desc->emad_size, sizeof(struct ffa_emad_v1_0));
733 			return FFA_ERROR_INVALID_PARAMETER;
734 		}
735 		if (!is_aligned(desc->emad_size, 16)) {
736 			WARN("%s: Emad size 0x%" PRIx32 " is not 16-byte aligned.\n",
737 			     __func__, desc->emad_size);
738 			return FFA_ERROR_INVALID_PARAMETER;
739 		}
740 		emad_size = desc->emad_size;
741 	}
742 
743 	/*
744 	 * Overflow is impossible: the arithmetic happens in at least 64-bit
745 	 * precision, but all of the operands are bounded by UINT32_MAX, and
746 	 * ((2^32 - 1)^2 + (2^32 - 1) + (2^32 - 1)) = ((2^32 - 1) * (2^32 + 1))
747 	 * = (2^64 - 1).
748 	 */
749 	CASSERT(sizeof(desc->emad_count) == 4, assert_emad_count_max_too_large);
750 	emad_end = (desc->emad_count * (unsigned long long)emad_size) +
751 		   (unsigned long long)sizeof(struct ffa_comp_mrd) +
752 		   (unsigned long long)emad_offset;
753 
754 	if (emad_end > total_length) {
755 		WARN("%s: Composite memory region extends beyond descriptor: 0x%llx > 0x%zx\n",
756 		     __func__, emad_end, total_length);
757 		return FFA_ERROR_INVALID_PARAMETER;
758 	}
759 
760 	return 0;
761 }
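/*
 * Numeric example of the bound above: a v1.1 descriptor with
 * emad_offset = 48, emad_size = 16 and emad_count = 2 yields
 * emad_end = (2 * 16) + 16 + 48 = 96, so total_length must cover at
 * least 96 bytes before the composite descriptor can even begin.
 */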
762 
763 static inline const struct ffa_emad_v1_0 *
764 emad_advance(const struct ffa_emad_v1_0 *emad, size_t offset)
765 {
766 	return (const struct ffa_emad_v1_0 *)((const uint8_t *)emad + offset);
767 }
768 
769 /**
770  * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
771  * @obj:	  Object containing ffa_memory_region_descriptor.
772  * @ffa_version:  FF-A version of the provided descriptor.
773  *
774  * Return: 0 if object is valid, FFA_ERROR_INVALID_PARAMETER if
775  * constituent_memory_region_descriptor offset or count is invalid.
776  */
777 static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
778 				uint32_t ffa_version)
779 {
780 	uint64_t total_page_count;
781 	const struct ffa_emad_v1_0 *first_emad;
782 	const struct ffa_emad_v1_0 *end_emad;
783 	size_t emad_size;
784 	uint32_t comp_mrd_offset = 0;
785 	size_t header_emad_size;
786 	size_t size;
787 	size_t count;
788 	size_t expected_size;
789 	struct ffa_comp_mrd *comp;
790 
791 	if (obj->desc_filled != obj->desc_size) {
792 		ERROR("BUG: %s called on incomplete object (%zu != %zu)\n",
793 		      __func__, obj->desc_filled, obj->desc_size);
794 		panic();
795 	}
796 
797 	if (spmc_validate_mtd_start(&obj->desc, ffa_version,
798 				    obj->desc_filled, obj->desc_size)) {
799 		ERROR("BUG: %s called on object with corrupt memory region descriptor\n",
800 		      __func__);
801 		panic();
802 	}
803 
804 	first_emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
805 					     ffa_version, &emad_size);
806 	end_emad = emad_advance(first_emad, obj->desc.emad_count * emad_size);
807 	comp_mrd_offset = first_emad->comp_mrd_offset;
808 
809 	/* Loop through the endpoint descriptors, validating each of them. */
810 	for (const struct ffa_emad_v1_0 *emad = first_emad;
811 	     emad < end_emad;
812 	     emad = emad_advance(emad, emad_size)) {
813 		ffa_endpoint_id16_t ep_id;
814 
815 		/*
816 		 * If a partition ID resides in the secure world validate that
817 		 * the partition ID is for a known partition. Ignore any
818 		 * partition ID belonging to the normal world as it is assumed
819 		 * the Hypervisor will have validated these.
820 		 */
821 		ep_id = emad->mapd.endpoint_id;
822 		if (ffa_is_secure_world_id(ep_id)) {
823 			if (spmc_get_sp_ctx(ep_id) == NULL) {
824 				WARN("%s: Invalid receiver id 0x%x\n",
825 				     __func__, ep_id);
826 				return FFA_ERROR_INVALID_PARAMETER;
827 			}
828 		}
829 
830 		/*
831 		 * The offset provided to the composite memory region descriptor
832 		 * should be consistent across endpoint descriptors.
833 		 */
834 		if (comp_mrd_offset != emad->comp_mrd_offset) {
835 			ERROR("%s: mismatching offsets provided, %u != %u\n",
836 			       __func__, emad->comp_mrd_offset, comp_mrd_offset);
837 			return FFA_ERROR_INVALID_PARAMETER;
838 		}
839 	}
840 
841 	header_emad_size = (size_t)((const uint8_t *)end_emad -
842 				    (const uint8_t *)&obj->desc);
843 
844 	/*
845 	 * Check that the composite descriptor
846 	 * is after the endpoint descriptors.
847 	 */
848 	if (comp_mrd_offset < header_emad_size) {
849 		WARN("%s: invalid object, offset %u < header + emad %zu\n",
850 		     __func__, comp_mrd_offset, header_emad_size);
851 		return FFA_ERROR_INVALID_PARAMETER;
852 	}
853 
854 	/* Ensure the composite descriptor offset is aligned. */
855 	if (!is_aligned(comp_mrd_offset, 16)) {
856 		WARN("%s: invalid object, unaligned composite memory "
857 		     "region descriptor offset %u.\n",
858 		     __func__, comp_mrd_offset);
859 		return FFA_ERROR_INVALID_PARAMETER;
860 	}
861 
862 	size = obj->desc_size;
863 
864 	/* Check that the composite descriptor is in bounds. */
865 	if (comp_mrd_offset > size) {
866 		WARN("%s: invalid object, offset %u > total size %zu\n",
867 		     __func__, comp_mrd_offset, obj->desc_size);
868 		return FFA_ERROR_INVALID_PARAMETER;
869 	}
870 	size -= comp_mrd_offset;
871 
872 	if (size < sizeof(struct ffa_comp_mrd)) {
873 		WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
874 		     __func__, comp_mrd_offset, obj->desc_size);
875 		return FFA_ERROR_INVALID_PARAMETER;
876 	}
877 	size -= sizeof(struct ffa_comp_mrd);
878 
879 	count = size / sizeof(struct ffa_cons_mrd);
880 
881 	comp = spmc_shmem_obj_get_comp_mrd(obj, ffa_version);
882 
883 	if (comp->address_range_count != count) {
884 		WARN("%s: invalid object, desc count %u != %zu\n",
885 		     __func__, comp->address_range_count, count);
886 		return FFA_ERROR_INVALID_PARAMETER;
887 	}
888 
889 	expected_size = comp_mrd_offset + sizeof(*comp) +
890 		count * sizeof(struct ffa_cons_mrd);
891 
892 	if (expected_size != obj->desc_size) {
893 		WARN("%s: invalid object, computed size %zu != size %zu\n",
894 		       __func__, expected_size, obj->desc_size);
895 		return FFA_ERROR_INVALID_PARAMETER;
896 	}
897 
898 	total_page_count = 0;
899 
900 	for (size_t i = 0; i < count; i++) {
901 		total_page_count +=
902 			comp->address_range_array[i].page_count;
903 	}
904 	if (comp->total_page_count != total_page_count) {
905 		WARN("%s: invalid object, desc total_page_count %u != %" PRIu64 "\n",
906 		     __func__, comp->total_page_count,
907 		total_page_count);
908 		return FFA_ERROR_INVALID_PARAMETER;
909 	}
910 
911 	return 0;
912 }
913 
914 /**
915  * spmc_shmem_check_state_obj - Check if the descriptor describes memory
916  *				regions that are currently involved with
917  *				existing memory transactions. This implies that
918  *				the memory is not in a valid state for lending.
919  * @obj:    Object containing ffa_memory_region_descriptor.
920  *
921  * Return: 0 if object is valid, FFA_ERROR_INVALID_PARAMETER if invalid memory
922  * state.
923  */
924 static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj,
925 				      uint32_t ffa_version)
926 {
927 	size_t obj_offset = 0;
928 	struct spmc_shmem_obj *inflight_obj;
929 
930 	struct ffa_comp_mrd *other_mrd;
931 	struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj,
932 								  ffa_version);
933 
934 	if (requested_mrd == NULL) {
935 		return FFA_ERROR_INVALID_PARAMETER;
936 	}
937 
938 	inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
939 					       &obj_offset);
940 
941 	while (inflight_obj != NULL) {
942 		/*
943 		 * Don't compare the transaction to itself or to partially
944 		 * transmitted descriptors.
945 		 */
946 		if ((obj->desc.handle != inflight_obj->desc.handle) &&
947 		    (obj->desc_size == obj->desc_filled)) {
948 			other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj,
949 							  FFA_VERSION_COMPILED);
950 			if (other_mrd == NULL) {
951 				return FFA_ERROR_INVALID_PARAMETER;
952 			}
953 			if (overlapping_memory_regions(requested_mrd,
954 						       other_mrd)) {
955 				return FFA_ERROR_INVALID_PARAMETER;
956 			}
957 		}
958 
959 		inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
960 						       &obj_offset);
961 	}
962 	return 0;
963 }
964 
965 static long spmc_ffa_fill_desc(struct mailbox *mbox,
966 			       struct spmc_shmem_obj *obj,
967 			       uint32_t fragment_length,
968 			       ffa_mtd_flag32_t mtd_flag,
969 			       uint32_t ffa_version,
970 			       void *smc_handle)
971 {
972 	int ret;
973 	size_t emad_size;
974 	uint32_t handle_low;
975 	uint32_t handle_high;
976 	struct ffa_emad_v1_0 *emad;
977 	struct ffa_emad_v1_0 *other_emad;
978 
979 	if (mbox->rxtx_page_count == 0U) {
980 		WARN("%s: buffer pair not registered.\n", __func__);
981 		ret = FFA_ERROR_INVALID_PARAMETER;
982 		goto err_arg;
983 	}
984 
985 	CASSERT(sizeof(mbox->rxtx_page_count) == 4, assert_bogus_page_count);
986 	if (fragment_length > (uint64_t)mbox->rxtx_page_count * PAGE_SIZE_4KB) {
987 		WARN("%s: bad fragment size %u > %" PRIu64 " buffer size\n", __func__,
988 		     fragment_length, (uint64_t)mbox->rxtx_page_count * PAGE_SIZE_4KB);
989 		ret = FFA_ERROR_INVALID_PARAMETER;
990 		goto err_arg;
991 	}
992 
993 	if (fragment_length > obj->desc_size - obj->desc_filled) {
994 		WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
995 		     fragment_length, obj->desc_size - obj->desc_filled);
996 		ret = FFA_ERROR_INVALID_PARAMETER;
997 		goto err_arg;
998 	}
999 
1000 	memcpy((uint8_t *)&obj->desc + obj->desc_filled,
1001 	       (uint8_t *) mbox->tx_buffer, fragment_length);
1002 
1003 	/* Ensure that the sender ID resides in the normal world. */
1004 	if (ffa_is_secure_world_id(obj->desc.sender_id)) {
1005 		WARN("%s: Invalid sender ID 0x%x.\n",
1006 		     __func__, obj->desc.sender_id);
1007 		ret = FFA_ERROR_DENIED;
1008 		goto err_arg;
1009 	}
1010 
1011 	/* Ensure the NS bit is set to 0. */
1012 	if ((obj->desc.memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
1013 		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
1014 		ret = FFA_ERROR_INVALID_PARAMETER;
1015 		goto err_arg;
1016 	}
1017 
1018 	/*
1019 	 * We don't currently support any optional flags so ensure none are
1020 	 * requested.
1021 	 */
1022 	if (obj->desc.flags != 0U && mtd_flag != 0U &&
1023 	    (obj->desc.flags != mtd_flag)) {
1024 		WARN("%s: invalid memory transaction flags %u != %u\n",
1025 		     __func__, obj->desc.flags, mtd_flag);
1026 		ret = FFA_ERROR_INVALID_PARAMETER;
1027 		goto err_arg;
1028 	}
1029 
1030 	if (obj->desc_filled == 0U) {
1031 		/* First fragment, descriptor header has been copied */
1032 		ret = spmc_validate_mtd_start(&obj->desc, ffa_version,
1033 					      fragment_length, obj->desc_size);
1034 		if (ret != 0) {
1035 			goto err_bad_desc;
1036 		}
1037 
1038 		obj->desc.handle = spmc_shmem_obj_state.next_handle++;
1039 		obj->desc.flags |= mtd_flag;
1040 	}
1041 
1042 	obj->desc_filled += fragment_length;
1043 
1044 	handle_low = (uint32_t)obj->desc.handle;
1045 	handle_high = obj->desc.handle >> 32;
1046 
1047 	if (obj->desc_filled != obj->desc_size) {
1048 		SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
1049 			 handle_high, obj->desc_filled,
1050 			 (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
1051 	}
1052 
1053 	/* The full descriptor has been received, perform any final checks. */
1054 
1055 	ret = spmc_shmem_check_obj(obj, ffa_version);
1056 	if (ret != 0) {
1057 		goto err_bad_desc;
1058 	}
1059 
1060 	/* Ensure partition IDs are not duplicated. */
1061 	for (size_t i = 0; i < obj->desc.emad_count; i++) {
1062 		emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
1063 					       &emad_size);
1064 
1065 		for (size_t j = i + 1; j < obj->desc.emad_count; j++) {
1066 			other_emad = spmc_shmem_obj_get_emad(&obj->desc, j,
1067 							     ffa_version,
1068 							     &emad_size);
1069 
1070 			if (emad->mapd.endpoint_id ==
1071 				other_emad->mapd.endpoint_id) {
1072 				WARN("%s: Duplicated endpoint id 0x%x\n",
1073 				     __func__, emad->mapd.endpoint_id);
1074 				ret = FFA_ERROR_INVALID_PARAMETER;
1075 				goto err_bad_desc;
1076 			}
1077 		}
1078 	}
1079 
1080 	ret = spmc_shmem_check_state_obj(obj, ffa_version);
1081 	if (ret) {
1082 		ERROR("%s: invalid memory region descriptor.\n", __func__);
1083 		goto err_bad_desc;
1084 	}
1085 
1086 	/*
1087 	 * Everything checks out, if the sender was using FF-A v1.0, convert
1088 	 * the descriptor format to use the v1.1 structures.
1089 	 */
1090 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1091 		struct spmc_shmem_obj *v1_1_obj;
1092 		uint64_t mem_handle;
1093 
1094 		/* Calculate the size that the v1.1 descriptor will require. */
1095 		uint64_t v1_1_desc_size =
1096 		    spmc_shm_get_v1_1_descriptor_size((void *) &obj->desc,
1097 						      obj->desc_size);
1098 
1099 		if (v1_1_desc_size > UINT32_MAX) {
1100 			ret = FFA_ERROR_NO_MEMORY;
1101 			goto err_arg;
1102 		}
1103 
1104 		/* Get a new obj to store the v1.1 descriptor. */
1105 		v1_1_obj =
1106 		    spmc_shmem_obj_alloc(&spmc_shmem_obj_state, (size_t)v1_1_desc_size);
1107 
1108 		if (!v1_1_obj) {
1109 			ret = FFA_ERROR_NO_MEMORY;
1110 			goto err_arg;
1111 		}
1112 
1113 		/* Perform the conversion from v1.0 to v1.1. */
1114 		v1_1_obj->desc_size = (uint32_t)v1_1_desc_size;
1115 		v1_1_obj->desc_filled = (uint32_t)v1_1_desc_size;
1116 		if (!spmc_shm_convert_shmem_obj_from_v1_0(v1_1_obj, obj)) {
1117 			ERROR("%s: Could not convert mtd!\n", __func__);
1118 			spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_1_obj);
1119 			ret = FFA_ERROR_INVALID_PARAMETER;
1120 			goto err_arg;
1121 		}
1122 		/*
1123 		 * We're finished with the v1.0 descriptor so free it
1124 		 * and continue our checks with the new v1.1 descriptor.
1125 		 */
1126 		mem_handle = obj->desc.handle;
1127 		spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1128 		obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1129 		if (obj == NULL) {
1130 			ERROR("%s: Failed to find converted descriptor.\n",
1131 			     __func__);
1132 			ret = FFA_ERROR_INVALID_PARAMETER;
1133 			return spmc_ffa_error_return(smc_handle, ret);
1134 		}
1135 	}
1136 
1137 	/* Allow for platform specific operations to be performed. */
1138 	ret = plat_spmc_shmem_begin(&obj->desc);
1139 	if (ret != 0) {
1140 		goto err_arg;
1141 	}
1142 
1143 	SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
1144 		 0, 0, 0);
1145 
1146 err_bad_desc:
1147 err_arg:
1148 	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1149 	return spmc_ffa_error_return(smc_handle, ret);
1150 }
1151 
1152 /**
1153  * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
1154  * @client:             Client state.
1155  * @total_length:       Total length of shared memory descriptor.
1156  * @fragment_length:    Length of fragment of shared memory descriptor passed in
1157  *                      this call.
1158  * @address:            Not supported, must be 0.
1159  * @page_count:         Not supported, must be 0.
1160  * @smc_handle:         Handle passed to smc call. Used to return
1161  *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
1162  *
1163  * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
1164  * to share or lend memory from non-secure os to secure os (with no stream
1165  * endpoints).
1166  *
1167  * Return: 0 on success, error code on failure.
1168  */
1169 long spmc_ffa_mem_send(uint32_t smc_fid,
1170 			bool secure_origin,
1171 			uint64_t total_length,
1172 			uint32_t fragment_length,
1173 			uint64_t address,
1174 			uint32_t page_count,
1175 			void *cookie,
1176 			void *handle,
1177 			uint64_t flags)
1178 
1179 {
1180 	long ret;
1181 	struct spmc_shmem_obj *obj;
1182 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1183 	ffa_mtd_flag32_t mtd_flag;
1184 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1185 	size_t min_desc_size;
1186 
1187 	if (address != 0U || page_count != 0U) {
1188 		WARN("%s: custom memory region for message not supported.\n",
1189 		     __func__);
1190 		return spmc_ffa_error_return(handle,
1191 					     FFA_ERROR_INVALID_PARAMETER);
1192 	}
1193 
1194 	if (secure_origin) {
1195 		WARN("%s: unsupported share direction.\n", __func__);
1196 		return spmc_ffa_error_return(handle,
1197 					     FFA_ERROR_INVALID_PARAMETER);
1198 	}
1199 
1200 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1201 		min_desc_size = sizeof(struct ffa_mtd_v1_0);
1202 	} else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
1203 		min_desc_size = sizeof(struct ffa_mtd);
1204 	} else {
1205 		WARN("%s: bad FF-A version.\n", __func__);
1206 		return spmc_ffa_error_return(handle,
1207 					     FFA_ERROR_INVALID_PARAMETER);
1208 	}
1209 
1210 	/* Check if the descriptor is too small for the FF-A version. */
1211 	if (fragment_length < min_desc_size) {
1212 		WARN("%s: bad first fragment size %u < %zu\n",
1213 		     __func__, fragment_length, min_desc_size);
1214 		return spmc_ffa_error_return(handle,
1215 					     FFA_ERROR_INVALID_PARAMETER);
1216 	}
1217 
1218 	if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
1219 		mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
1220 	} else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
1221 		mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
1222 	} else {
1223 		WARN("%s: invalid memory management operation.\n", __func__);
1224 		return spmc_ffa_error_return(handle,
1225 					     FFA_ERROR_INVALID_PARAMETER);
1226 	}
1227 
1228 	spin_lock(&spmc_shmem_obj_state.lock);
1229 	obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
1230 	if (obj == NULL) {
1231 		ret = FFA_ERROR_NO_MEMORY;
1232 		goto err_unlock;
1233 	}
1234 
1235 	spin_lock(&mbox->lock);
1236 	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag,
1237 				 ffa_version, handle);
1238 	spin_unlock(&mbox->lock);
1239 
1240 	spin_unlock(&spmc_shmem_obj_state.lock);
1241 	return ret;
1242 
1243 err_unlock:
1244 	spin_unlock(&spmc_shmem_obj_state.lock);
1245 	return spmc_ffa_error_return(handle, ret);
1246 }
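/*
 * Illustrative sender flow, hypothetical normal-world pseudocode (the
 * helper names are not an API defined here): a descriptor larger than
 * one TX buffer is sent as an initial FFA_MEM_SHARE followed by
 * FFA_MEM_FRAG_TX calls, resuming from the desc_filled offset returned
 * in each FFA_MEM_FRAG_RX reply.
 *
 *	copy_to_tx_buffer(desc, 0, frag_len);
 *	rc = ffa_mem_share(total_len, frag_len);
 *	while (rc.fid == FFA_MEM_FRAG_RX) {
 *		offset = rc.arg3;	// desc_filled so far
 *		frag_len = MIN(total_len - offset, tx_buf_size);
 *		copy_to_tx_buffer(desc, offset, frag_len);
 *		rc = ffa_mem_frag_tx(rc.handle, frag_len);
 *	}
 *	// rc now carries FFA_SUCCESS and the 64-bit memory handle.
 */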
1247 
1248 /**
1249  * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
1250  * @client:             Client state.
1251  * @handle_low:         Handle_low value returned from FFA_MEM_FRAG_RX.
1252  * @handle_high:        Handle_high value returned from FFA_MEM_FRAG_RX.
1253  * @fragment_length:    Length of fragments transmitted.
1254  * @sender_id:          Vmid of sender in bits [31:16]
1255  * @smc_handle:         Handle passed to smc call. Used to return
1256  *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
1257  *
1258  * Return: @smc_handle on success, error code on failure.
1259  */
1260 long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
1261 			  bool secure_origin,
1262 			  uint64_t handle_low,
1263 			  uint64_t handle_high,
1264 			  uint32_t fragment_length,
1265 			  uint32_t sender_id,
1266 			  void *cookie,
1267 			  void *handle,
1268 			  uint64_t flags)
1269 {
1270 	long ret;
1271 	uint32_t desc_sender_id;
1272 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1273 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1274 
1275 	struct spmc_shmem_obj *obj;
1276 	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1277 
1278 	spin_lock(&spmc_shmem_obj_state.lock);
1279 
1280 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1281 	if (obj == NULL) {
1282 		WARN("%s: invalid handle 0x%lx.\n",
1283 		     __func__, mem_handle);
1284 		ret = FFA_ERROR_INVALID_PARAMETER;
1285 		goto err_unlock;
1286 	}
1287 
1288 	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
1289 	if (sender_id != desc_sender_id) {
1290 		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
1291 		     sender_id, desc_sender_id);
1292 		ret = FFA_ERROR_INVALID_PARAMETER;
1293 		goto err_unlock;
1294 	}
1295 
1296 	if (obj->desc_filled == obj->desc_size) {
1297 		WARN("%s: object desc already filled, %zu\n", __func__,
1298 		     obj->desc_filled);
1299 		ret = FFA_ERROR_INVALID_PARAMETER;
1300 		goto err_unlock;
1301 	}
1302 
1303 	spin_lock(&mbox->lock);
1304 	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, ffa_version,
1305 				 handle);
1306 	spin_unlock(&mbox->lock);
1307 
1308 	spin_unlock(&spmc_shmem_obj_state.lock);
1309 	return ret;
1310 
1311 err_unlock:
1312 	spin_unlock(&spmc_shmem_obj_state.lock);
1313 	return spmc_ffa_error_return(handle, ret);
1314 }
1315 
1316 /**
1317  * spmc_ffa_mem_retrieve_set_ns_bit - Set the NS bit in the response descriptor
1318  *				      if the caller implements a version greater
1319  *				      than FF-A 1.0 or if they have requested
1320  *				      the functionality.
1321  *				      TODO: We are assuming that the caller is
1322  *				      an SP. To support retrieval from the
1323  *				      normal world this function will need to be
1324  *				      expanded accordingly.
1325  * @resp:       Descriptor populated in callers RX buffer.
1326  * @sp_ctx:     Context of the calling SP.
1327  */
1328 void spmc_ffa_mem_retrieve_set_ns_bit(struct ffa_mtd *resp,
1329 			 struct secure_partition_desc *sp_ctx)
1330 {
1331 	if (sp_ctx->ffa_version > MAKE_FFA_VERSION(1, 0) ||
1332 	    sp_ctx->ns_bit_requested) {
1333 		/*
1334 		 * Currently memory senders must reside in the normal
1335 		 * world, and we do not have the functionality to change
1336 		 * the state of memory dynamically. Therefore we can always set
1337 		 * the NS bit to 1.
1338 		 */
1339 		resp->memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
1340 	}
1341 }
1342 
1343 /**
1344  * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
1345  * @smc_fid:            FID of SMC
1346  * @total_length:       Total length of retrieve request descriptor if this is
1347  *                      the first call. Otherwise (unsupported) must be 0.
1348  * @fragment_length:    Length of fragment of retrieve request descriptor passed
1349  *                      in this call. Only @fragment_length == @total_length
1350  *                      is supported by this implementation.
1351  * @address:            Not supported, must be 0.
1352  * @page_count:         Not supported, must be 0.
1353  * @smc_handle:         Handle passed to smc call. Used to return
1354  *                      FFA_MEM_RETRIEVE_RESP.
1355  *
1356  * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
1357  * Used by secure os to retrieve memory already shared by non-secure os.
1358  * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
1359  * the client must call FFA_MEM_FRAG_RX until the full response has been
1360  * received.
1361  *
1362  * Return: @handle on success, error code on failure.
1363  */
1364 long
1365 spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
1366 			  bool secure_origin,
1367 			  uint32_t total_length,
1368 			  uint32_t fragment_length,
1369 			  uint64_t address,
1370 			  uint32_t page_count,
1371 			  void *cookie,
1372 			  void *handle,
1373 			  uint64_t flags)
1374 {
1375 	int ret;
1376 	size_t buf_size;
1377 	size_t copy_size = 0;
1378 	size_t min_desc_size;
1379 	size_t out_desc_size = 0;
1380 
1381 	/*
1382 	 * Currently we are only accessing fields that are the same in both the
1383 	 * v1.0 and v1.1 mtd struct therefore we can use a v1.1 struct directly
1384 	 * here. We only need validate against the appropriate struct size.
1385 	 */
1386 	struct ffa_mtd *resp;
1387 	const struct ffa_mtd *req;
1388 	struct spmc_shmem_obj *obj = NULL;
1389 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1390 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1391 	struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
1392 
1393 	if (!secure_origin) {
1394 		WARN("%s: unsupported retrieve req direction.\n", __func__);
1395 		return spmc_ffa_error_return(handle,
1396 					     FFA_ERROR_INVALID_PARAMETER);
1397 	}
1398 
1399 	if (address != 0U || page_count != 0U) {
1400 		WARN("%s: custom memory region not supported.\n", __func__);
1401 		return spmc_ffa_error_return(handle,
1402 					     FFA_ERROR_INVALID_PARAMETER);
1403 	}
1404 
1405 	spin_lock(&mbox->lock);
1406 
1407 	req = mbox->tx_buffer;
1408 	resp = mbox->rx_buffer;
1409 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1410 
1411 	if (mbox->rxtx_page_count == 0U) {
1412 		WARN("%s: buffer pair not registered.\n", __func__);
1413 		ret = FFA_ERROR_INVALID_PARAMETER;
1414 		goto err_unlock_mailbox;
1415 	}
1416 
1417 	if (mbox->state != MAILBOX_STATE_EMPTY) {
1418 		WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
1419 		ret = FFA_ERROR_DENIED;
1420 		goto err_unlock_mailbox;
1421 	}
1422 
1423 	if (fragment_length != total_length) {
1424 		WARN("%s: fragmented retrieve request not supported.\n",
1425 		     __func__);
1426 		ret = FFA_ERROR_INVALID_PARAMETER;
1427 		goto err_unlock_mailbox;
1428 	}
1429 
1430 	if (req->emad_count == 0U) {
1431 		WARN("%s: unsupported attribute desc count %u.\n",
1432 		     __func__, req->emad_count);
1433 		ret = FFA_ERROR_INVALID_PARAMETER;
1434 		goto err_unlock_mailbox;
1435 	}
1436 
1437 	/* Determine the appropriate minimum descriptor size. */
1438 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1439 		min_desc_size = sizeof(struct ffa_mtd_v1_0);
1440 	} else {
1441 		min_desc_size = sizeof(struct ffa_mtd);
1442 	}
1443 	if (total_length < min_desc_size) {
1444 		WARN("%s: invalid length %u < %zu\n", __func__, total_length,
1445 		     min_desc_size);
1446 		ret = FFA_ERROR_INVALID_PARAMETER;
1447 		goto err_unlock_mailbox;
1448 	}
1449 
1450 	spin_lock(&spmc_shmem_obj_state.lock);
1451 
1452 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1453 	if (obj == NULL) {
1454 		ret = FFA_ERROR_INVALID_PARAMETER;
1455 		goto err_unlock_all;
1456 	}
1457 
1458 	if (obj->desc_filled != obj->desc_size) {
1459 		WARN("%s: incomplete object desc filled %zu < size %zu\n",
1460 		     __func__, obj->desc_filled, obj->desc_size);
1461 		ret = FFA_ERROR_INVALID_PARAMETER;
1462 		goto err_unlock_all;
1463 	}
1464 
1465 	if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
1466 		WARN("%s: wrong sender id 0x%x != 0x%x\n",
1467 		     __func__, req->sender_id, obj->desc.sender_id);
1468 		ret = FFA_ERROR_INVALID_PARAMETER;
1469 		goto err_unlock_all;
1470 	}
1471 
1472 	if (req->emad_count != 0U && req->tag != obj->desc.tag) {
1473 		WARN("%s: wrong tag 0x%lx != 0x%lx\n",
1474 		     __func__, req->tag, obj->desc.tag);
1475 		ret = FFA_ERROR_INVALID_PARAMETER;
1476 		goto err_unlock_all;
1477 	}
1478 
1479 	if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
1480 		WARN("%s: mismatch of endpoint counts %u != %u\n",
1481 		     __func__, req->emad_count, obj->desc.emad_count);
1482 		ret = FFA_ERROR_INVALID_PARAMETER;
1483 		goto err_unlock_all;
1484 	}
1485 
1486 	/* Ensure the NS bit is set to 0 in the request. */
1487 	if ((req->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
1488 		WARN("%s: NS mem attributes flags MBZ.\n", __func__);
1489 		ret = FFA_ERROR_INVALID_PARAMETER;
1490 		goto err_unlock_all;
1491 	}
1492 
1493 	if (req->flags != 0U) {
1494 		if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
1495 		    (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
1496 			/*
1497 			 * If the retrieve request specifies the memory
1498 			 * transaction ensure it matches what we expect.
1499 			 */
1500 			WARN("%s: wrong mem transaction flags %x != %x\n",
1501 			__func__, req->flags, obj->desc.flags);
1502 			ret = FFA_ERROR_INVALID_PARAMETER;
1503 			goto err_unlock_all;
1504 		}
1505 
1506 		if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
1507 		    req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
1508 			/*
1509 			 * Current implementation does not support donate and
1510 			 * it supports no other flags.
1511 			 */
1512 			WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
1513 			ret = FFA_ERROR_INVALID_PARAMETER;
1514 			goto err_unlock_all;
1515 		}
1516 	}
1517 
1518 	/* Validate the caller is a valid participant. */
1519 	if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
1520 		WARN("%s: Invalid endpoint ID (0x%x).\n",
1521 			__func__, sp_ctx->sp_id);
1522 		ret = FFA_ERROR_INVALID_PARAMETER;
1523 		goto err_unlock_all;
1524 	}
1525 
1526 	/* Validate that the provided emad offset and structure are valid. */
1527 	for (size_t i = 0; i < req->emad_count; i++) {
1528 		size_t emad_size;
1529 		struct ffa_emad_v1_0 *emad;
1530 
1531 		emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
1532 					       &emad_size);
1533 
1534 		if ((uintptr_t) emad >= (uintptr_t)
1535 					((uint8_t *) req + total_length)) {
1536 			WARN("Invalid emad access.\n");
1537 			ret = FFA_ERROR_INVALID_PARAMETER;
1538 			goto err_unlock_all;
1539 		}
1540 	}
1541 
1542 	/*
1543 	 * Validate all the endpoints match in the case of multiple
1544 	 * borrowers. We don't mandate that the order of the borrowers
1545 	 * must match in the descriptors therefore check to see if the
1546 	 * endpoints match in any order.
1547 	 */
1548 	for (size_t i = 0; i < req->emad_count; i++) {
1549 		bool found = false;
1550 		size_t emad_size;
1551 		struct ffa_emad_v1_0 *emad;
1552 		struct ffa_emad_v1_0 *other_emad;
1553 
1554 		emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
1555 					       &emad_size);
1556 
1557 		for (size_t j = 0; j < obj->desc.emad_count; j++) {
1558 			other_emad = spmc_shmem_obj_get_emad(
1559 					&obj->desc, j, MAKE_FFA_VERSION(1, 1),
1560 					&emad_size);
1561 
1562 			if (req->emad_count &&
1563 			    emad->mapd.endpoint_id ==
1564 			    other_emad->mapd.endpoint_id) {
1565 				found = true;
1566 				break;
1567 			}
1568 		}
1569 
1570 		if (!found) {
1571 			WARN("%s: invalid receiver id (0x%x).\n",
1572 			     __func__, emad->mapd.endpoint_id);
1573 			ret = FFA_ERROR_INVALID_PARAMETER;
1574 			goto err_unlock_all;
1575 		}
1576 	}
1577 
1578 	mbox->state = MAILBOX_STATE_FULL;
1579 
1580 	if (req->emad_count != 0U) {
1581 		obj->in_use++;
1582 	}
1583 
1584 	/*
1585 	 * If the caller is v1.0 convert the descriptor, otherwise copy
1586 	 * directly.
1587 	 */
1588 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1589 		ret = spmc_populate_ffa_v1_0_descriptor(resp, obj, buf_size, 0,
1590 							&copy_size,
1591 							&out_desc_size);
1592 		if (ret != 0U) {
1593 			ERROR("%s: Failed to process descriptor.\n", __func__);
1594 			goto err_unlock_all;
1595 		}
1596 	} else {
1597 		copy_size = MIN(obj->desc_size, buf_size);
1598 		out_desc_size = obj->desc_size;
1599 
1600 		memcpy(resp, &obj->desc, copy_size);
1601 	}
1602 
1603 	/* Set the NS bit in the response if applicable. */
1604 	spmc_ffa_mem_retrieve_set_ns_bit(resp, sp_ctx);
1605 
1606 	spin_unlock(&spmc_shmem_obj_state.lock);
1607 	spin_unlock(&mbox->lock);
1608 
1609 	SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, out_desc_size,
1610 		 copy_size, 0, 0, 0, 0, 0);
1611 
1612 err_unlock_all:
1613 	spin_unlock(&spmc_shmem_obj_state.lock);
1614 err_unlock_mailbox:
1615 	spin_unlock(&mbox->lock);
1616 	return spmc_ffa_error_return(handle, ret);
1617 }
1618 
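/*
 * Usage sketch (illustrative only, not part of this file): the shape of a
 * borrower-side retrieve request that satisfies the validation above. The
 * tx_buf, mem_handle and owner_id parameters are assumed SP-side values,
 * memset() from <string.h> is assumed available, and only the struct
 * ffa_mtd fields this handler checks are shown.
 */
#if 0
static void sp_build_retrieve_req(struct ffa_mtd *tx_buf, uint64_t mem_handle,
				  uint16_t owner_id)
{
	memset(tx_buf, 0, sizeof(*tx_buf));
	tx_buf->sender_id = owner_id;	/* Owner that shared the region. */
	tx_buf->handle = mem_handle;	/* Handle from FFA_MEM_SHARE. */
	/* Either 0 or the exact type recorded at share time. */
	tx_buf->flags = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
	/* Each emad must name an endpoint recorded in the share. */
	tx_buf->emad_count = 1U;
	/* ... append one emad with mapd.endpoint_id set to this SP ... */
}
#endif
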
1619 /**
1620  * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
1621  * @secure_origin:      Must be true; normal world callers are rejected.
1622  * @handle_low:         Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
1623  * @handle_high:        Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
1624  * @fragment_offset:    Byte offset in descriptor to resume at.
1625  * @sender_id:          Bit[31:16]: Endpoint id of sender if client is a
1626  *                      hypervisor. 0 otherwise.
1627  * @handle:             Handle passed to the smc call. Used to return
1628  *                      FFA_MEM_FRAG_TX.
1629  *
1630  * Return: @handle on success, error code on failure.
1631  */
1632 long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
1633 			  bool secure_origin,
1634 			  uint32_t handle_low,
1635 			  uint32_t handle_high,
1636 			  uint32_t fragment_offset,
1637 			  uint32_t sender_id,
1638 			  void *cookie,
1639 			  void *handle,
1640 			  uint64_t flags)
1641 {
1642 	int ret;
1643 	const uint8_t *src;
1644 	size_t buf_size;
1645 	size_t copy_size;
1646 	size_t full_copy_size;
1647 	uint32_t desc_sender_id;
1648 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1649 	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1650 	struct spmc_shmem_obj *obj;
1651 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1652 
1653 	if (!secure_origin) {
1654 		WARN("%s: can only be called from the secure world.\n",
1655 		     __func__);
1656 		return spmc_ffa_error_return(handle,
1657 					     FFA_ERROR_INVALID_PARAMETER);
1658 	}
1659 
1660 	spin_lock(&spmc_shmem_obj_state.lock);
1661 
1662 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1663 	if (obj == NULL) {
1664 		WARN("%s: 0x%lx is not a valid handle.\n",
1665 		     __func__, mem_handle);
1666 		ret = FFA_ERROR_INVALID_PARAMETER;
1667 		goto err_unlock_shmem;
1668 	}
1669 
1670 	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
1671 	if (sender_id != 0U && sender_id != desc_sender_id) {
1672 		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
1673 		     sender_id, desc_sender_id);
1674 		ret = FFA_ERROR_INVALID_PARAMETER;
1675 		goto err_unlock_shmem;
1676 	}
1677 
1678 	if (fragment_offset >= obj->desc_size) {
1679 		WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
1680 		     __func__, fragment_offset, obj->desc_size);
1681 		ret = FFA_ERROR_INVALID_PARAMETER;
1682 		goto err_unlock_shmem;
1683 	}
1684 
1685 	spin_lock(&mbox->lock);
1686 
1687 	if (mbox->rxtx_page_count == 0U) {
1688 		WARN("%s: buffer pair not registered.\n", __func__);
1689 		ret = FFA_ERROR_INVALID_PARAMETER;
1690 		goto err_unlock_all;
1691 	}
1692 
1693 	if (mbox->state != MAILBOX_STATE_EMPTY) {
1694 		WARN("%s: RX Buffer is full!\n", __func__);
1695 		ret = FFA_ERROR_DENIED;
1696 		goto err_unlock_all;
1697 	}
1698 
1699 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1700 
1701 	mbox->state = MAILBOX_STATE_FULL;
1702 
1703 	/*
1704 	 * If the caller is v1.0, convert the descriptor; otherwise copy
1705 	 * it directly.
1706 	 */
1707 	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1708 		size_t out_desc_size;
1709 
1710 		ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
1711 							buf_size,
1712 							fragment_offset,
1713 							&copy_size,
1714 							&out_desc_size);
1715 		if (ret != 0U) {
1716 			ERROR("%s: Failed to process descriptor.\n", __func__);
1717 			goto err_unlock_all;
1718 		}
1719 	} else {
1720 		full_copy_size = obj->desc_size - fragment_offset;
1721 		copy_size = MIN(full_copy_size, buf_size);
1722 
1723 		src = (const uint8_t *)&obj->desc;
1724 
1725 		memcpy(mbox->rx_buffer, src + fragment_offset, copy_size);
1726 	}
1727 
1728 	spin_unlock(&mbox->lock);
1729 	spin_unlock(&spmc_shmem_obj_state.lock);
1730 
1731 	SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
1732 		 copy_size, sender_id, 0, 0, 0);
1733 
1734 err_unlock_all:
1735 	spin_unlock(&mbox->lock);
1736 err_unlock_shmem:
1737 	spin_unlock(&spmc_shmem_obj_state.lock);
1738 	return spmc_ffa_error_return(handle, ret);
1739 }
1740 
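/*
 * Usage sketch (illustrative only, not part of this file): how a borrower
 * reassembles a descriptor larger than its RX buffer. After the
 * FFA_MEM_RETRIEVE_RESP (total_len, frag_len) pair, each further fragment
 * is fetched at the running byte offset; the RX buffer must be released
 * first, since the handler above requires MAILBOX_STATE_EMPTY.
 * ffa_rx_release() and ffa_mem_frag_rx() are hypothetical wrappers for the
 * FF-A ABIs; total_len, frag_len and mem_handle are assumed in scope.
 */
#if 0
	uint32_t offset = frag_len;

	while (offset < total_len) {
		/* Consume the fragment currently in the RX buffer, then: */
		ffa_rx_release();	/* Mailbox back to empty. */
		offset += ffa_mem_frag_rx((uint32_t)mem_handle,
					  (uint32_t)(mem_handle >> 32),
					  offset); /* FFA_MEM_FRAG_TX length. */
	}
#endif
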
1741 /**
1742  * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
1743  * @secure_origin:      Must be true; only the secure world may relinquish.
1744  *
1745  * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
1746  * Used by the secure OS to release memory shared by the non-secure OS.
1747  *
1748  * The handle to release must be in the client's (secure OS's) transmit buffer.
1749  *
1750  * Return: 0 on success, error code on failure.
1751  */
1752 int spmc_ffa_mem_relinquish(uint32_t smc_fid,
1753 			    bool secure_origin,
1754 			    uint32_t handle_low,
1755 			    uint32_t handle_high,
1756 			    uint32_t fragment_offset,
1757 			    uint32_t sender_id,
1758 			    void *cookie,
1759 			    void *handle,
1760 			    uint64_t flags)
1761 {
1762 	int ret;
1763 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1764 	struct spmc_shmem_obj *obj;
1765 	const struct ffa_mem_relinquish_descriptor *req;
1766 	struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
1767 
1768 	if (!secure_origin) {
1769 		WARN("%s: unsupported relinquish direction.\n", __func__);
1770 		return spmc_ffa_error_return(handle,
1771 					     FFA_ERROR_INVALID_PARAMETER);
1772 	}
1773 
1774 	spin_lock(&mbox->lock);
1775 
1776 	if (mbox->rxtx_page_count == 0U) {
1777 		WARN("%s: buffer pair not registered.\n", __func__);
1778 		ret = FFA_ERROR_INVALID_PARAMETER;
1779 		goto err_unlock_mailbox;
1780 	}
1781 
1782 	req = mbox->tx_buffer;
1783 
1784 	if (req->flags != 0U) {
1785 		WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
1786 		ret = FFA_ERROR_INVALID_PARAMETER;
1787 		goto err_unlock_mailbox;
1788 	}
1789 
1790 	if (req->endpoint_count == 0U) {
1791 		WARN("%s: endpoint count cannot be 0.\n", __func__);
1792 		ret = FFA_ERROR_INVALID_PARAMETER;
1793 		goto err_unlock_mailbox;
1794 	}
1795 
1796 	spin_lock(&spmc_shmem_obj_state.lock);
1797 
1798 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1799 	if (obj == NULL) {
1800 		ret = FFA_ERROR_INVALID_PARAMETER;
1801 		goto err_unlock_all;
1802 	}
1803 
1804 	/*
1805 	 * Validate the endpoint ID was populated correctly. We don't currently
1806 	 * support proxy endpoints, so the endpoint count must always be 1.
1807 	 */
1808 	if (req->endpoint_count != 1U) {
1809 		WARN("%s: unsupported endpoint count %u != 1\n", __func__,
1810 		     req->endpoint_count);
1811 		ret = FFA_ERROR_INVALID_PARAMETER;
1812 		goto err_unlock_all;
1813 	}
1814 
1815 	/* Validate provided endpoint ID matches the partition ID. */
1816 	if (req->endpoint_array[0] != sp_ctx->sp_id) {
1817 		WARN("%s: invalid endpoint ID %u != %u\n", __func__,
1818 		     req->endpoint_array[0], sp_ctx->sp_id);
1819 		ret = FFA_ERROR_INVALID_PARAMETER;
1820 		goto err_unlock_all;
1821 	}
1822 
1823 	/* Validate the caller is a valid participant. */
1824 	if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
1825 		WARN("%s: Invalid endpoint ID (0x%x).\n",
1826 			__func__, req->endpoint_array[0]);
1827 		ret = FFA_ERROR_INVALID_PARAMETER;
1828 		goto err_unlock_all;
1829 	}
1830 
1831 	if (obj->in_use == 0U) {
1832 		ret = FFA_ERROR_INVALID_PARAMETER;
1833 		goto err_unlock_all;
1834 	}
1835 	obj->in_use--;
1836 
1837 	spin_unlock(&spmc_shmem_obj_state.lock);
1838 	spin_unlock(&mbox->lock);
1839 
1840 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1841 
1842 err_unlock_all:
1843 	spin_unlock(&spmc_shmem_obj_state.lock);
1844 err_unlock_mailbox:
1845 	spin_unlock(&mbox->lock);
1846 	return spmc_ffa_error_return(handle, ret);
1847 }
1848 
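/*
 * Usage sketch (illustrative only, not part of this file): the only
 * TX-buffer descriptor the checks above accept: zero flags, exactly one
 * endpoint, and that endpoint equal to the caller's own partition ID.
 * tx_buf, mem_handle and own_sp_id are assumed SP-side values.
 */
#if 0
	struct ffa_mem_relinquish_descriptor *rel = tx_buf;

	rel->handle = mem_handle;	/* From the original share/lend. */
	rel->flags = 0U;		/* No relinquish flags are supported. */
	rel->endpoint_count = 1U;	/* Proxy endpoints are unsupported. */
	rel->endpoint_array[0] = own_sp_id;
	/* Invoke FFA_MEM_RELINQUISH; on success obj->in_use is decremented. */
#endif
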
1849 /**
1850  * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
1851  * @secure_origin:  Must be false; only the normal world may reclaim.
1852  * @handle_low:     Unique handle of shared memory object to reclaim. Bit[31:0].
1853  * @handle_high:    Unique handle of shared memory object to reclaim.
1854  *                  Bit[63:32].
1855  * @mem_flags:      Must be zero; nonzero flags are rejected.
1856  *
1857  * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
1858  * Used by the non-secure OS to reclaim memory shared with the secure OS.
1859  *
1860  * Return: 0 on success, error code on failure.
1861  */
1862 int spmc_ffa_mem_reclaim(uint32_t smc_fid,
1863 			 bool secure_origin,
1864 			 uint32_t handle_low,
1865 			 uint32_t handle_high,
1866 			 uint32_t mem_flags,
1867 			 uint64_t x4,
1868 			 void *cookie,
1869 			 void *handle,
1870 			 uint64_t flags)
1871 {
1872 	int ret;
1873 	struct spmc_shmem_obj *obj;
1874 	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1875 
1876 	if (secure_origin) {
1877 		WARN("%s: unsupported reclaim direction.\n", __func__);
1878 		return spmc_ffa_error_return(handle,
1879 					     FFA_ERROR_INVALID_PARAMETER);
1880 	}
1881 
1882 	if (mem_flags != 0U) {
1883 		WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
1884 		return spmc_ffa_error_return(handle,
1885 					     FFA_ERROR_INVALID_PARAMETER);
1886 	}
1887 
1888 	spin_lock(&spmc_shmem_obj_state.lock);
1889 
1890 	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1891 	if (obj == NULL) {
1892 		ret = FFA_ERROR_INVALID_PARAMETER;
1893 		goto err_unlock;
1894 	}
1895 	if (obj->in_use != 0U) {
1896 		ret = FFA_ERROR_DENIED;
1897 		goto err_unlock;
1898 	}
1899 
1900 	if (obj->desc_filled != obj->desc_size) {
1901 		WARN("%s: incomplete object desc filled %zu < size %zu\n",
1902 		     __func__, obj->desc_filled, obj->desc_size);
1903 		ret = FFA_ERROR_INVALID_PARAMETER;
1904 		goto err_unlock;
1905 	}
1906 
1907 	/* Allow for platform specific operations to be performed. */
1908 	ret = plat_spmc_shmem_reclaim(&obj->desc);
1909 	if (ret != 0) {
1910 		goto err_unlock;
1911 	}
1912 
1913 	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1914 	spin_unlock(&spmc_shmem_obj_state.lock);
1915 
1916 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1917 
1918 err_unlock:
1919 	spin_unlock(&spmc_shmem_obj_state.lock);
1920 	return spmc_ffa_error_return(handle, ret);
1921 }
1922
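/*
 * Usage sketch (illustrative only, not part of this file): the normal-world
 * call that completes the lifecycle handled above. Reclaim only succeeds
 * once every FFA_MEM_RETRIEVE_REQ has been matched by an FFA_MEM_RELINQUISH
 * (obj->in_use == 0) and the descriptor was fully transmitted. ffa_smc3()
 * is a hypothetical SMC wrapper; mem_handle is assumed in scope.
 */
#if 0
	/* w1/w2 carry the handle halves, w3 the must-be-zero flags. */
	ret = ffa_smc3(FFA_MEM_RECLAIM,
		       (uint32_t)mem_handle,		/* handle_low */
		       (uint32_t)(mem_handle >> 32),	/* handle_high */
		       0U);				/* mem_flags */
	/* On FFA_SUCCESS the handle is freed and may no longer be used. */
#endif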