/*
 * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <assert.h>
#include <errno.h>

#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/object_pool.h>
#include <lib/spinlock.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <services/ffa_svc.h>
#include "spmc.h"
#include "spmc_shared_mem.h"

#include <platform_def.h>

/**
 * struct spmc_shmem_obj - Shared memory object.
 * @desc_size:      Size of @desc.
 * @desc_filled:    Size of @desc already received.
 * @in_use:         Number of clients that have called ffa_mem_retrieve_req
 *                  without a matching ffa_mem_relinquish call.
 * @desc:           FF-A memory region descriptor passed in ffa_mem_share.
 */
struct spmc_shmem_obj {
	size_t desc_size;
	size_t desc_filled;
	size_t in_use;
	struct ffa_mtd_v1_0 desc;
};

/*
 * Declare our data structure to store the metadata of memory share requests.
 * The main datastore is allocated on a per-platform basis to ensure enough
 * storage can be made available.
 * The address of the datastore will be populated by the SPMC during its
 * initialization.
 */
struct spmc_shmem_obj_state spmc_shmem_obj_state = {
	/* Set start value for handle so top 32 bits are needed quickly. */
	.next_handle = 0xffffffc0U,
};
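
/*
 * Datastore layout (see spmc_shmem_obj_alloc()/spmc_shmem_obj_free()):
 * objects are stored back to back, so allocation is a simple bump of
 * @allocated and freeing compacts the tail:
 *
 *   state.data -> | obj 0 | obj 1 | ... | obj n |    free space    |
 *                                                ^
 *                                   state.data + state.allocated
 */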

/**
 * spmc_shmem_obj_size - Convert from descriptor size to object size.
 * @desc_size:  Size of struct ffa_memory_region_descriptor object.
 *
 * Return: Size of struct spmc_shmem_obj object.
 */
static size_t spmc_shmem_obj_size(size_t desc_size)
{
	return desc_size + offsetof(struct spmc_shmem_obj, desc);
}

/**
 * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
 * @state:      Global state.
 * @desc_size:  Size of struct ffa_memory_region_descriptor object that
 *              allocated object will hold.
 *
 * Return: Pointer to newly allocated object, or %NULL if there is not enough
 *         space left. The returned pointer is only valid while @state is
 *         locked; to use it again after unlocking @state,
 *         spmc_shmem_obj_lookup must be called.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
{
	struct spmc_shmem_obj *obj;
	size_t free = state->data_size - state->allocated;

	if (state->data == NULL) {
		ERROR("Missing shmem datastore!\n");
		return NULL;
	}

	if (spmc_shmem_obj_size(desc_size) > free) {
		WARN("%s(0x%zx) failed, free 0x%zx\n",
		     __func__, desc_size, free);
		return NULL;
	}
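	/* Carve the new object from the end of the packed datastore. */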
	obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
	obj->desc = (struct ffa_mtd_v1_0) {0};
	obj->desc_size = desc_size;
	obj->desc_filled = 0;
	obj->in_use = 0;
	state->allocated += spmc_shmem_obj_size(desc_size);
	return obj;
}

/**
 * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
 * @state:      Global state.
 * @obj:        Object to free.
 *
 * Release memory used by @obj. Other objects may move, so on return all
 * pointers to struct spmc_shmem_obj object should be considered invalid, not
 * just @obj.
 *
 * The current implementation always compacts the remaining objects to simplify
 * the allocator and to avoid fragmentation.
 */
static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
				  struct spmc_shmem_obj *obj)
{
	size_t free_size = spmc_shmem_obj_size(obj->desc_size);
	uint8_t *shift_dest = (uint8_t *)obj;
	uint8_t *shift_src = shift_dest + free_size;
	size_t shift_size = state->allocated - (shift_src - state->data);

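	/*
	 * Compact the datastore: slide every object stored after @obj down
	 * over the freed slot so the allocation stays contiguous.
	 */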
	if (shift_size != 0U) {
		memmove(shift_dest, shift_src, shift_size);
	}
	state->allocated -= free_size;
}

/**
 * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
 * @state:      Global state.
 * @handle:     Unique handle of object to return.
 *
 * Return: struct spmc_shmem_obj object with handle matching @handle.
 *         %NULL, if no object in @state->data has a matching handle.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
{
	uint8_t *curr = state->data;

	while (curr - state->data < state->allocated) {
		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

		if (obj->desc.handle == handle) {
			return obj;
		}
		curr += spmc_shmem_obj_size(obj->desc_size);
	}
	return NULL;
}

/**
 * spmc_shmem_obj_get_next - Get the next memory object from an offset.
 * @state:      Global state.
 * @offset:     Offset used to track which objects have previously been
 *              returned.
 *
 * Return: the next struct spmc_shmem_obj object from the provided
 *	   offset.
 *	   %NULL, if there are no more objects.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
{
	uint8_t *curr = state->data + *offset;

	if (curr - state->data < state->allocated) {
		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

		*offset += spmc_shmem_obj_size(obj->desc_size);

		return obj;
	}
	return NULL;
}

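/**
 * spmc_shmem_obj_get_comp_mrd - Get the composite memory region descriptor.
 * @obj:    Object containing ffa_memory_region_descriptor.
 *
 * Return: Pointer to the composite memory region descriptor referenced by the
 *         first endpoint descriptor in @obj. For fully received objects,
 *         spmc_shmem_check_obj() ensures every endpoint descriptor references
 *         the same composite offset.
 */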
static struct ffa_comp_mrd *
spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj)
{
	return (struct ffa_comp_mrd *)
		((uint8_t *)(&obj->desc) + obj->desc.emad[0].comp_mrd_offset);
}

/**
 * spmc_shmem_obj_ffa_constituent_size - Calculate variable size part of obj.
 * @obj:    Object containing ffa_memory_region_descriptor.
 *
 * Return: Size of ffa_constituent_memory_region_descriptors in @obj.
 */
static size_t
spmc_shmem_obj_ffa_constituent_size(struct spmc_shmem_obj *obj)
{
	return spmc_shmem_obj_get_comp_mrd(obj)->address_range_count *
		sizeof(struct ffa_cons_mrd);
}

/*
 * Compare two composite memory region descriptors and determine whether any
 * of their constituent address ranges overlap; overlapping ranges indicate
 * memory already involved in another ongoing transaction.
 */
static bool
overlapping_memory_regions(struct ffa_comp_mrd *region1,
			   struct ffa_comp_mrd *region2)
{
	uint64_t region1_start;
	uint64_t region1_size;
	uint64_t region1_end;
	uint64_t region2_start;
	uint64_t region2_size;
	uint64_t region2_end;

	assert(region1 != NULL);
	assert(region2 != NULL);

	if (region1 == region2) {
		return true;
	}

	/*
	 * Check each memory range in region1 against each range in region2.
	 */
	for (size_t i = 0; i < region1->address_range_count; i++) {

		region1_start = region1->address_range_array[i].address;
		region1_size =
			region1->address_range_array[i].page_count *
			PAGE_SIZE_4KB;
		region1_end = region1_start + region1_size;

		for (size_t j = 0; j < region2->address_range_count; j++) {

			region2_start = region2->address_range_array[j].address;
			region2_size =
				region2->address_range_array[j].page_count *
				PAGE_SIZE_4KB;
			region2_end = region2_start + region2_size;

			/*
			 * Two half-open ranges [start, end) intersect iff
			 * each starts before the other ends. This also covers
			 * the case where one range fully contains the other.
			 */
			if ((region1_start < region2_end) &&
			    (region2_start < region1_end)) {
				WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
				     region1_start, region1_end,
				     region2_start, region2_end);
				return true;
			}
		}
	}
	return false;
}

/**
 * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
 * @obj:    Object containing ffa_memory_region_descriptor.
 *
 * Return: 0 if object is valid, -EINVAL if constituent_memory_region_descriptor
 * offset or count is invalid.
 */
static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj)
{
	uint32_t comp_mrd_offset = 0;

	if (obj->desc.emad_count == 0U) {
		WARN("%s: unsupported endpoint desc count %u.\n",
		     __func__, obj->desc.emad_count);
		return -EINVAL;
	}

	/*
	 * Ensure the emad array lies within the bounds of the descriptor by
	 * checking the address of the element past the end of the array.
	 */
	if ((uintptr_t) &obj->desc.emad[obj->desc.emad_count] >
	    (uintptr_t)((uint8_t *) &obj->desc + obj->desc_size)) {
		WARN("Invalid emad access.\n");
		return -EINVAL;
	}

	for (size_t emad_num = 0; emad_num < obj->desc.emad_count; emad_num++) {
		size_t size;
		size_t count;
		size_t expected_size;
		size_t total_page_count;
		struct ffa_comp_mrd *comp;

		uint32_t offset = obj->desc.emad[emad_num].comp_mrd_offset;
		size_t header_emad_size = sizeof(obj->desc) +
			obj->desc.emad_count * sizeof(obj->desc.emad[emad_num]);

		if (offset < header_emad_size) {
			WARN("%s: invalid object, offset %u < header + emad %zu\n",
			     __func__, offset, header_emad_size);
			return -EINVAL;
		}

		size = obj->desc_size;

		if (offset > size) {
			WARN("%s: invalid object, offset %u > total size %zu\n",
			     __func__, offset, obj->desc_size);
			return -EINVAL;
		}
		size -= offset;

		if (size < sizeof(struct ffa_comp_mrd)) {
			WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
			     __func__, offset, obj->desc_size);
			return -EINVAL;
		}
		size -= sizeof(struct ffa_comp_mrd);

		count = size / sizeof(struct ffa_cons_mrd);

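		/*
		 * Fetch the composite descriptor and sanity check that the
		 * constituent count the sender declared matches the count
		 * implied by the remaining descriptor space.
		 */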
		comp = spmc_shmem_obj_get_comp_mrd(obj);

		if (comp->address_range_count != count) {
			WARN("%s: invalid object, desc count %u != %zu\n",
			     __func__, comp->address_range_count, count);
			return -EINVAL;
		}

		expected_size = offset + sizeof(*comp) +
				spmc_shmem_obj_ffa_constituent_size(obj);
		if (expected_size != obj->desc_size) {
			WARN("%s: invalid object, computed size %zu != size %zu\n",
			     __func__, expected_size, obj->desc_size);
			return -EINVAL;
		}

		if (obj->desc_filled < obj->desc_size) {
			/*
			 * The whole descriptor has not yet been received.
			 * Skip final checks.
			 */
			return 0;
		}

		/*
		 * The offset provided to the composite memory region descriptor
		 * should be consistent across endpoint descriptors. Store the
		 * first entry and compare against subsequent entries.
		 */
		if (comp_mrd_offset == 0) {
			comp_mrd_offset = offset;
		} else {
			if (comp_mrd_offset != offset) {
				ERROR("%s: mismatching offsets provided, %u != %u\n",
				      __func__, offset, comp_mrd_offset);
				return -EINVAL;
			}
		}

		total_page_count = 0;

		for (size_t i = 0; i < count; i++) {
			total_page_count +=
				comp->address_range_array[i].page_count;
		}
		if (comp->total_page_count != total_page_count) {
			WARN("%s: invalid object, desc total_page_count %u != %zu\n",
			     __func__, comp->total_page_count,
			     total_page_count);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * spmc_shmem_check_state_obj - Check if the descriptor describes memory
 *				regions that are currently involved in an
 *				existing memory transaction. This implies that
 *				the memory is not in a valid state for lending.
 * @obj:    Object containing ffa_memory_region_descriptor.
 *
 * Return: 0 if object is valid, -EINVAL if invalid memory state.
 */
static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj)
{
	size_t obj_offset = 0;
	struct spmc_shmem_obj *inflight_obj;

	struct ffa_comp_mrd *other_mrd;
	struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj);

	inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
					       &obj_offset);

	while (inflight_obj != NULL) {
		/*
		 * Don't compare the transaction to itself or to partially
		 * transmitted descriptors.
		 */
		if ((obj->desc.handle != inflight_obj->desc.handle) &&
		    (inflight_obj->desc_size == inflight_obj->desc_filled)) {
			other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj);

			if (overlapping_memory_regions(requested_mrd,
						       other_mrd)) {
				return -EINVAL;
			}
		}

		inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
						       &obj_offset);
	}
	return 0;
}

static long spmc_ffa_fill_desc(struct mailbox *mbox,
			       struct spmc_shmem_obj *obj,
			       uint32_t fragment_length,
			       ffa_mtd_flag32_t mtd_flag,
			       void *smc_handle)
{
	int ret;
	uint32_t handle_low;
	uint32_t handle_high;

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = -EINVAL;
		goto err_arg;
	}

	if (fragment_length > mbox->rxtx_page_count * PAGE_SIZE_4KB) {
		WARN("%s: bad fragment size %u > %u buffer size\n", __func__,
		     fragment_length, mbox->rxtx_page_count * PAGE_SIZE_4KB);
		ret = -EINVAL;
		goto err_arg;
	}

	/*
	 * Ensure the fragment fits in the remaining descriptor space before
	 * copying it out of the sender's TX buffer.
	 */
	if (fragment_length > obj->desc_size - obj->desc_filled) {
		WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
		     fragment_length, obj->desc_size - obj->desc_filled);
		ret = -EINVAL;
		goto err_arg;
	}

	memcpy((uint8_t *)&obj->desc + obj->desc_filled,
	       (uint8_t *) mbox->tx_buffer,
	       fragment_length);

	/* Ensure that the sender ID resides in the normal world. */
	if (ffa_is_secure_world_id(obj->desc.sender_id)) {
		WARN("%s: Invalid sender ID 0x%x.\n",
		     __func__, obj->desc.sender_id);
		ret = FFA_ERROR_DENIED;
		goto err_arg;
	}

	/*
	 * We don't currently support any optional flags, so if the descriptor
	 * carries any flags they must exactly match the transaction type
	 * derived from the SMC function ID.
	 */
	if (obj->desc.flags != 0U && mtd_flag != 0U &&
	    (obj->desc.flags != mtd_flag)) {
		WARN("%s: invalid memory transaction flags %u != %u\n",
		     __func__, obj->desc.flags, mtd_flag);
		ret = -EINVAL;
		goto err_arg;
	}

	if (obj->desc_filled == 0U) {
		/* First fragment; the descriptor header has been copied. */
		obj->desc.handle = spmc_shmem_obj_state.next_handle++;
		obj->desc.flags |= mtd_flag;
	}

	obj->desc_filled += fragment_length;

	ret = spmc_shmem_check_obj(obj);
	if (ret != 0) {
		goto err_bad_desc;
	}

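	/* Split the 64-bit handle across two 32-bit return registers. */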
	handle_low = (uint32_t)obj->desc.handle;
	handle_high = obj->desc.handle >> 32;

	if (obj->desc_filled != obj->desc_size) {
		SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
			 handle_high, obj->desc_filled,
			 (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
	}

	/* The full descriptor has been received, perform any final checks. */

	/*
	 * If a partition ID resides in the secure world validate that the
	 * partition ID is for a known partition. Ignore any partition ID
	 * belonging to the normal world as it is assumed the Hypervisor will
	 * have validated these.
	 */
	for (size_t i = 0; i < obj->desc.emad_count; i++) {
		ffa_endpoint_id16_t ep_id = obj->desc.emad[i].mapd.endpoint_id;

		if (ffa_is_secure_world_id(ep_id)) {
			if (spmc_get_sp_ctx(ep_id) == NULL) {
				WARN("%s: Invalid receiver id 0x%x\n",
				     __func__, ep_id);
				ret = FFA_ERROR_INVALID_PARAMETER;
				goto err_bad_desc;
			}
		}
	}

	/* Ensure partition IDs are not duplicated. */
	for (size_t i = 0; i < obj->desc.emad_count; i++) {
		for (size_t j = i + 1; j < obj->desc.emad_count; j++) {
			if (obj->desc.emad[i].mapd.endpoint_id ==
				obj->desc.emad[j].mapd.endpoint_id) {
				ret = FFA_ERROR_INVALID_PARAMETER;
				goto err_bad_desc;
			}
		}
	}

	ret = spmc_shmem_check_state_obj(obj);
	if (ret != 0) {
		ERROR("%s: invalid memory region descriptor.\n", __func__);
		goto err_bad_desc;
	}

	SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
		 0, 0, 0);

err_bad_desc:
err_arg:
	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
	/* Map internal errno-style failures onto the FF-A error space. */
	if (ret == -EINVAL) {
		ret = FFA_ERROR_INVALID_PARAMETER;
	}
	return spmc_ffa_error_return(smc_handle, ret);
}

/**
 * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
 * @smc_fid:            FID of SMC.
 * @secure_origin:      true if the call originated from the secure world.
 * @total_length:       Total length of shared memory descriptor.
 * @fragment_length:    Length of fragment of shared memory descriptor passed in
 *                      this call.
 * @address:            Not supported, must be 0.
 * @page_count:         Not supported, must be 0.
 * @smc_handle:         Handle passed to smc call. Used to return
 *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
 *
 * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
 * to share or lend memory from the non-secure OS to the secure OS (with no
 * stream endpoints).
 *
 * Return: 0 on success, error code on failure.
 */
long spmc_ffa_mem_send(uint32_t smc_fid,
			bool secure_origin,
			uint64_t total_length,
			uint32_t fragment_length,
			uint64_t address,
			uint32_t page_count,
			void *cookie,
			void *handle,
			uint64_t flags)
{
	long ret;
	struct spmc_shmem_obj *obj;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	ffa_mtd_flag32_t mtd_flag;

	if (address != 0U || page_count != 0U) {
		WARN("%s: custom memory region for message not supported.\n",
		     __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (secure_origin) {
		WARN("%s: unsupported share direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (fragment_length < sizeof(obj->desc)) {
		WARN("%s: bad first fragment size %u < %zu\n",
		     __func__, fragment_length, sizeof(obj->desc));
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
		mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
	} else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
		mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
	} else {
		WARN("%s: invalid memory management operation.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

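	/*
	 * Lock ordering: take the shared memory object-state lock first, then
	 * the sender's mailbox lock while the fragment is consumed.
	 */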
	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
	if (obj == NULL) {
		ret = FFA_ERROR_NO_MEMORY;
		goto err_unlock;
	}

	spin_lock(&mbox->lock);
	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag, handle);
	spin_unlock(&mbox->lock);

	spin_unlock(&spmc_shmem_obj_state.lock);
	return ret;

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
 * @smc_fid:            FID of SMC.
 * @secure_origin:      true if the call originated from the secure world.
 * @handle_low:         Handle_low value returned from FFA_MEM_FRAG_RX.
 * @handle_high:        Handle_high value returned from FFA_MEM_FRAG_RX.
 * @fragment_length:    Length of fragments transmitted.
 * @sender_id:          Vmid of sender in bits [31:16].
 * @smc_handle:         Handle passed to smc call. Used to return
 *                      FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
 *
 * Return: @smc_handle on success, error code on failure.
 */
long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
			  bool secure_origin,
			  uint64_t handle_low,
			  uint64_t handle_high,
			  uint32_t fragment_length,
			  uint32_t sender_id,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	long ret;
	uint32_t desc_sender_id;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	struct spmc_shmem_obj *obj;
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
	if (obj == NULL) {
		WARN("%s: 0x%lx is not a valid handle.\n",
		     __func__, mem_handle);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
	if (sender_id != desc_sender_id) {
		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
		     sender_id, desc_sender_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

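	/* A fragment for a fully received descriptor is a protocol error. */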
	if (obj->desc_filled == obj->desc_size) {
		WARN("%s: object desc already filled, %zu\n", __func__,
		     obj->desc_filled);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	spin_lock(&mbox->lock);
	ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, handle);
	spin_unlock(&mbox->lock);

	spin_unlock(&spmc_shmem_obj_state.lock);
	return ret;

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
 * @smc_fid:            FID of SMC.
 * @secure_origin:      true if the call originated from the secure world.
 * @total_length:       Total length of retrieve request descriptor if this is
 *                      the first call. Otherwise (unsupported) must be 0.
 * @fragment_length:    Length of fragment of retrieve request descriptor passed
 *                      in this call. Only @fragment_length == @total_length is
 *                      supported by this implementation.
 * @address:            Not supported, must be 0.
 * @page_count:         Not supported, must be 0.
 * @smc_handle:         Handle passed to smc call. Used to return
 *                      FFA_MEM_RETRIEVE_RESP.
 *
 * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
 * Used by the secure OS to retrieve memory already shared by the non-secure
 * OS. If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
 * the client must call FFA_MEM_FRAG_RX until the full response has been
 * received.
 *
 * Return: @handle on success, error code on failure.
 */
long
spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
			  bool secure_origin,
			  uint32_t total_length,
			  uint32_t fragment_length,
			  uint64_t address,
			  uint32_t page_count,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	int ret;
	size_t buf_size;
	size_t copy_size;
	struct ffa_mtd_v1_0 *resp;
	const struct ffa_mtd_v1_0 *req;
	struct spmc_shmem_obj *obj = NULL;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);

	if (!secure_origin) {
		WARN("%s: unsupported retrieve req direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (address != 0U || page_count != 0U) {
		WARN("%s: custom memory region not supported.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&mbox->lock);

	req = mbox->tx_buffer;
	resp = mbox->rx_buffer;
	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	if (mbox->state != MAILBOX_STATE_EMPTY) {
		WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
		ret = FFA_ERROR_DENIED;
		goto err_unlock_mailbox;
	}

	if (fragment_length != total_length) {
		WARN("%s: fragmented retrieve request not supported.\n",
		     __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	if (total_length < sizeof(*req)) {
		WARN("%s: invalid length %u < %zu\n", __func__, total_length,
		     sizeof(*req));
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	if (req->emad_count == 0U) {
		WARN("%s: unsupported endpoint desc count %u.\n",
		     __func__, req->emad_count);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
	if (obj == NULL) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (obj->desc_filled != obj->desc_size) {
		WARN("%s: incomplete object desc filled %zu < size %zu\n",
		     __func__, obj->desc_filled, obj->desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
		WARN("%s: wrong sender id 0x%x != 0x%x\n",
		     __func__, req->sender_id, obj->desc.sender_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->emad_count != 0U && req->tag != obj->desc.tag) {
		WARN("%s: wrong tag 0x%lx != 0x%lx\n",
		     __func__, req->tag, obj->desc.tag);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
		WARN("%s: mismatch of endpoint counts %u != %u\n",
		     __func__, req->emad_count, obj->desc.emad_count);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (req->flags != 0U) {
		if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
		    (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
			/*
			 * If the retrieve request specifies the memory
			 * transaction type, ensure it matches what we expect.
			 */
			WARN("%s: wrong mem transaction flags %x != %x\n",
			     __func__, req->flags, obj->desc.flags);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}

		if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
		    req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
			/*
			 * The current implementation does not support donate
			 * and it supports no other flags.
			 */
			WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}
	}

	/*
	 * Ensure the emad array lies within the bounds of the descriptor by
	 * checking the address of the element past the end of the array.
	 */
	if ((uintptr_t) &req->emad[req->emad_count] >
	    (uintptr_t)((uint8_t *) req + total_length)) {
		WARN("Invalid emad access.\n");
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	/*
	 * Validate that all the endpoints match in the case of multiple
	 * borrowers. We don't mandate that the order of the borrowers
	 * must match in the descriptors, therefore check to see if the
	 * endpoints match in any order.
	 */
	for (size_t i = 0; i < req->emad_count; i++) {
		bool found = false;

		for (size_t j = 0; j < obj->desc.emad_count; j++) {
			if (req->emad[i].mapd.endpoint_id ==
			    obj->desc.emad[j].mapd.endpoint_id) {
				found = true;
				break;
			}
		}

		if (!found) {
			WARN("%s: invalid receiver id (0x%x).\n",
			     __func__, req->emad[i].mapd.endpoint_id);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}
	}

	mbox->state = MAILBOX_STATE_FULL;

	if (req->emad_count != 0U) {
		obj->in_use++;
	}

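	/*
	 * The response reuses the stored descriptor. If it does not fit in
	 * the RX buffer, the receiver must fetch the remainder via
	 * FFA_MEM_FRAG_RX.
	 */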
	copy_size = MIN(obj->desc_size, buf_size);

	memcpy(resp, &obj->desc, copy_size);

	spin_unlock(&spmc_shmem_obj_state.lock);
	spin_unlock(&mbox->lock);

	SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, obj->desc_size,
		 copy_size, 0, 0, 0, 0, 0);

err_unlock_all:
	spin_unlock(&spmc_shmem_obj_state.lock);
err_unlock_mailbox:
	spin_unlock(&mbox->lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
 * @smc_fid:            FID of SMC.
 * @secure_origin:      true if the call originated from the secure world.
 * @handle_low:         Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
 * @handle_high:        Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
 * @fragment_offset:    Byte offset in descriptor to resume at.
 * @sender_id:          Bit[31:16]: Endpoint id of sender if client is a
 *                      hypervisor. 0 otherwise.
 * @smc_handle:         Handle passed to smc call. Used to return
 *                      FFA_MEM_FRAG_TX.
 *
 * Return: @smc_handle on success, error code on failure.
 */
long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
			  bool secure_origin,
			  uint32_t handle_low,
			  uint32_t handle_high,
			  uint32_t fragment_offset,
			  uint32_t sender_id,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	int ret;
	uint8_t *src;
	size_t buf_size;
	size_t copy_size;
	size_t full_copy_size;
	uint32_t desc_sender_id;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
	struct spmc_shmem_obj *obj;

	if (!secure_origin) {
		WARN("%s: can only be called from the secure world.\n",
		     __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
	if (obj == NULL) {
		WARN("%s: 0x%lx is not a valid handle.\n",
		     __func__, mem_handle);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_shmem;
	}

	desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
	if (sender_id != 0U && sender_id != desc_sender_id) {
		WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
		     sender_id, desc_sender_id);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_shmem;
	}

	if (fragment_offset >= obj->desc_size) {
		WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
		     __func__, fragment_offset, obj->desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_shmem;
	}

	spin_lock(&mbox->lock);

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (mbox->state != MAILBOX_STATE_EMPTY) {
		WARN("%s: RX Buffer is full!\n", __func__);
		ret = FFA_ERROR_DENIED;
		goto err_unlock_all;
	}

	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;

	mbox->state = MAILBOX_STATE_FULL;

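	/* Copy as much of the remaining descriptor as the RX buffer allows. */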
	full_copy_size = obj->desc_size - fragment_offset;
	copy_size = MIN(full_copy_size, buf_size);

	src = (uint8_t *)&obj->desc;

	memcpy(mbox->rx_buffer, src + fragment_offset, copy_size);

	spin_unlock(&mbox->lock);
	spin_unlock(&spmc_shmem_obj_state.lock);

	SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
		 copy_size, sender_id, 0, 0, 0);

err_unlock_all:
	spin_unlock(&mbox->lock);
err_unlock_shmem:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
 * @smc_fid:            FID of SMC.
 * @secure_origin:      true if the call originated from the secure world.
 *
 * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
 * Used by the secure OS to release memory previously shared by the non-secure
 * OS.
 *
 * The handle to release must be in the client's (secure OS's) transmit buffer.
 *
 * Return: 0 on success, error code on failure.
 */
int spmc_ffa_mem_relinquish(uint32_t smc_fid,
			    bool secure_origin,
			    uint32_t handle_low,
			    uint32_t handle_high,
			    uint32_t fragment_offset,
			    uint32_t sender_id,
			    void *cookie,
			    void *handle,
			    uint64_t flags)
{
	int ret;
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	struct spmc_shmem_obj *obj;
	const struct ffa_mem_relinquish_descriptor *req;

	if (!secure_origin) {
		WARN("%s: unsupported relinquish direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&mbox->lock);

	if (mbox->rxtx_page_count == 0U) {
		WARN("%s: buffer pair not registered.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	req = mbox->tx_buffer;

	if (req->flags != 0U) {
		WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	if (req->endpoint_count == 0U) {
		WARN("%s: endpoint count cannot be 0.\n", __func__);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_mailbox;
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
	if (obj == NULL) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	if (obj->desc.emad_count != req->endpoint_count) {
		WARN("%s: mismatch of endpoint count %u != %u\n", __func__,
		     obj->desc.emad_count, req->endpoint_count);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

	/* Validate requested endpoint IDs match descriptor. */
	for (size_t i = 0; i < req->endpoint_count; i++) {
		bool found = false;

		for (unsigned int j = 0; j < obj->desc.emad_count; j++) {
			if (req->endpoint_array[i] ==
			    obj->desc.emad[j].mapd.endpoint_id) {
				found = true;
				break;
			}
		}

		if (!found) {
			WARN("%s: Invalid endpoint ID (0x%x).\n",
			     __func__, req->endpoint_array[i]);
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err_unlock_all;
		}
	}

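	/* Each relinquish must pair with an earlier successful retrieve. */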
	if (obj->in_use == 0U) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}
	obj->in_use--;

	spin_unlock(&spmc_shmem_obj_state.lock);
	spin_unlock(&mbox->lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);

err_unlock_all:
	spin_unlock(&spmc_shmem_obj_state.lock);
err_unlock_mailbox:
	spin_unlock(&mbox->lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
 * @smc_fid:        FID of SMC.
 * @secure_origin:  true if the call originated from the secure world.
 * @handle_low:     Unique handle of shared memory object to reclaim. Bit[31:0].
 * @handle_high:    Unique handle of shared memory object to reclaim.
 *                  Bit[63:32].
 * @mem_flags:      Memory management flags. No flags are currently supported,
 *                  so this must be 0.
 *
 * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
 * Used by the non-secure OS to reclaim memory previously shared with the
 * secure OS.
 *
 * Return: 0 on success, error code on failure.
 */
int spmc_ffa_mem_reclaim(uint32_t smc_fid,
			 bool secure_origin,
			 uint32_t handle_low,
			 uint32_t handle_high,
			 uint32_t mem_flags,
			 uint64_t x4,
			 void *cookie,
			 void *handle,
			 uint64_t flags)
{
	int ret;
	struct spmc_shmem_obj *obj;
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);

	if (secure_origin) {
		WARN("%s: unsupported reclaim direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (mem_flags != 0U) {
		WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
	if (obj == NULL) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}
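	/* Memory cannot be reclaimed while a borrower still holds it. */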
	if (obj->in_use != 0U) {
		ret = FFA_ERROR_DENIED;
		goto err_unlock;
	}
	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
	spin_unlock(&spmc_shmem_obj_state.lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}
1154