/*
 * Copyright (c) 2022-2023, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <assert.h>
#include <errno.h>
#include <inttypes.h>

#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/object_pool.h>
#include <lib/spinlock.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <services/ffa_svc.h>
#include "spmc.h"
#include "spmc_shared_mem.h"

#include <platform_def.h>

/**
 * struct spmc_shmem_obj - Shared memory object.
 * @desc_size:      Size of @desc.
 * @desc_filled:    Size of @desc already received.
 * @in_use:         Number of clients that have called ffa_mem_retrieve_req
 *                  without a matching ffa_mem_relinquish call.
 * @hyp_shift:      If the last ffa_mem_retrieve_req came from a hypervisor
 *                  on its own behalf, shift the fragment offset in the
 *                  descriptor forward by this amount to get the correct
 *                  position of the next fragment.
 * @desc:           FF-A memory region descriptor passed in ffa_mem_share.
 */
struct spmc_shmem_obj {
	size_t desc_size;
	size_t desc_filled;
	size_t in_use;
	ssize_t hyp_shift;
	struct ffa_mtd desc;
};
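
/*
 * The descriptor data extends past the end of @desc, so each object
 * occupies offsetof(struct spmc_shmem_obj, desc) + desc_size bytes of the
 * datastore, and objects are packed back to back (see
 * spmc_shmem_obj_alloc() and spmc_shmem_obj_free() below).
 */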

/*
 * Declare our data structure to store the metadata of memory share requests.
 * The main datastore is allocated on a per-platform basis to ensure enough
 * storage can be made available.
 * The address of the datastore will be populated by the SPMC during its
 * initialization.
 */

struct spmc_shmem_obj_state spmc_shmem_obj_state = {
	/* Set start value for handle so top 32 bits are needed quickly. */
	.next_handle = 0xffffffc0U,
};
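
/*
 * With this start value the handle counter crosses into the upper 32 bits
 * after only 64 allocations (0xffffffc0 + 0x40 == 2^32), so clients that
 * truncate the 64-bit handle are caught early.
 */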

/**
 * spmc_shmem_obj_size - Convert from descriptor size to object size.
 * @desc_size:   Size of struct ffa_memory_region_descriptor object.
 *
 * Return: Size of struct spmc_shmem_obj object.
 */
static size_t spmc_shmem_obj_size(size_t desc_size)
{
	return desc_size + offsetof(struct spmc_shmem_obj, desc);
}

/**
 * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
 * @state:       Global state.
 * @desc_size:   Size of struct ffa_memory_region_descriptor object that
 *               allocated object will hold.
 *
 * Return: Pointer to newly allocated object, or %NULL if there is not enough
 *         space left. The returned pointer is only valid while @state is
 *         locked; to use it again after unlocking @state,
 *         spmc_shmem_obj_lookup must be called.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
{
	struct spmc_shmem_obj *obj;
	size_t free = state->data_size - state->allocated;
	size_t obj_size;

	if (state->data == NULL) {
		ERROR("Missing shmem datastore!\n");
		return NULL;
	}

	/* Ensure that descriptor size is aligned. */
	if (!is_aligned(desc_size, 16)) {
		WARN("%s(0x%zx) desc_size not 16-byte aligned\n",
		     __func__, desc_size);
		return NULL;
	}

	obj_size = spmc_shmem_obj_size(desc_size);

	/* Ensure the obj size has not overflowed. */
	if (obj_size < desc_size) {
		WARN("%s(0x%zx) desc_size overflow\n",
		     __func__, desc_size);
		return NULL;
	}

	if (obj_size > free) {
		WARN("%s(0x%zx) failed, free 0x%zx\n",
		     __func__, desc_size, free);
		return NULL;
	}
	obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
	obj->desc = (struct ffa_mtd) {0};
	obj->desc_size = desc_size;
	obj->desc_filled = 0;
	obj->in_use = 0;
	obj->hyp_shift = 0;
	state->allocated += obj_size;
	return obj;
}

/**
 * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
 * @state:      Global state.
 * @obj:        Object to free.
 *
 * Release memory used by @obj. Other objects may move, so on return all
 * pointers to struct spmc_shmem_obj object should be considered invalid, not
 * just @obj.
 *
 * The current implementation always compacts the remaining objects to simplify
 * the allocator and to avoid fragmentation.
 */

static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
				struct spmc_shmem_obj *obj)
{
	size_t free_size = spmc_shmem_obj_size(obj->desc_size);
	uint8_t *shift_dest = (uint8_t *)obj;
	uint8_t *shift_src = shift_dest + free_size;
	size_t shift_size = state->allocated - (shift_src - state->data);

	if (shift_size != 0U) {
		memmove(shift_dest, shift_src, shift_size);
	}
	state->allocated -= free_size;
}

/**
 * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
 * @state:      Global state.
 * @handle:     Unique handle of object to return.
 *
 * Return: struct spmc_shmem_obj object with handle matching @handle.
 *         %NULL, if no object in @state->data has a matching handle.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
{
	uint8_t *curr = state->data;

	while (curr - state->data < state->allocated) {
		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

		if (obj->desc.handle == handle) {
			return obj;
		}
		curr += spmc_shmem_obj_size(obj->desc_size);
	}
	return NULL;
}

/**
 * spmc_shmem_obj_get_next - Get the next memory object from an offset.
 * @state:      Global state.
 * @offset:     Offset used to track which objects have previously been
 *              returned.
 *
 * Return: the next struct spmc_shmem_obj object from the provided offset.
 *         %NULL, if there are no more objects.
 */
static struct spmc_shmem_obj *
spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
{
	uint8_t *curr = state->data + *offset;

	if (curr - state->data < state->allocated) {
		struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;

		*offset += spmc_shmem_obj_size(obj->desc_size);

		return obj;
	}
	return NULL;
}
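
/*
 * Illustrative iteration idiom (as used by spmc_shmem_check_state_obj()
 * below):
 *
 *	size_t offset = 0;
 *	struct spmc_shmem_obj *obj;
 *
 *	while ((obj = spmc_shmem_obj_get_next(state, &offset)) != NULL) {
 *		...
 *	}
 *
 * The offset must start at zero and becomes stale if objects are allocated
 * or freed mid-iteration, since the datastore is compacted on free.
 */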

/*******************************************************************************
 * FF-A memory descriptor helper functions.
 ******************************************************************************/
/**
 * spmc_shmem_obj_get_emad - Get the emad from a given index depending on the
 *                           client's FF-A version.
 * @desc:         The memory transaction descriptor.
 * @index:        The index of the emad element to be accessed.
 * @ffa_version:  FF-A version of the provided structure.
 * @emad_size:    Will be populated with the size of the returned emad
 *                descriptor.
 * Return: A pointer to the requested emad structure.
 */
static void *
spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
			uint32_t ffa_version, size_t *emad_size)
{
	uint8_t *emad;

	assert(index < desc->emad_count);

	/*
	 * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
	 * format, otherwise assume it is a v1.1 format.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		emad = (uint8_t *)desc + offsetof(struct ffa_mtd_v1_0, emad);
		*emad_size = sizeof(struct ffa_emad_v1_0);
	} else {
		assert(is_aligned(desc->emad_offset, 16));
		emad = ((uint8_t *) desc + desc->emad_offset);
		*emad_size = desc->emad_size;
	}

	assert(((uint64_t)index * (uint64_t)*emad_size) <= UINT32_MAX);
	return (emad + (*emad_size * index));
}
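
/*
 * Note: the asserts above assume the descriptor header has already been
 * validated, e.g. by spmc_validate_mtd_start(), which checks emad_count
 * and, for v1.1 onwards, the alignment and minimum values of emad_offset
 * and emad_size.
 */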

/**
 * spmc_shmem_obj_get_comp_mrd - Get comp_mrd from a mtd struct based on the
 *                               FF-A version of the descriptor.
 * @obj:          Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: struct ffa_comp_mrd object corresponding to the composite memory
 *         region descriptor.
 */
static struct ffa_comp_mrd *
spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
{
	size_t emad_size;
	/*
	 * The comp_mrd_offset field of the emad descriptor remains consistent
	 * between FF-A versions therefore we can use the v1.0 descriptor here
	 * in all cases.
	 */
	struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
							     ffa_version,
							     &emad_size);

	/* Ensure the composite descriptor offset is aligned. */
	if (!is_aligned(emad->comp_mrd_offset, 8)) {
		WARN("Unaligned composite memory region descriptor offset.\n");
		return NULL;
	}

	return (struct ffa_comp_mrd *)
	       ((uint8_t *)(&obj->desc) + emad->comp_mrd_offset);
}

/**
 * spmc_shmem_obj_validate_id - Validate a partition ID is participating in
 *                              a given memory transaction.
 * @obj:      The shared memory object containing the descriptor
 *            of the memory transaction.
 * @sp_id:    Partition ID to validate.
 * Return: true if ID is valid, else false.
 */
bool spmc_shmem_obj_validate_id(struct spmc_shmem_obj *obj, uint16_t sp_id)
{
	bool found = false;
	struct ffa_mtd *desc = &obj->desc;
	size_t desc_size = obj->desc_size;

	/* Validate the partition is a valid participant. */
	for (unsigned int i = 0U; i < desc->emad_count; i++) {
		size_t emad_size;
		struct ffa_emad_v1_0 *emad;

		emad = spmc_shmem_obj_get_emad(desc, i,
					       MAKE_FFA_VERSION(1, 1),
					       &emad_size);
		/*
		 * Validate the calculated emad address resides within the
		 * descriptor.
		 */
		if ((emad == NULL) || (uintptr_t) emad >=
		    (uintptr_t)((uint8_t *) desc + desc_size)) {
			VERBOSE("Invalid emad.\n");
			break;
		}
		if (sp_id == emad->mapd.endpoint_id) {
			found = true;
			break;
		}
	}
	return found;
}

/*
 * Compare two memory regions to determine if any range overlaps with another
 * ongoing memory transaction.
 */
static bool
overlapping_memory_regions(struct ffa_comp_mrd *region1,
			   struct ffa_comp_mrd *region2)
{
	uint64_t region1_start;
	uint64_t region1_size;
	uint64_t region1_end;
	uint64_t region2_start;
	uint64_t region2_size;
	uint64_t region2_end;

	assert(region1 != NULL);
	assert(region2 != NULL);

	if (region1 == region2) {
		return true;
	}

	/*
	 * Check each memory region in the request against existing
	 * transactions.
	 */
	for (size_t i = 0; i < region1->address_range_count; i++) {

		region1_start = region1->address_range_array[i].address;
		region1_size =
			region1->address_range_array[i].page_count *
			PAGE_SIZE_4KB;
		region1_end = region1_start + region1_size;

		for (size_t j = 0; j < region2->address_range_count; j++) {

			region2_start = region2->address_range_array[j].address;
			region2_size =
				region2->address_range_array[j].page_count *
				PAGE_SIZE_4KB;
			region2_end = region2_start + region2_size;

			/* Check if regions are not overlapping. */
			if (!((region2_end <= region1_start) ||
			      (region1_end <= region2_start))) {
				WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
				     region1_start, region1_end,
				     region2_start, region2_end);
				return true;
			}
		}
	}
	return false;
}
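
/*
 * The check above is the standard half-open interval test: [s1, e1) and
 * [s2, e2) are disjoint iff e2 <= s1 or e1 <= s2, so they overlap iff
 * !(e2 <= s1 || e1 <= s2). For example, 0x1000-0x3000 and 0x2000-0x4000
 * overlap, while 0x1000-0x2000 and 0x2000-0x3000 merely touch and do not.
 */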

/*******************************************************************************
 * FF-A v1.0 Memory Descriptor Conversion Helpers.
 ******************************************************************************/
/**
 * spmc_shm_get_v1_1_descriptor_size - Calculate the required size for a v1.1
 *                                     converted descriptor.
 * @orig:       The original v1.0 memory transaction descriptor.
 * @desc_size:  The size of the original v1.0 memory transaction descriptor.
 *
 * Return: the size required to store the descriptor in the v1.1 format.
 */
static uint64_t
spmc_shm_get_v1_1_descriptor_size(struct ffa_mtd_v1_0 *orig, size_t desc_size)
{
	uint64_t size = 0;
	struct ffa_comp_mrd *mrd;
	struct ffa_emad_v1_0 *emad_array = orig->emad;

	/* Get the size of the v1.1 descriptor. */
	size += sizeof(struct ffa_mtd);

	/* Add the size of the emad descriptors. */
	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);

	/* Add the size of the composite mrd. */
	size += sizeof(struct ffa_comp_mrd);

	/* Find the composite mrd in the original descriptor. */
	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
	      emad_array[0].comp_mrd_offset);

	/* Add the size of the constituent mrds. */
	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);

	return size;
}

/**
 * spmc_shm_get_v1_0_descriptor_size - Calculate the required size for a v1.0
 *                                     converted descriptor.
 * @orig:       The original v1.1 memory transaction descriptor.
 * @desc_size:  The size of the original v1.1 memory transaction descriptor.
 *
 * Return: the size required to store the descriptor in the v1.0 format.
 */
static size_t
spmc_shm_get_v1_0_descriptor_size(struct ffa_mtd *orig, size_t desc_size)
{
	size_t size = 0;
	struct ffa_comp_mrd *mrd;
	struct ffa_emad_v1_0 *emad_array = (struct ffa_emad_v1_0 *)
					   ((uint8_t *) orig +
					    orig->emad_offset);

	/* Get the size of the v1.0 descriptor. */
	size += sizeof(struct ffa_mtd_v1_0);

	/* Add the size of the v1.0 emad descriptors. */
	size += orig->emad_count * sizeof(struct ffa_emad_v1_0);

	/* Add the size of the composite mrd. */
	size += sizeof(struct ffa_comp_mrd);

	/* Find the composite mrd in the original descriptor. */
	mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
	      emad_array[0].comp_mrd_offset);

	/* Check the calculated address is within the memory descriptor. */
	if (((uintptr_t) mrd + sizeof(struct ffa_comp_mrd)) >
	    (uintptr_t)((uint8_t *) orig + desc_size)) {
		return 0;
	}

	/* Add the size of the constituent mrds. */
	size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);

	return size;
}

/**
 * spmc_shm_convert_shmem_obj_from_v1_0 - Converts a given v1.0 memory object.
 * @out_obj:    The shared memory object to populate the converted descriptor.
 * @orig:       The shared memory object containing the v1.0 descriptor.
 *
 * Return: true if the conversion is successful else false.
 */
static bool
spmc_shm_convert_shmem_obj_from_v1_0(struct spmc_shmem_obj *out_obj,
				     struct spmc_shmem_obj *orig)
{
	struct ffa_mtd_v1_0 *mtd_orig = (struct ffa_mtd_v1_0 *) &orig->desc;
	struct ffa_mtd *out = &out_obj->desc;
	struct ffa_emad_v1_0 *emad_array_in;
	struct ffa_emad_v1_0 *emad_array_out;
	struct ffa_comp_mrd *mrd_in;
	struct ffa_comp_mrd *mrd_out;

	size_t mrd_in_offset;
	size_t mrd_out_offset;
	size_t mrd_size = 0;

	/* Populate the new descriptor format from the v1.0 struct. */
	out->sender_id = mtd_orig->sender_id;
	out->memory_region_attributes = mtd_orig->memory_region_attributes;
	out->flags = mtd_orig->flags;
	out->handle = mtd_orig->handle;
	out->tag = mtd_orig->tag;
	out->emad_count = mtd_orig->emad_count;
	out->emad_size = sizeof(struct ffa_emad_v1_0);

	/*
	 * We will locate the emad descriptors directly after the ffa_mtd
	 * struct. This will be 8-byte aligned.
	 */
	out->emad_offset = sizeof(struct ffa_mtd);

	emad_array_in = mtd_orig->emad;
	emad_array_out = (struct ffa_emad_v1_0 *)
			 ((uint8_t *) out + out->emad_offset);

	/* Copy across the emad structs. */
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		/* Bound check for each element of the emad array. */
		if (((uint8_t *)&emad_array_in[i] +
		     sizeof(struct ffa_emad_v1_0)) >
		    ((uint8_t *) mtd_orig + orig->desc_size)) {
			VERBOSE("%s: Invalid mtd structure.\n", __func__);
			return false;
		}
		memcpy(&emad_array_out[i], &emad_array_in[i],
		       sizeof(struct ffa_emad_v1_0));
	}

	/* Place the mrd descriptors after the end of the emad descriptors. */
	mrd_in_offset = emad_array_in->comp_mrd_offset;
	mrd_out_offset = out->emad_offset + (out->emad_size * out->emad_count);
	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);

	/* Add the size of the composite memory region descriptor. */
	mrd_size += sizeof(struct ffa_comp_mrd);

	/* Find the mrd descriptor. */
	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);

	/* Add the size of the constituent memory region descriptors. */
	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);

	/*
	 * Update the offset in the emads by the delta between the input and
	 * output addresses.
	 */
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		emad_array_out[i].comp_mrd_offset =
			emad_array_in[i].comp_mrd_offset +
			(mrd_out_offset - mrd_in_offset);
	}

	/* Verify that we stay within bound of the memory descriptors. */
	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
	    (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
		ERROR("%s: Invalid mrd structure.\n", __func__);
		return false;
	}

	/* Copy the mrd descriptors directly. */
	memcpy(mrd_out, mrd_in, mrd_size);

	return true;
}

/**
 * spmc_shm_convert_mtd_to_v1_0 - Converts a given v1.1 memory object to
 *                                v1.0 memory object.
 * @out_obj:    The shared memory object to populate the v1.0 descriptor.
 * @orig:       The shared memory object containing the v1.1 descriptor.
 *
 * Return: true if the conversion is successful else false.
 */
static bool
spmc_shm_convert_mtd_to_v1_0(struct spmc_shmem_obj *out_obj,
			     struct spmc_shmem_obj *orig)
{
	struct ffa_mtd *mtd_orig = &orig->desc;
	struct ffa_mtd_v1_0 *out = (struct ffa_mtd_v1_0 *) &out_obj->desc;
	struct ffa_emad_v1_0 *emad_in;
	struct ffa_emad_v1_0 *emad_array_in;
	struct ffa_emad_v1_0 *emad_array_out;
	struct ffa_comp_mrd *mrd_in;
	struct ffa_comp_mrd *mrd_out;

	size_t mrd_in_offset;
	size_t mrd_out_offset;
	size_t emad_out_array_size;
	size_t mrd_size = 0;
	size_t orig_desc_size = orig->desc_size;

	/* Populate the v1.0 descriptor format from the v1.1 struct. */
	out->sender_id = mtd_orig->sender_id;
	out->memory_region_attributes = mtd_orig->memory_region_attributes;
	out->flags = mtd_orig->flags;
	out->handle = mtd_orig->handle;
	out->tag = mtd_orig->tag;
	out->emad_count = mtd_orig->emad_count;

	/* Determine the location of the emad array in both descriptors. */
	emad_array_in = (struct ffa_emad_v1_0 *)
			((uint8_t *) mtd_orig + mtd_orig->emad_offset);
	emad_array_out = out->emad;

	/* Copy across the emad structs. */
	emad_in = emad_array_in;
	for (unsigned int i = 0U; i < out->emad_count; i++) {
		/* Bound check for emad array. */
		if (((uint8_t *)emad_in + sizeof(struct ffa_emad_v1_0)) >
		    ((uint8_t *) mtd_orig + orig_desc_size)) {
			VERBOSE("%s: Invalid mtd structure.\n", __func__);
			return false;
		}
		memcpy(&emad_array_out[i], emad_in,
		       sizeof(struct ffa_emad_v1_0));

		/* emad_size is in bytes; advance byte-wise, not element-wise. */
		emad_in = (struct ffa_emad_v1_0 *)
			  ((uint8_t *)emad_in + mtd_orig->emad_size);
	}

	/* Place the mrd descriptors after the end of the emad descriptors. */
	emad_out_array_size = sizeof(struct ffa_emad_v1_0) * out->emad_count;

	mrd_out_offset = (uint8_t *) out->emad - (uint8_t *) out +
			 emad_out_array_size;

	mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);

	mrd_in_offset = mtd_orig->emad_offset +
			(mtd_orig->emad_size * mtd_orig->emad_count);

	/* Add the size of the composite memory region descriptor. */
	mrd_size += sizeof(struct ffa_comp_mrd);

	/* Find the mrd descriptor. */
	mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);

	/* Add the size of the constituent memory region descriptors. */
	mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);

	/*
	 * Update the offset in the emads by the delta between the input and
	 * output addresses.
	 */
	emad_in = emad_array_in;

	for (unsigned int i = 0U; i < out->emad_count; i++) {
		emad_array_out[i].comp_mrd_offset = emad_in->comp_mrd_offset +
						    (mrd_out_offset -
						     mrd_in_offset);
		emad_in = (struct ffa_emad_v1_0 *)
			  ((uint8_t *)emad_in + mtd_orig->emad_size);
	}

	/* Verify that we stay within bound of the memory descriptors. */
	if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
	    (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
	    ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
	     (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
		ERROR("%s: Invalid mrd structure.\n", __func__);
		return false;
	}

	/* Copy the mrd descriptors directly. */
	memcpy(mrd_out, mrd_in, mrd_size);

	return true;
}

/**
 * spmc_populate_ffa_v1_0_descriptor - Converts a given v1.1 memory object to
 *                                     the v1.0 format and populates the
 *                                     provided buffer.
 * @dst:              Buffer to populate v1.0 ffa_memory_region_descriptor.
 * @orig_obj:         Object containing v1.1 ffa_memory_region_descriptor.
 * @buf_size:         Size of the buffer to populate.
 * @offset:           The offset of the converted descriptor to copy.
 * @copy_size:        Will be populated with the number of bytes copied.
 * @v1_0_desc_size:   Will be populated with the total size of the v1.0
 *                    descriptor.
 *
 * Return: 0 if conversion and population succeeded.
 * Note: This function invalidates the reference to @orig_obj, therefore
 * `spmc_shmem_obj_lookup` must be called if further usage is required.
 */
static uint32_t
spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
				  size_t buf_size, size_t offset,
				  size_t *copy_size, size_t *v1_0_desc_size)
{
	struct spmc_shmem_obj *v1_0_obj;

	/* Calculate the size that the v1.0 descriptor will require. */
	*v1_0_desc_size = spmc_shm_get_v1_0_descriptor_size(
			  &orig_obj->desc, orig_obj->desc_size);

	if (*v1_0_desc_size == 0) {
		ERROR("%s: cannot determine size of descriptor.\n",
		      __func__);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/* Get a new obj to store the v1.0 descriptor. */
	v1_0_obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
					*v1_0_desc_size);

	if (!v1_0_obj) {
		return FFA_ERROR_NO_MEMORY;
	}

	/* Perform the conversion from v1.1 to v1.0. */
	if (!spmc_shm_convert_mtd_to_v1_0(v1_0_obj, orig_obj)) {
		spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	*copy_size = MIN(v1_0_obj->desc_size - offset, buf_size);
	memcpy(dst, (uint8_t *) &v1_0_obj->desc + offset, *copy_size);

	/*
	 * We're finished with the v1.0 descriptor for now so free it.
	 * Note that this will invalidate any references to the v1.1
	 * descriptor.
	 */
	spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);

	return 0;
}

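/**
 * spmc_compatible_version - Check if an FF-A version is compatible with a
 *                           required major and minimum minor revision.
 * @ffa_version:  Version to check, in FFA_VERSION encoding.
 * @major:        Required major version, which must match exactly.
 * @minor:        Minimum required minor version.
 *
 * Return: true if bit 31 is clear, the major version matches @major and the
 *         minor version is at least @minor; e.g. a v1.2 caller satisfies
 *         (1, 1) but a v2.0 caller does not.
 */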
bool spmc_compatible_version(uint32_t ffa_version, uint16_t major,
			     uint16_t minor)
{
	bool bit31_set = ffa_version & FFA_VERSION_BIT31_MASK;
	uint16_t majv = (ffa_version >> FFA_VERSION_MAJOR_SHIFT) &
			FFA_VERSION_MAJOR_MASK;
	uint16_t minv = (ffa_version >> FFA_VERSION_MINOR_SHIFT) &
			FFA_VERSION_MINOR_MASK;

	return !bit31_set && majv == major && minv >= minor;
}

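/**
 * spmc_validate_mtd_start - Validate the fixed header of a memory
 *                           transaction descriptor.
 * @desc:             Partially received memory transaction descriptor.
 * @ffa_version:      FF-A version of the provided descriptor.
 * @fragment_length:  Length of the current fragment.
 * @total_length:     Total length of the descriptor.
 *
 * Checks that the first fragment is large enough to hold the
 * version-specific header and, for v1.1 onwards, that the emad offset and
 * size fields are sane, so that later emad accesses stay in bounds.
 *
 * Return: 0 if the header is valid, FFA_ERROR_INVALID_PARAMETER otherwise.
 */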
static int
spmc_validate_mtd_start(struct ffa_mtd *desc, uint32_t ffa_version,
			size_t fragment_length, size_t total_length)
{
	unsigned long long emad_end;
	unsigned long long emad_size;
	unsigned long long emad_offset;
	unsigned int min_desc_size;

	/* Determine the appropriate minimum descriptor size. */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		min_desc_size = sizeof(struct ffa_mtd_v1_0);
	} else if (spmc_compatible_version(ffa_version, 1, 1)) {
		min_desc_size = sizeof(struct ffa_mtd);
	} else {
		return FFA_ERROR_INVALID_PARAMETER;
	}
	if (fragment_length < min_desc_size) {
		WARN("%s: invalid length %zu < %u\n", __func__, fragment_length,
		     min_desc_size);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	if (desc->emad_count == 0U) {
		WARN("%s: unsupported attribute desc count %u.\n",
		     __func__, desc->emad_count);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/*
	 * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
	 * format, otherwise assume it is a v1.1 format.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		emad_offset = emad_size = sizeof(struct ffa_emad_v1_0);
	} else {
		if (!is_aligned(desc->emad_offset, 16)) {
			WARN("%s: Emad offset %" PRIx32 " is not 16-byte aligned.\n",
			     __func__, desc->emad_offset);
			return FFA_ERROR_INVALID_PARAMETER;
		}
		if (desc->emad_offset < sizeof(struct ffa_mtd)) {
			WARN("%s: Emad offset too small: 0x%" PRIx32 " < 0x%zx.\n",
			     __func__, desc->emad_offset,
			     sizeof(struct ffa_mtd));
			return FFA_ERROR_INVALID_PARAMETER;
		}
		emad_offset = desc->emad_offset;
		if (desc->emad_size < sizeof(struct ffa_emad_v1_0)) {
			WARN("%s: Bad emad size (%" PRIu32 " < %zu).\n", __func__,
			     desc->emad_size, sizeof(struct ffa_emad_v1_0));
			return FFA_ERROR_INVALID_PARAMETER;
		}
		if (!is_aligned(desc->emad_size, 16)) {
			WARN("%s: Emad size 0x%" PRIx32 " is not 16-byte aligned.\n",
			     __func__, desc->emad_size);
			return FFA_ERROR_INVALID_PARAMETER;
		}
		emad_size = desc->emad_size;
	}

	/*
	 * Overflow is impossible: the arithmetic happens in at least 64-bit
	 * precision, but all of the operands are bounded by UINT32_MAX, and
	 *   ((2^32 - 1) * (2^32 - 1) + (2^32 - 1) + (2^32 - 1))
	 *   = ((2^32 - 1) * ((2^32 - 1) + 1 + 1))
	 *   = ((2^32 - 1) * (2^32 + 1))
	 *   = (2^64 - 1).
	 */
	CASSERT(sizeof(desc->emad_count) == 4, assert_emad_count_max_too_large);
	emad_end = (desc->emad_count * (unsigned long long)emad_size) +
		   (unsigned long long)sizeof(struct ffa_comp_mrd) +
		   (unsigned long long)emad_offset;

	if (emad_end > total_length) {
		WARN("%s: Composite memory region extends beyond descriptor: 0x%llx > 0x%zx\n",
		     __func__, emad_end, total_length);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	return 0;
}

static inline const struct ffa_emad_v1_0 *
emad_advance(const struct ffa_emad_v1_0 *emad, size_t offset)
{
	return (const struct ffa_emad_v1_0 *)((const uint8_t *)emad + offset);
}

/**
 * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
 * @obj:          Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: 0 if object is valid, FFA_ERROR_INVALID_PARAMETER if
 * constituent_memory_region_descriptor offset or count is invalid.
 */
static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
				uint32_t ffa_version)
{
	unsigned long long total_page_count;
	const struct ffa_emad_v1_0 *first_emad;
	const struct ffa_emad_v1_0 *end_emad;
	size_t emad_size;
	uint32_t comp_mrd_offset;
	size_t header_emad_size;
	size_t size;
	size_t count;
	size_t expected_size;
	const struct ffa_comp_mrd *comp;

	if (obj->desc_filled != obj->desc_size) {
		ERROR("BUG: %s called on incomplete object (%zu != %zu)\n",
		      __func__, obj->desc_filled, obj->desc_size);
		panic();
	}

	if (spmc_validate_mtd_start(&obj->desc, ffa_version,
				    obj->desc_filled, obj->desc_size)) {
		ERROR("BUG: %s called on object with corrupt memory region descriptor\n",
		      __func__);
		panic();
	}

	first_emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
					     ffa_version, &emad_size);
	end_emad = emad_advance(first_emad, obj->desc.emad_count * emad_size);
	comp_mrd_offset = first_emad->comp_mrd_offset;

	/* Loop through the endpoint descriptors, validating each of them. */
	for (const struct ffa_emad_v1_0 *emad = first_emad; emad < end_emad;) {
		ffa_endpoint_id16_t ep_id;

		/*
		 * If a partition ID resides in the secure world validate that
		 * the partition ID is for a known partition. Ignore any
		 * partition ID belonging to the normal world as it is assumed
		 * the Hypervisor will have validated these.
		 */
		ep_id = emad->mapd.endpoint_id;
		if (ffa_is_secure_world_id(ep_id)) {
			if (spmc_get_sp_ctx(ep_id) == NULL) {
				WARN("%s: Invalid receiver id 0x%x\n",
				     __func__, ep_id);
				return FFA_ERROR_INVALID_PARAMETER;
			}
		}

		/*
		 * The offset provided to the composite memory region
		 * descriptor should be consistent across endpoint
		 * descriptors.
		 */
		if (comp_mrd_offset != emad->comp_mrd_offset) {
			ERROR("%s: mismatching offsets provided, %u != %u\n",
			      __func__, emad->comp_mrd_offset, comp_mrd_offset);
			return FFA_ERROR_INVALID_PARAMETER;
		}

		/* Advance to the next endpoint descriptor. */
		emad = emad_advance(emad, emad_size);

		/*
		 * Ensure neither this emad nor any subsequent emads have
		 * the same partition ID as the previous emad.
		 */
		for (const struct ffa_emad_v1_0 *other_emad = emad;
		     other_emad < end_emad;
		     other_emad = emad_advance(other_emad, emad_size)) {
			if (ep_id == other_emad->mapd.endpoint_id) {
				WARN("%s: Duplicated endpoint id 0x%x\n",
				     __func__, ep_id);
				return FFA_ERROR_INVALID_PARAMETER;
			}
		}
	}

	header_emad_size = (size_t)((const uint8_t *)end_emad -
				    (const uint8_t *)&obj->desc);

	/*
	 * Check that the composite descriptor
	 * is after the endpoint descriptors.
	 */
	if (comp_mrd_offset < header_emad_size) {
		WARN("%s: invalid object, offset %u < header + emad %zu\n",
		     __func__, comp_mrd_offset, header_emad_size);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/* Ensure the composite descriptor offset is aligned. */
	if (!is_aligned(comp_mrd_offset, 16)) {
		WARN("%s: invalid object, unaligned composite memory "
		     "region descriptor offset %u.\n",
		     __func__, comp_mrd_offset);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	size = obj->desc_size;

	/* Check that the composite descriptor is in bounds. */
	if (comp_mrd_offset > size) {
		WARN("%s: invalid object, offset %u > total size %zu\n",
		     __func__, comp_mrd_offset, obj->desc_size);
		return FFA_ERROR_INVALID_PARAMETER;
	}
	size -= comp_mrd_offset;

	/* Check that there is enough space for the composite descriptor. */
	if (size < sizeof(struct ffa_comp_mrd)) {
		WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
		     __func__, comp_mrd_offset, obj->desc_size);
		return FFA_ERROR_INVALID_PARAMETER;
	}
	size -= sizeof(*comp);

	count = size / sizeof(struct ffa_cons_mrd);

	comp = (const struct ffa_comp_mrd *)
	       ((const uint8_t *)(&obj->desc) + comp_mrd_offset);

	if (comp->address_range_count != count) {
		WARN("%s: invalid object, desc count %u != %zu\n",
		     __func__, comp->address_range_count, count);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/* Ensure that the expected and actual sizes are equal. */
	expected_size = comp_mrd_offset + sizeof(*comp) +
			count * sizeof(struct ffa_cons_mrd);

	if (expected_size != obj->desc_size) {
		WARN("%s: invalid object, computed size %zu != size %zu\n",
		     __func__, expected_size, obj->desc_size);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	total_page_count = 0;

	/*
	 * comp->address_range_count is 32-bit, so 'count' must fit in a
	 * uint32_t at this point.
	 */
	for (size_t i = 0; i < count; i++) {
		const struct ffa_cons_mrd *mrd = comp->address_range_array + i;

		if (!is_aligned(mrd->address, PAGE_SIZE)) {
			WARN("%s: invalid object, address in region descriptor "
			     "%zu not 4K aligned (got 0x%016llx)\n",
			     __func__, i, (unsigned long long)mrd->address);
		}

		/*
		 * No overflow possible: total_page_count can hold at
		 * least 2^64 - 1, but will have at most 2^32 - 1
		 * values added to it, each of which cannot exceed 2^32 - 1.
		 */
		total_page_count += mrd->page_count;
	}

	if (comp->total_page_count != total_page_count) {
		WARN("%s: invalid object, desc total_page_count %u != %llu\n",
		     __func__, comp->total_page_count, total_page_count);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	return 0;
}

/**
 * spmc_shmem_check_state_obj - Check if the descriptor describes memory
 *                              regions that are currently involved with an
 *                              existing memory transaction. This implies that
 *                              the memory is not in a valid state for lending.
 * @obj:          Object containing ffa_memory_region_descriptor.
 * @ffa_version:  FF-A version of the provided descriptor.
 *
 * Return: 0 if object is valid, FFA_ERROR_INVALID_PARAMETER if invalid memory
 * state.
 */
static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj,
				      uint32_t ffa_version)
{
	size_t obj_offset = 0;
	struct spmc_shmem_obj *inflight_obj;

	struct ffa_comp_mrd *other_mrd;
	struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj,
								  ffa_version);

	if (requested_mrd == NULL) {
		return FFA_ERROR_INVALID_PARAMETER;
	}

	inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
					       &obj_offset);

	while (inflight_obj != NULL) {
		/*
		 * Don't compare the transaction to itself or to partially
		 * transmitted descriptors.
		 */
		if ((obj->desc.handle != inflight_obj->desc.handle) &&
		    (obj->desc_size == obj->desc_filled)) {
			other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj,
							FFA_VERSION_COMPILED);
			if (other_mrd == NULL) {
				return FFA_ERROR_INVALID_PARAMETER;
			}
			if (overlapping_memory_regions(requested_mrd,
						       other_mrd)) {
				return FFA_ERROR_INVALID_PARAMETER;
			}
		}

		inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
						       &obj_offset);
	}
	return 0;
}
1018e0b1a6d5SMarc Bonnici
spmc_ffa_fill_desc(struct mailbox * mbox,struct spmc_shmem_obj * obj,uint32_t fragment_length,ffa_mtd_flag32_t mtd_flag,uint32_t ffa_version,void * smc_handle)1019e0b1a6d5SMarc Bonnici static long spmc_ffa_fill_desc(struct mailbox *mbox,
1020e0b1a6d5SMarc Bonnici struct spmc_shmem_obj *obj,
1021e0b1a6d5SMarc Bonnici uint32_t fragment_length,
1022e0b1a6d5SMarc Bonnici ffa_mtd_flag32_t mtd_flag,
10237e804f96SMarc Bonnici uint32_t ffa_version,
1024e0b1a6d5SMarc Bonnici void *smc_handle)
1025e0b1a6d5SMarc Bonnici {
1026e0b1a6d5SMarc Bonnici int ret;
1027e0b1a6d5SMarc Bonnici uint32_t handle_low;
1028e0b1a6d5SMarc Bonnici uint32_t handle_high;
1029e0b1a6d5SMarc Bonnici
1030e0b1a6d5SMarc Bonnici if (mbox->rxtx_page_count == 0U) {
1031e0b1a6d5SMarc Bonnici WARN("%s: buffer pair not registered.\n", __func__);
10327e804f96SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1033e0b1a6d5SMarc Bonnici goto err_arg;
1034e0b1a6d5SMarc Bonnici }
1035e0b1a6d5SMarc Bonnici
103643318e4aSDemi Marie Obenour CASSERT(sizeof(mbox->rxtx_page_count) == 4, assert_bogus_page_count);
103743318e4aSDemi Marie Obenour if (fragment_length > (uint64_t)mbox->rxtx_page_count * PAGE_SIZE_4KB) {
103843318e4aSDemi Marie Obenour WARN("%s: bad fragment size %u > %" PRIu64 " buffer size\n", __func__,
103943318e4aSDemi Marie Obenour fragment_length, (uint64_t)mbox->rxtx_page_count * PAGE_SIZE_4KB);
10407e804f96SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1041e0b1a6d5SMarc Bonnici goto err_arg;
1042e0b1a6d5SMarc Bonnici }
1043e0b1a6d5SMarc Bonnici
1044e0b1a6d5SMarc Bonnici if (fragment_length > obj->desc_size - obj->desc_filled) {
1045e0b1a6d5SMarc Bonnici WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
1046e0b1a6d5SMarc Bonnici fragment_length, obj->desc_size - obj->desc_filled);
10477e804f96SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1048e0b1a6d5SMarc Bonnici goto err_arg;
1049e0b1a6d5SMarc Bonnici }
1050e0b1a6d5SMarc Bonnici
105121ed9ea3SMarc Bonnici memcpy((uint8_t *)&obj->desc + obj->desc_filled,
105221ed9ea3SMarc Bonnici (uint8_t *) mbox->tx_buffer, fragment_length);
105321ed9ea3SMarc Bonnici
1054e0b1a6d5SMarc Bonnici /* Ensure that the sender ID resides in the normal world. */
1055e0b1a6d5SMarc Bonnici if (ffa_is_secure_world_id(obj->desc.sender_id)) {
1056e0b1a6d5SMarc Bonnici WARN("%s: Invalid sender ID 0x%x.\n",
1057e0b1a6d5SMarc Bonnici __func__, obj->desc.sender_id);
1058e0b1a6d5SMarc Bonnici ret = FFA_ERROR_DENIED;
1059e0b1a6d5SMarc Bonnici goto err_arg;
1060e0b1a6d5SMarc Bonnici }
1061e0b1a6d5SMarc Bonnici
1062153eb4c8SAndrei Homescu /*
1063153eb4c8SAndrei Homescu * Ensure the sender left the NS bit cleared. Only perform this
1064153eb4c8SAndrei Homescu * check on the first fragment: we set the bit ourselves below, so
1065153eb4c8SAndrei Homescu * it is already set by the time later fragments arrive.
1066153eb4c8SAndrei Homescu */
1067153eb4c8SAndrei Homescu if (obj->desc_filled == 0U &&
1068153eb4c8SAndrei Homescu (obj->desc.memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
10690560b53eSMarc Bonnici WARN("%s: NS mem attributes flags MBZ.\n", __func__);
10700560b53eSMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
10710560b53eSMarc Bonnici goto err_arg;
10720560b53eSMarc Bonnici }
1073cb2e5746SAndrei Homescu /*
1074cb2e5746SAndrei Homescu * Ensure the NS bit is set to 1 since we only allow non-secure senders.
1075cb2e5746SAndrei Homescu * The specification requires that the NS bit is MBZ for
1076cb2e5746SAndrei Homescu * FFA_MEM_{DONATE,LEND,SHARE,RETRIEVE_REQ}, but we set the bit here
1077cb2e5746SAndrei Homescu * for internal bookkeeping to mark that the transaction did come
1078cb2e5746SAndrei Homescu * from the normal world.
1079cb2e5746SAndrei Homescu */
1080cb2e5746SAndrei Homescu obj->desc.memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
10810560b53eSMarc Bonnici
1082e0b1a6d5SMarc Bonnici /*
1083e0b1a6d5SMarc Bonnici * We don't currently support any optional flags, so if the
1084e0b1a6d5SMarc Bonnici * descriptor specifies a transaction type it must match the one
1085e0b1a6d5SMarc Bonnici * implied by the SMC function ID.
1086e0b1a6d5SMarc Bonnici if (obj->desc.flags != 0U && mtd_flag != 0U &&
1087e0b1a6d5SMarc Bonnici (obj->desc.flags != mtd_flag)) {
1088e0b1a6d5SMarc Bonnici WARN("%s: invalid memory transaction flags %u != %u\n",
1089e0b1a6d5SMarc Bonnici __func__, obj->desc.flags, mtd_flag);
10907e804f96SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1091e0b1a6d5SMarc Bonnici goto err_arg;
1092e0b1a6d5SMarc Bonnici }
1093e0b1a6d5SMarc Bonnici
1094e0b1a6d5SMarc Bonnici if (obj->desc_filled == 0U) {
1095e0b1a6d5SMarc Bonnici /* First fragment, descriptor header has been copied */
109656c052d3SDemi Marie Obenour ret = spmc_validate_mtd_start(&obj->desc, ffa_version,
109756c052d3SDemi Marie Obenour fragment_length, obj->desc_size);
109856c052d3SDemi Marie Obenour if (ret != 0) {
109956c052d3SDemi Marie Obenour goto err_bad_desc;
110056c052d3SDemi Marie Obenour }
110156c052d3SDemi Marie Obenour
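/*
 * First fragment accepted: allocate the next globally unique handle
 * and record the transaction type (share/lend) taken from the SMC
 * function ID.
 */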
1102e0b1a6d5SMarc Bonnici obj->desc.handle = spmc_shmem_obj_state.next_handle++;
1103e0b1a6d5SMarc Bonnici obj->desc.flags |= mtd_flag;
1104e0b1a6d5SMarc Bonnici }
1105e0b1a6d5SMarc Bonnici
1106e0b1a6d5SMarc Bonnici obj->desc_filled += fragment_length;
1107e0b1a6d5SMarc Bonnici
1108e0b1a6d5SMarc Bonnici handle_low = (uint32_t)obj->desc.handle;
1109e0b1a6d5SMarc Bonnici handle_high = obj->desc.handle >> 32;
1110e0b1a6d5SMarc Bonnici
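/*
 * If the descriptor is still incomplete, ask the sender for the next
 * fragment: FFA_MEM_FRAG_RX returns the handle and the current fill
 * level so the sender knows which offset to transmit next.
 */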
1111e0b1a6d5SMarc Bonnici if (obj->desc_filled != obj->desc_size) {
1112e0b1a6d5SMarc Bonnici SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
1113e0b1a6d5SMarc Bonnici handle_high, obj->desc_filled,
1114e0b1a6d5SMarc Bonnici (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
1115e0b1a6d5SMarc Bonnici }
1116e0b1a6d5SMarc Bonnici
1117f0244e5dSMarc Bonnici /* The full descriptor has been received, perform any final checks. */
1118f0244e5dSMarc Bonnici
1119d781959fSDemi Marie Obenour ret = spmc_shmem_check_obj(obj, ffa_version);
1120d781959fSDemi Marie Obenour if (ret != 0) {
1121d781959fSDemi Marie Obenour goto err_bad_desc;
1122d781959fSDemi Marie Obenour }
1123d781959fSDemi Marie Obenour
11247e804f96SMarc Bonnici ret = spmc_shmem_check_state_obj(obj, ffa_version);
1125fef85e1eSMarc Bonnici if (ret) {
1126fef85e1eSMarc Bonnici ERROR("%s: invalid memory region descriptor.\n", __func__);
1127fef85e1eSMarc Bonnici goto err_bad_desc;
1128fef85e1eSMarc Bonnici }
1129fef85e1eSMarc Bonnici
11307e804f96SMarc Bonnici /*
11317e804f96SMarc Bonnici * Everything checks out, if the sender was using FF-A v1.0, convert
11327e804f96SMarc Bonnici * the descriptor format to use the v1.1 structures.
11337e804f96SMarc Bonnici */
11347e804f96SMarc Bonnici if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
11357e804f96SMarc Bonnici struct spmc_shmem_obj *v1_1_obj;
11367e804f96SMarc Bonnici uint64_t mem_handle;
11377e804f96SMarc Bonnici
11387e804f96SMarc Bonnici /* Calculate the size that the v1.1 descriptor will require. */
113927c02425SDemi Marie Obenour uint64_t v1_1_desc_size =
11407e804f96SMarc Bonnici spmc_shm_get_v1_1_descriptor_size((void *) &obj->desc,
1141be075c3eSvallau01 obj->desc_size);
11427e804f96SMarc Bonnici
114327c02425SDemi Marie Obenour if (v1_1_desc_size > UINT32_MAX) {
114427c02425SDemi Marie Obenour ret = FFA_ERROR_NO_MEMORY;
11457e804f96SMarc Bonnici goto err_arg;
11467e804f96SMarc Bonnici }
11477e804f96SMarc Bonnici
11487e804f96SMarc Bonnici /* Get a new obj to store the v1.1 descriptor. */
11497e804f96SMarc Bonnici v1_1_obj =
115027c02425SDemi Marie Obenour spmc_shmem_obj_alloc(&spmc_shmem_obj_state, (size_t)v1_1_desc_size);
11517e804f96SMarc Bonnici
1152cee8bb3bSvallau01 if (!v1_1_obj) {
11537e804f96SMarc Bonnici ret = FFA_ERROR_NO_MEMORY;
11547e804f96SMarc Bonnici goto err_arg;
11557e804f96SMarc Bonnici }
11567e804f96SMarc Bonnici
11577e804f96SMarc Bonnici /* Perform the conversion from v1.0 to v1.1. */
115827c02425SDemi Marie Obenour v1_1_obj->desc_size = (uint32_t)v1_1_desc_size;
115927c02425SDemi Marie Obenour v1_1_obj->desc_filled = (uint32_t)v1_1_desc_size;
11607e804f96SMarc Bonnici if (!spmc_shm_convert_shmem_obj_from_v1_0(v1_1_obj, obj)) {
11617e804f96SMarc Bonnici ERROR("%s: Could not convert mtd!\n", __func__);
11627e804f96SMarc Bonnici spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_1_obj);
/* Set a real error code; ret still holds 0 from the state checks above. */
ret = FFA_ERROR_INVALID_PARAMETER;
11637e804f96SMarc Bonnici goto err_arg;
11647e804f96SMarc Bonnici }
11657e804f96SMarc Bonnici
11667e804f96SMarc Bonnici /*
11677e804f96SMarc Bonnici * We're finished with the v1.0 descriptor so free it
11687e804f96SMarc Bonnici * and continue our checks with the new v1.1 descriptor.
11697e804f96SMarc Bonnici */
11707e804f96SMarc Bonnici mem_handle = obj->desc.handle;
11717e804f96SMarc Bonnici spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
11727e804f96SMarc Bonnici obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
11737e804f96SMarc Bonnici if (obj == NULL) {
11747e804f96SMarc Bonnici ERROR("%s: Failed to find converted descriptor.\n",
11757e804f96SMarc Bonnici __func__);
11767e804f96SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
11777e804f96SMarc Bonnici return spmc_ffa_error_return(smc_handle, ret);
11787e804f96SMarc Bonnici }
11797e804f96SMarc Bonnici }
11807e804f96SMarc Bonnici
1181a8be4cd0SMarc Bonnici /* Allow for platform specific operations to be performed. */
1182a8be4cd0SMarc Bonnici ret = plat_spmc_shmem_begin(&obj->desc);
1183a8be4cd0SMarc Bonnici if (ret != 0) {
1184a8be4cd0SMarc Bonnici goto err_arg;
1185a8be4cd0SMarc Bonnici }
1186a8be4cd0SMarc Bonnici
1187e0b1a6d5SMarc Bonnici SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
1188e0b1a6d5SMarc Bonnici 0, 0, 0);
1189e0b1a6d5SMarc Bonnici
1190e0b1a6d5SMarc Bonnici err_bad_desc:
1191e0b1a6d5SMarc Bonnici err_arg:
1192e0b1a6d5SMarc Bonnici spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
11937e804f96SMarc Bonnici return spmc_ffa_error_return(smc_handle, ret);
1194e0b1a6d5SMarc Bonnici }
1195e0b1a6d5SMarc Bonnici
1196e0b1a6d5SMarc Bonnici /**
1197e0b1a6d5SMarc Bonnici * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
1198e0b1a6d5SMarc Bonnici * @client: Client state.
1199e0b1a6d5SMarc Bonnici * @total_length: Total length of shared memory descriptor.
1200e0b1a6d5SMarc Bonnici * @fragment_length: Length of fragment of shared memory descriptor passed in
1201e0b1a6d5SMarc Bonnici * this call.
1202e0b1a6d5SMarc Bonnici * @address: Not supported, must be 0.
1203e0b1a6d5SMarc Bonnici * @page_count: Not supported, must be 0.
1204e0b1a6d5SMarc Bonnici * @smc_handle: Handle passed to smc call. Used to return
1205e0b1a6d5SMarc Bonnici * FFA_MEM_FRAG_RX or FFA_SUCCESS_SMC32.
1206e0b1a6d5SMarc Bonnici *
1207e0b1a6d5SMarc Bonnici * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
1208e0b1a6d5SMarc Bonnici * to share or lend memory from non-secure os to secure os (with no stream
1209e0b1a6d5SMarc Bonnici * endpoints).
1210e0b1a6d5SMarc Bonnici *
1211e0b1a6d5SMarc Bonnici * Return: 0 on success, error code on failure.
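 *
 * Illustrative (hypothetical, non-normative) caller-side sketch of the
 * fragmentation loop whose receive side this function implements; smc()
 * stands in for the caller's SMC conduit wrapper:
 *
 *	res = smc(FFA_MEM_SHARE, total_length, fragment_length, 0, 0);
 *	while (res.fid == FFA_MEM_FRAG_RX)
 *		res = smc(FFA_MEM_FRAG_TX, handle_lo, handle_hi, frag_len);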
1212e0b1a6d5SMarc Bonnici */
spmc_ffa_mem_send(uint32_t smc_fid,bool secure_origin,uint64_t total_length,uint32_t fragment_length,uint64_t address,uint32_t page_count,void * cookie,void * handle,uint64_t flags)1213e0b1a6d5SMarc Bonnici long spmc_ffa_mem_send(uint32_t smc_fid,
1214e0b1a6d5SMarc Bonnici bool secure_origin,
1215e0b1a6d5SMarc Bonnici uint64_t total_length,
1216e0b1a6d5SMarc Bonnici uint32_t fragment_length,
1217e0b1a6d5SMarc Bonnici uint64_t address,
1218e0b1a6d5SMarc Bonnici uint32_t page_count,
1219e0b1a6d5SMarc Bonnici void *cookie,
1220e0b1a6d5SMarc Bonnici void *handle,
1221e0b1a6d5SMarc Bonnici uint64_t flags)
1222e0b1a6d5SMarc Bonnici
1223e0b1a6d5SMarc Bonnici {
1224e0b1a6d5SMarc Bonnici long ret;
1225e0b1a6d5SMarc Bonnici struct spmc_shmem_obj *obj;
1226e0b1a6d5SMarc Bonnici struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1227e0b1a6d5SMarc Bonnici ffa_mtd_flag32_t mtd_flag;
12287e804f96SMarc Bonnici uint32_t ffa_version = get_partition_ffa_version(secure_origin);
122952d8d506SDemi Marie Obenour size_t min_desc_size;
1230e0b1a6d5SMarc Bonnici
1231e0b1a6d5SMarc Bonnici if (address != 0U || page_count != 0U) {
1232e0b1a6d5SMarc Bonnici WARN("%s: custom memory region for message not supported.\n",
1233e0b1a6d5SMarc Bonnici __func__);
1234e0b1a6d5SMarc Bonnici return spmc_ffa_error_return(handle,
1235e0b1a6d5SMarc Bonnici FFA_ERROR_INVALID_PARAMETER);
1236e0b1a6d5SMarc Bonnici }
1237e0b1a6d5SMarc Bonnici
1238e0b1a6d5SMarc Bonnici if (secure_origin) {
1239e0b1a6d5SMarc Bonnici WARN("%s: unsupported share direction.\n", __func__);
1240e0b1a6d5SMarc Bonnici return spmc_ffa_error_return(handle,
1241e0b1a6d5SMarc Bonnici FFA_ERROR_INVALID_PARAMETER);
1242e0b1a6d5SMarc Bonnici }
1243e0b1a6d5SMarc Bonnici
124452d8d506SDemi Marie Obenour if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
124552d8d506SDemi Marie Obenour min_desc_size = sizeof(struct ffa_mtd_v1_0);
1246*3f1c63ddSJay Monkman } else if (spmc_compatible_version(ffa_version, 1, 1)) {
124752d8d506SDemi Marie Obenour min_desc_size = sizeof(struct ffa_mtd);
124852d8d506SDemi Marie Obenour } else {
124952d8d506SDemi Marie Obenour WARN("%s: bad FF-A version.\n", __func__);
125052d8d506SDemi Marie Obenour return spmc_ffa_error_return(handle,
125152d8d506SDemi Marie Obenour FFA_ERROR_INVALID_PARAMETER);
125252d8d506SDemi Marie Obenour }
125352d8d506SDemi Marie Obenour
125452d8d506SDemi Marie Obenour /* Check if the descriptor is too small for the FF-A version. */
125552d8d506SDemi Marie Obenour if (fragment_length < min_desc_size) {
1256e0b1a6d5SMarc Bonnici WARN("%s: bad first fragment size %u < %zu\n",
12577e804f96SMarc Bonnici __func__, fragment_length, min_desc_size);
1258e0b1a6d5SMarc Bonnici return spmc_ffa_error_return(handle,
1259e0b1a6d5SMarc Bonnici FFA_ERROR_INVALID_PARAMETER);
1260e0b1a6d5SMarc Bonnici }
1261e0b1a6d5SMarc Bonnici
1262e0b1a6d5SMarc Bonnici if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
1263e0b1a6d5SMarc Bonnici mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
1264e0b1a6d5SMarc Bonnici } else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
1265e0b1a6d5SMarc Bonnici mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
1266e0b1a6d5SMarc Bonnici } else {
1267e0b1a6d5SMarc Bonnici WARN("%s: invalid memory management operation.\n", __func__);
1268e0b1a6d5SMarc Bonnici return spmc_ffa_error_return(handle,
1269e0b1a6d5SMarc Bonnici FFA_ERROR_INVALID_PARAMETER);
1270e0b1a6d5SMarc Bonnici }
1271e0b1a6d5SMarc Bonnici
1272e0b1a6d5SMarc Bonnici spin_lock(&spmc_shmem_obj_state.lock);
1273e0b1a6d5SMarc Bonnici obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
1274e0b1a6d5SMarc Bonnici if (obj == NULL) {
1275e0b1a6d5SMarc Bonnici ret = FFA_ERROR_NO_MEMORY;
1276e0b1a6d5SMarc Bonnici goto err_unlock;
1277e0b1a6d5SMarc Bonnici }
1278e0b1a6d5SMarc Bonnici
1279e0b1a6d5SMarc Bonnici spin_lock(&mbox->lock);
12807e804f96SMarc Bonnici ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag,
12817e804f96SMarc Bonnici ffa_version, handle);
1282e0b1a6d5SMarc Bonnici spin_unlock(&mbox->lock);
1283e0b1a6d5SMarc Bonnici
1284e0b1a6d5SMarc Bonnici spin_unlock(&spmc_shmem_obj_state.lock);
1285e0b1a6d5SMarc Bonnici return ret;
1286e0b1a6d5SMarc Bonnici
1287e0b1a6d5SMarc Bonnici err_unlock:
1288e0b1a6d5SMarc Bonnici spin_unlock(&spmc_shmem_obj_state.lock);
1289e0b1a6d5SMarc Bonnici return spmc_ffa_error_return(handle, ret);
1290e0b1a6d5SMarc Bonnici }
1291e0b1a6d5SMarc Bonnici
1292e0b1a6d5SMarc Bonnici /**
1293e0b1a6d5SMarc Bonnici * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
1294e0b1a6d5SMarc Bonnici * @client: Client state.
1295e0b1a6d5SMarc Bonnici * @handle_low: Handle_low value returned from FFA_MEM_FRAG_RX.
1296e0b1a6d5SMarc Bonnici * @handle_high: Handle_high value returned from FFA_MEM_FRAG_RX.
1297e0b1a6d5SMarc Bonnici * @fragment_length: Length of the fragment transmitted in this call.
1298e0b1a6d5SMarc Bonnici * @sender_id: VM ID of sender in bits [31:16].
1299e0b1a6d5SMarc Bonnici * @smc_handle: Handle passed to smc call. Used to return
1300e0b1a6d5SMarc Bonnici * FFA_MEM_FRAG_RX or FFA_SUCCESS_SMC32.
1301e0b1a6d5SMarc Bonnici *
1302e0b1a6d5SMarc Bonnici * Return: @smc_handle on success, error code on failure.
1303e0b1a6d5SMarc Bonnici */
spmc_ffa_mem_frag_tx(uint32_t smc_fid,bool secure_origin,uint64_t handle_low,uint64_t handle_high,uint32_t fragment_length,uint32_t sender_id,void * cookie,void * handle,uint64_t flags)1304e0b1a6d5SMarc Bonnici long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
1305e0b1a6d5SMarc Bonnici bool secure_origin,
1306e0b1a6d5SMarc Bonnici uint64_t handle_low,
1307e0b1a6d5SMarc Bonnici uint64_t handle_high,
1308e0b1a6d5SMarc Bonnici uint32_t fragment_length,
1309e0b1a6d5SMarc Bonnici uint32_t sender_id,
1310e0b1a6d5SMarc Bonnici void *cookie,
1311e0b1a6d5SMarc Bonnici void *handle,
1312e0b1a6d5SMarc Bonnici uint64_t flags)
1313e0b1a6d5SMarc Bonnici {
1314e0b1a6d5SMarc Bonnici long ret;
1315e0b1a6d5SMarc Bonnici uint32_t desc_sender_id;
13167e804f96SMarc Bonnici uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1317e0b1a6d5SMarc Bonnici struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1318e0b1a6d5SMarc Bonnici
1319e0b1a6d5SMarc Bonnici struct spmc_shmem_obj *obj;
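/* Reassemble the 64-bit memory handle from the two 32-bit halves. */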
1320e0b1a6d5SMarc Bonnici uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1321e0b1a6d5SMarc Bonnici
1322e0b1a6d5SMarc Bonnici spin_lock(&spmc_shmem_obj_state.lock);
1323e0b1a6d5SMarc Bonnici
1324e0b1a6d5SMarc Bonnici obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1325e0b1a6d5SMarc Bonnici if (obj == NULL) {
1326e0b1a6d5SMarc Bonnici WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
1327e0b1a6d5SMarc Bonnici __func__, mem_handle);
1328e0b1a6d5SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1329e0b1a6d5SMarc Bonnici goto err_unlock;
1330e0b1a6d5SMarc Bonnici }
1331e0b1a6d5SMarc Bonnici
1332e0b1a6d5SMarc Bonnici desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
1333e0b1a6d5SMarc Bonnici if (sender_id != desc_sender_id) {
1334e0b1a6d5SMarc Bonnici WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
1335e0b1a6d5SMarc Bonnici sender_id, desc_sender_id);
1336e0b1a6d5SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1337e0b1a6d5SMarc Bonnici goto err_unlock;
1338e0b1a6d5SMarc Bonnici }
1339e0b1a6d5SMarc Bonnici
1340e0b1a6d5SMarc Bonnici if (obj->desc_filled == obj->desc_size) {
1341e0b1a6d5SMarc Bonnici WARN("%s: object desc already filled, %zu\n", __func__,
1342e0b1a6d5SMarc Bonnici obj->desc_filled);
1343e0b1a6d5SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1344e0b1a6d5SMarc Bonnici goto err_unlock;
1345e0b1a6d5SMarc Bonnici }
1346e0b1a6d5SMarc Bonnici
1347e0b1a6d5SMarc Bonnici spin_lock(&mbox->lock);
13487e804f96SMarc Bonnici ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, ffa_version,
13497e804f96SMarc Bonnici handle);
1350e0b1a6d5SMarc Bonnici spin_unlock(&mbox->lock);
1351e0b1a6d5SMarc Bonnici
1352e0b1a6d5SMarc Bonnici spin_unlock(&spmc_shmem_obj_state.lock);
1353e0b1a6d5SMarc Bonnici return ret;
1354e0b1a6d5SMarc Bonnici
1355e0b1a6d5SMarc Bonnici err_unlock:
1356e0b1a6d5SMarc Bonnici spin_unlock(&spmc_shmem_obj_state.lock);
1357e0b1a6d5SMarc Bonnici return spmc_ffa_error_return(handle, ret);
1358e0b1a6d5SMarc Bonnici }
1359e0b1a6d5SMarc Bonnici
1360e0b1a6d5SMarc Bonnici /**
13617d34c9bbSAndrei Homescu * spmc_populate_ffa_hyp_descriptor - Populate the given buffer with a descriptor
13627d34c9bbSAndrei Homescu * for retrieval by the hypervisor.
13637d34c9bbSAndrei Homescu * @dst: Buffer to populate with the hypervisor's ffa_memory_region_descriptor.
13647d34c9bbSAndrei Homescu * @orig_obj: Object containing original ffa_memory_region_descriptor.
13657d34c9bbSAndrei Homescu * @buf_size: Size of the buffer to populate.
13667d34c9bbSAndrei Homescu * @ffa_version: FF-A version of the caller.
13677d34c9bbSAndrei Homescu * @copy_size: Will be populated with the number of bytes copied.
13687d34c9bbSAndrei Homescu * @desc_size: Will be populated with the total size of the descriptor.
13697d34c9bbSAndrei Homescu */
13707d34c9bbSAndrei Homescu static uint32_t
spmc_populate_ffa_hyp_descriptor(void * dst,struct spmc_shmem_obj * orig_obj,size_t buf_size,uint32_t ffa_version,size_t * copy_size,size_t * desc_size)13717d34c9bbSAndrei Homescu spmc_populate_ffa_hyp_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
13727d34c9bbSAndrei Homescu size_t buf_size, uint32_t ffa_version,
13737d34c9bbSAndrei Homescu size_t *copy_size, size_t *desc_size)
13747d34c9bbSAndrei Homescu {
13757d34c9bbSAndrei Homescu size_t mtd_size;
13767d34c9bbSAndrei Homescu size_t emad_size;
13777d34c9bbSAndrei Homescu size_t mrd_size;
13787d34c9bbSAndrei Homescu struct ffa_emad_v1_0 hyp_emad = {0};
13797d34c9bbSAndrei Homescu struct ffa_comp_mrd *orig_mrd;
13807d34c9bbSAndrei Homescu size_t orig_mrd_offset;
13817d34c9bbSAndrei Homescu size_t hyp_mrd_offset;
13827d34c9bbSAndrei Homescu size_t mrd_copy_size;
13837d34c9bbSAndrei Homescu
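/*
 * The hypervisor retrieves on its own behalf, so regardless of how many
 * borrowers the original transaction names, the response is built with a
 * single zero-filled EMAD followed by the composite MRD.
 */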
13847d34c9bbSAndrei Homescu if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
13857d34c9bbSAndrei Homescu struct ffa_mtd_v1_0 mtd = {0};
13867d34c9bbSAndrei Homescu
13877d34c9bbSAndrei Homescu mtd_size = sizeof(mtd);
13887d34c9bbSAndrei Homescu emad_size = sizeof(struct ffa_emad_v1_0);
13897d34c9bbSAndrei Homescu /* The composite MRD starts immediately after our single EMAD */
13907d34c9bbSAndrei Homescu hyp_mrd_offset = mtd_size + emad_size;
13917d34c9bbSAndrei Homescu if (hyp_mrd_offset > buf_size) {
13927d34c9bbSAndrei Homescu return FFA_ERROR_INVALID_PARAMETER;
13937d34c9bbSAndrei Homescu }
13947d34c9bbSAndrei Homescu
13957d34c9bbSAndrei Homescu mtd.sender_id = orig_obj->desc.sender_id;
13967d34c9bbSAndrei Homescu mtd.handle = orig_obj->desc.handle;
13977d34c9bbSAndrei Homescu mtd.emad_count = 1;
13987d34c9bbSAndrei Homescu memcpy(dst, &mtd, mtd_size);
13997d34c9bbSAndrei Homescu } else {
14007d34c9bbSAndrei Homescu struct ffa_mtd mtd = {0};
14017d34c9bbSAndrei Homescu
14027d34c9bbSAndrei Homescu mtd_size = sizeof(mtd);
14037d34c9bbSAndrei Homescu emad_size = sizeof(struct ffa_emad_v1_0);
14047d34c9bbSAndrei Homescu /* The composite MRD starts immediately after our single EMAD */
14057d34c9bbSAndrei Homescu hyp_mrd_offset = mtd_size + emad_size;
14067d34c9bbSAndrei Homescu if (hyp_mrd_offset > buf_size) {
14077d34c9bbSAndrei Homescu return FFA_ERROR_INVALID_PARAMETER;
14087d34c9bbSAndrei Homescu }
14097d34c9bbSAndrei Homescu
14107d34c9bbSAndrei Homescu mtd.sender_id = orig_obj->desc.sender_id;
14117d34c9bbSAndrei Homescu mtd.handle = orig_obj->desc.handle;
14127d34c9bbSAndrei Homescu mtd.emad_size = emad_size;
14137d34c9bbSAndrei Homescu mtd.emad_count = 1;
14147d34c9bbSAndrei Homescu mtd.emad_offset = mtd_size;
14157d34c9bbSAndrei Homescu memcpy(dst, &mtd, mtd_size);
14167d34c9bbSAndrei Homescu }
14177d34c9bbSAndrei Homescu
14187d34c9bbSAndrei Homescu orig_mrd = spmc_shmem_obj_get_comp_mrd(orig_obj, FFA_VERSION_COMPILED);
14197d34c9bbSAndrei Homescu orig_mrd_offset = (uint8_t *)orig_mrd - (uint8_t *)(&orig_obj->desc);
14207d34c9bbSAndrei Homescu mrd_size = sizeof(struct ffa_comp_mrd);
14217d34c9bbSAndrei Homescu mrd_size += orig_mrd->address_range_count * sizeof(struct ffa_cons_mrd);
14227d34c9bbSAndrei Homescu
14237d34c9bbSAndrei Homescu /*
14247d34c9bbSAndrei Homescu * Compute the hypervisor fragment shift that we add to the fragment offset
14257d34c9bbSAndrei Homescu * to get the actual position inside obj->desc. The composite MRD starts
14267d34c9bbSAndrei Homescu * at obj->desc + orig_mrd_offset, but at a possibly smaller offset within
14277d34c9bbSAndrei Homescu * the buffer this function populates, because there is only one EMAD.
14287d34c9bbSAndrei Homescu */
14297d34c9bbSAndrei Homescu orig_obj->hyp_shift = orig_mrd_offset - hyp_mrd_offset;
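/*
 * Worked example (typical layout): if the composite MRD in obj->desc sits
 * right after two v1.1 EMADs, then orig_mrd_offset ==
 * sizeof(struct ffa_mtd) + 2 * sizeof(struct ffa_emad_v1_0) while
 * hyp_mrd_offset == sizeof(struct ffa_mtd) + sizeof(struct ffa_emad_v1_0),
 * giving hyp_shift == sizeof(struct ffa_emad_v1_0). A later
 * FFA_MEM_FRAG_RX at offset X then reads from obj->desc at X + hyp_shift.
 */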
14307d34c9bbSAndrei Homescu
14317d34c9bbSAndrei Homescu mrd_copy_size = MIN(mrd_size, buf_size - hyp_mrd_offset);
14327d34c9bbSAndrei Homescu *copy_size = hyp_mrd_offset + mrd_copy_size;
14337d34c9bbSAndrei Homescu *desc_size = hyp_mrd_offset + mrd_size;
14347d34c9bbSAndrei Homescu
14357d34c9bbSAndrei Homescu hyp_emad.comp_mrd_offset = hyp_mrd_offset;
14367d34c9bbSAndrei Homescu memcpy((uint8_t *)dst + mtd_size, &hyp_emad, emad_size);
14377d34c9bbSAndrei Homescu memcpy((uint8_t *)dst + hyp_mrd_offset, orig_mrd, mrd_copy_size);
14387d34c9bbSAndrei Homescu
14397d34c9bbSAndrei Homescu return 0;
14407d34c9bbSAndrei Homescu }
14417d34c9bbSAndrei Homescu
14427d34c9bbSAndrei Homescu /**
1443cb2e5746SAndrei Homescu * spmc_ffa_mem_retrieve_update_ns_bit - Update the NS bit in the response descriptor:
1444cb2e5746SAndrei Homescu * clear it if a secure caller implements a
1445cb2e5746SAndrei Homescu * version earlier than FF-A v1.1 and has not
14467d34c9bbSAndrei Homescu * requested the functionality; for a non-secure
14477d34c9bbSAndrei Homescu * caller, set or clear it per the caller's FF-A version.
14480560b53eSMarc Bonnici * @resp: Descriptor populated in callers RX buffer.
14490560b53eSMarc Bonnici * @sp_ctx: Context of the calling SP.
 * @secure_origin: True if the call originated from the secure world.
14500560b53eSMarc Bonnici */
spmc_ffa_mem_retrieve_update_ns_bit(struct ffa_mtd * resp,struct secure_partition_desc * sp_ctx,bool secure_origin)1451cb2e5746SAndrei Homescu void spmc_ffa_mem_retrieve_update_ns_bit(struct ffa_mtd *resp,
1452cb2e5746SAndrei Homescu struct secure_partition_desc *sp_ctx,
1453cb2e5746SAndrei Homescu bool secure_origin)
14540560b53eSMarc Bonnici {
14557d34c9bbSAndrei Homescu uint32_t ffa_version = get_partition_ffa_version(secure_origin);
14567d34c9bbSAndrei Homescu
1457cb2e5746SAndrei Homescu if (secure_origin &&
1458cb2e5746SAndrei Homescu sp_ctx->ffa_version < MAKE_FFA_VERSION(1, 1) &&
1459cb2e5746SAndrei Homescu !sp_ctx->ns_bit_requested) {
1460cb2e5746SAndrei Homescu resp->memory_region_attributes &= ~FFA_MEM_ATTR_NS_BIT;
14617d34c9bbSAndrei Homescu } else if (!secure_origin) {
14627d34c9bbSAndrei Homescu /*
14637d34c9bbSAndrei Homescu * The NS bit is set by the SPMC in the corresponding invocation
14647d34c9bbSAndrei Homescu * of the FFA_MEM_RETRIEVE_RESP ABI at the Non-secure physical
14657d34c9bbSAndrei Homescu * FF-A instance as follows.
14667d34c9bbSAndrei Homescu */
14677d34c9bbSAndrei Homescu if (ffa_version > MAKE_FFA_VERSION(1, 0)) {
14687d34c9bbSAndrei Homescu /*
14697d34c9bbSAndrei Homescu * The bit is set to b'1 if the version of the Framework
14707d34c9bbSAndrei Homescu * implemented by the Hypervisor is greater than v1.0
14717d34c9bbSAndrei Homescu */
14727d34c9bbSAndrei Homescu resp->memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
14737d34c9bbSAndrei Homescu } else {
14747d34c9bbSAndrei Homescu /*
14757d34c9bbSAndrei Homescu * The bit is set to b'0 if the version of the Framework
14767d34c9bbSAndrei Homescu * implemented by the Hypervisor is v1.0
14777d34c9bbSAndrei Homescu */
14787d34c9bbSAndrei Homescu resp->memory_region_attributes &= ~FFA_MEM_ATTR_NS_BIT;
14797d34c9bbSAndrei Homescu }
14800560b53eSMarc Bonnici }
14810560b53eSMarc Bonnici }
14820560b53eSMarc Bonnici
14830560b53eSMarc Bonnici /**
1484e0b1a6d5SMarc Bonnici * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
1485e0b1a6d5SMarc Bonnici * @smc_fid: FID of SMC
1486e0b1a6d5SMarc Bonnici * @total_length: Total length of retrieve request descriptor if this is
1487e0b1a6d5SMarc Bonnici * the first call. Otherwise (unsupported) must be 0.
1488e0b1a6d5SMarc Bonnici * @fragment_length: Length of fragment of retrieve request descriptor passed
1489e0b1a6d5SMarc Bonnici * in this call. Only @fragment_length == @total_length is
1490e0b1a6d5SMarc Bonnici * supported by this implementation.
1491e0b1a6d5SMarc Bonnici * @address: Not supported, must be 0.
1492e0b1a6d5SMarc Bonnici * @page_count: Not supported, must be 0.
1493e0b1a6d5SMarc Bonnici * @smc_handle: Handle passed to smc call. Used to return
1494e0b1a6d5SMarc Bonnici * FFA_MEM_RETRIEVE_RESP.
1495e0b1a6d5SMarc Bonnici *
1496e0b1a6d5SMarc Bonnici * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
14977d34c9bbSAndrei Homescu * Used by secure os to retrieve memory already shared by non-secure os,
14987d34c9bbSAndrei Homescu * or by the hypervisor to retrieve the memory region for a specific handle.
1499e0b1a6d5SMarc Bonnici * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
1500e0b1a6d5SMarc Bonnici * the client must call FFA_MEM_FRAG_RX until the full response has been
1501e0b1a6d5SMarc Bonnici * received.
1502e0b1a6d5SMarc Bonnici *
1503e0b1a6d5SMarc Bonnici * Return: @handle on success, error code on failure.
1504e0b1a6d5SMarc Bonnici */
1505e0b1a6d5SMarc Bonnici long
spmc_ffa_mem_retrieve_req(uint32_t smc_fid,bool secure_origin,uint32_t total_length,uint32_t fragment_length,uint64_t address,uint32_t page_count,void * cookie,void * handle,uint64_t flags)1506e0b1a6d5SMarc Bonnici spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
1507e0b1a6d5SMarc Bonnici bool secure_origin,
1508e0b1a6d5SMarc Bonnici uint32_t total_length,
1509e0b1a6d5SMarc Bonnici uint32_t fragment_length,
1510e0b1a6d5SMarc Bonnici uint64_t address,
1511e0b1a6d5SMarc Bonnici uint32_t page_count,
1512e0b1a6d5SMarc Bonnici void *cookie,
1513e0b1a6d5SMarc Bonnici void *handle,
1514e0b1a6d5SMarc Bonnici uint64_t flags)
1515e0b1a6d5SMarc Bonnici {
1516e0b1a6d5SMarc Bonnici int ret;
1517e0b1a6d5SMarc Bonnici size_t buf_size;
15187e804f96SMarc Bonnici size_t copy_size = 0;
15197e804f96SMarc Bonnici size_t min_desc_size;
15207e804f96SMarc Bonnici size_t out_desc_size = 0;
15217e804f96SMarc Bonnici
15227e804f96SMarc Bonnici /*
15237e804f96SMarc Bonnici * Currently we are only accessing fields that are the same in both the
15247e804f96SMarc Bonnici * v1.0 and v1.1 mtd struct therefore we can use a v1.1 struct directly
15257e804f96SMarc Bonnici * here. We only need to validate against the appropriate struct size.
15267e804f96SMarc Bonnici */
15277e804f96SMarc Bonnici struct ffa_mtd *resp;
15287e804f96SMarc Bonnici const struct ffa_mtd *req;
1529e0b1a6d5SMarc Bonnici struct spmc_shmem_obj *obj = NULL;
1530e0b1a6d5SMarc Bonnici struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
15317e804f96SMarc Bonnici uint32_t ffa_version = get_partition_ffa_version(secure_origin);
15320560b53eSMarc Bonnici struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
1533e0b1a6d5SMarc Bonnici
1534e0b1a6d5SMarc Bonnici if (address != 0U || page_count != 0U) {
1535e0b1a6d5SMarc Bonnici WARN("%s: custom memory region not supported.\n", __func__);
1536e0b1a6d5SMarc Bonnici return spmc_ffa_error_return(handle,
1537e0b1a6d5SMarc Bonnici FFA_ERROR_INVALID_PARAMETER);
1538e0b1a6d5SMarc Bonnici }
1539e0b1a6d5SMarc Bonnici
1540e0b1a6d5SMarc Bonnici spin_lock(&mbox->lock);
1541e0b1a6d5SMarc Bonnici
1542e0b1a6d5SMarc Bonnici req = mbox->tx_buffer;
1543e0b1a6d5SMarc Bonnici resp = mbox->rx_buffer;
1544e0b1a6d5SMarc Bonnici buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1545e0b1a6d5SMarc Bonnici
1546e0b1a6d5SMarc Bonnici if (mbox->rxtx_page_count == 0U) {
1547e0b1a6d5SMarc Bonnici WARN("%s: buffer pair not registered.\n", __func__);
1548e0b1a6d5SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1549e0b1a6d5SMarc Bonnici goto err_unlock_mailbox;
1550e0b1a6d5SMarc Bonnici }
1551e0b1a6d5SMarc Bonnici
1552e0b1a6d5SMarc Bonnici if (mbox->state != MAILBOX_STATE_EMPTY) {
1553e0b1a6d5SMarc Bonnici WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
1554e0b1a6d5SMarc Bonnici ret = FFA_ERROR_DENIED;
1555e0b1a6d5SMarc Bonnici goto err_unlock_mailbox;
1556e0b1a6d5SMarc Bonnici }
1557e0b1a6d5SMarc Bonnici
1558e0b1a6d5SMarc Bonnici if (fragment_length != total_length) {
1559e0b1a6d5SMarc Bonnici WARN("%s: fragmented retrieve request not supported.\n",
1560e0b1a6d5SMarc Bonnici __func__);
1561e0b1a6d5SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1562e0b1a6d5SMarc Bonnici goto err_unlock_mailbox;
1563e0b1a6d5SMarc Bonnici }
1564e0b1a6d5SMarc Bonnici
15657d34c9bbSAndrei Homescu /* req->emad_count is not set for retrieve by hypervisor */
15667d34c9bbSAndrei Homescu if ((secure_origin && req->emad_count == 0U) ||
15677d34c9bbSAndrei Homescu (!secure_origin && req->emad_count != 0U)) {
1568f0244e5dSMarc Bonnici WARN("%s: unsupported attribute desc count %u.\n",
1569f0244e5dSMarc Bonnici __func__, req->emad_count);
1570ac568b2bSvallau01 ret = FFA_ERROR_INVALID_PARAMETER;
1571ac568b2bSvallau01 goto err_unlock_mailbox;
1572e0b1a6d5SMarc Bonnici }
1573e0b1a6d5SMarc Bonnici
15747e804f96SMarc Bonnici /* Determine the appropriate minimum descriptor size. */
15757e804f96SMarc Bonnici if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
15767e804f96SMarc Bonnici min_desc_size = sizeof(struct ffa_mtd_v1_0);
15777e804f96SMarc Bonnici } else {
15787e804f96SMarc Bonnici min_desc_size = sizeof(struct ffa_mtd);
15797e804f96SMarc Bonnici }
15807e804f96SMarc Bonnici if (total_length < min_desc_size) {
1581e0b1a6d5SMarc Bonnici WARN("%s: invalid length %u < %zu\n", __func__, total_length,
15827e804f96SMarc Bonnici min_desc_size);
1583e0b1a6d5SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1584e0b1a6d5SMarc Bonnici goto err_unlock_mailbox;
1585e0b1a6d5SMarc Bonnici }
1586e0b1a6d5SMarc Bonnici
1587e0b1a6d5SMarc Bonnici spin_lock(&spmc_shmem_obj_state.lock);
1588e0b1a6d5SMarc Bonnici
1589e0b1a6d5SMarc Bonnici obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1590e0b1a6d5SMarc Bonnici if (obj == NULL) {
1591e0b1a6d5SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1592e0b1a6d5SMarc Bonnici goto err_unlock_all;
1593e0b1a6d5SMarc Bonnici }
1594e0b1a6d5SMarc Bonnici
1595e0b1a6d5SMarc Bonnici if (obj->desc_filled != obj->desc_size) {
1596e0b1a6d5SMarc Bonnici WARN("%s: incomplete object desc filled %zu < size %zu\n",
1597e0b1a6d5SMarc Bonnici __func__, obj->desc_filled, obj->desc_size);
1598e0b1a6d5SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1599e0b1a6d5SMarc Bonnici goto err_unlock_all;
1600e0b1a6d5SMarc Bonnici }
1601e0b1a6d5SMarc Bonnici
1602e0b1a6d5SMarc Bonnici if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
1603e0b1a6d5SMarc Bonnici WARN("%s: wrong sender id 0x%x != 0x%x\n",
1604e0b1a6d5SMarc Bonnici __func__, req->sender_id, obj->desc.sender_id);
1605e0b1a6d5SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1606e0b1a6d5SMarc Bonnici goto err_unlock_all;
1607e0b1a6d5SMarc Bonnici }
1608e0b1a6d5SMarc Bonnici
1609e0b1a6d5SMarc Bonnici if (req->emad_count != 0U && req->tag != obj->desc.tag) {
1610e0b1a6d5SMarc Bonnici WARN("%s: wrong tag 0x%lx != 0x%lx\n",
1611e0b1a6d5SMarc Bonnici __func__, req->tag, obj->desc.tag);
1612e0b1a6d5SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1613e0b1a6d5SMarc Bonnici goto err_unlock_all;
1614e0b1a6d5SMarc Bonnici }
1615e0b1a6d5SMarc Bonnici
1616f0244e5dSMarc Bonnici if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
1617f0244e5dSMarc Bonnici WARN("%s: mismatch of endpoint counts %u != %u\n",
1618f0244e5dSMarc Bonnici __func__, req->emad_count, obj->desc.emad_count);
1619f0244e5dSMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1620f0244e5dSMarc Bonnici goto err_unlock_all;
1621f0244e5dSMarc Bonnici }
1622f0244e5dSMarc Bonnici
16230560b53eSMarc Bonnici /* Ensure the NS bit is set to 0 in the request. */
16240560b53eSMarc Bonnici if ((req->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
16250560b53eSMarc Bonnici WARN("%s: NS mem attributes flags MBZ.\n", __func__);
16260560b53eSMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
16270560b53eSMarc Bonnici goto err_unlock_all;
16280560b53eSMarc Bonnici }
16290560b53eSMarc Bonnici
1630e0b1a6d5SMarc Bonnici if (req->flags != 0U) {
1631e0b1a6d5SMarc Bonnici if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
1632e0b1a6d5SMarc Bonnici (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
1633e0b1a6d5SMarc Bonnici /*
1634e0b1a6d5SMarc Bonnici * If the retrieve request specifies the memory
1635e0b1a6d5SMarc Bonnici * transaction ensure it matches what we expect.
1636e0b1a6d5SMarc Bonnici */
1637e0b1a6d5SMarc Bonnici WARN("%s: wrong mem transaction flags %x != %x\n",
1638e0b1a6d5SMarc Bonnici __func__, req->flags, obj->desc.flags);
1639e0b1a6d5SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1640e0b1a6d5SMarc Bonnici goto err_unlock_all;
1641e0b1a6d5SMarc Bonnici }
1642e0b1a6d5SMarc Bonnici
1643e0b1a6d5SMarc Bonnici if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
1644e0b1a6d5SMarc Bonnici req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
1645e0b1a6d5SMarc Bonnici /*
1646e0b1a6d5SMarc Bonnici * Current implementation does not support donate and
1647e0b1a6d5SMarc Bonnici * it supports no other flags.
1648e0b1a6d5SMarc Bonnici */
1649e0b1a6d5SMarc Bonnici WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
1650e0b1a6d5SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1651e0b1a6d5SMarc Bonnici goto err_unlock_all;
1652e0b1a6d5SMarc Bonnici }
1653e0b1a6d5SMarc Bonnici }
1654e0b1a6d5SMarc Bonnici
1655b4c3621eSMarc Bonnici /* Validate the caller is a valid participant. */
16567d34c9bbSAndrei Homescu if (req->emad_count != 0U &&
16577d34c9bbSAndrei Homescu !spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
1658b4c3621eSMarc Bonnici WARN("%s: Invalid endpoint ID (0x%x).\n",
1659b4c3621eSMarc Bonnici __func__, sp_ctx->sp_id);
1660b4c3621eSMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1661b4c3621eSMarc Bonnici goto err_unlock_all;
1662b4c3621eSMarc Bonnici }
1663b4c3621eSMarc Bonnici
16647e804f96SMarc Bonnici /* Validate that the provided emad offset and structure are valid. */
16657e804f96SMarc Bonnici for (size_t i = 0; i < req->emad_count; i++) {
16667e804f96SMarc Bonnici size_t emad_size;
16677e804f96SMarc Bonnici struct ffa_emad_v1_0 *emad;
16687e804f96SMarc Bonnici
16697e804f96SMarc Bonnici emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
16707e804f96SMarc Bonnici &emad_size);
16717e804f96SMarc Bonnici
16727e804f96SMarc Bonnici if ((uintptr_t) emad >= (uintptr_t)
16737e804f96SMarc Bonnici ((uint8_t *) req + total_length)) {
1674f0244e5dSMarc Bonnici WARN("Invalid emad access.\n");
16757e804f96SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
16767e804f96SMarc Bonnici goto err_unlock_all;
16777e804f96SMarc Bonnici }
1678f0244e5dSMarc Bonnici }
1679f0244e5dSMarc Bonnici
1680f0244e5dSMarc Bonnici /*
1681f0244e5dSMarc Bonnici * Validate all the endpoints match in the case of multiple
1682f0244e5dSMarc Bonnici * borrowers. We don't mandate that the order of the borrowers
1683f0244e5dSMarc Bonnici * must match in the descriptors therefore check to see if the
1684f0244e5dSMarc Bonnici * endpoints match in any order.
1685f0244e5dSMarc Bonnici */
1686f0244e5dSMarc Bonnici for (size_t i = 0; i < req->emad_count; i++) {
1687f0244e5dSMarc Bonnici bool found = false;
16887e804f96SMarc Bonnici size_t emad_size;
16897e804f96SMarc Bonnici struct ffa_emad_v1_0 *emad;
16907e804f96SMarc Bonnici struct ffa_emad_v1_0 *other_emad;
16917e804f96SMarc Bonnici
16927e804f96SMarc Bonnici emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
16937e804f96SMarc Bonnici &emad_size);
1694f0244e5dSMarc Bonnici
1695f0244e5dSMarc Bonnici for (size_t j = 0; j < obj->desc.emad_count; j++) {
16967e804f96SMarc Bonnici other_emad = spmc_shmem_obj_get_emad(
16977e804f96SMarc Bonnici &obj->desc, j, MAKE_FFA_VERSION(1, 1),
16987e804f96SMarc Bonnici &emad_size);
16997e804f96SMarc Bonnici
17007e804f96SMarc Bonnici if (req->emad_count &&
17017e804f96SMarc Bonnici emad->mapd.endpoint_id ==
17027e804f96SMarc Bonnici other_emad->mapd.endpoint_id) {
1703f0244e5dSMarc Bonnici found = true;
1704f0244e5dSMarc Bonnici break;
1705f0244e5dSMarc Bonnici }
1706f0244e5dSMarc Bonnici }
1707f0244e5dSMarc Bonnici
1708f0244e5dSMarc Bonnici if (!found) {
1709f0244e5dSMarc Bonnici WARN("%s: invalid receiver id (0x%x).\n",
17107e804f96SMarc Bonnici __func__, emad->mapd.endpoint_id);
1711e0b1a6d5SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1712e0b1a6d5SMarc Bonnici goto err_unlock_all;
1713e0b1a6d5SMarc Bonnici }
1714f0244e5dSMarc Bonnici }
1715e0b1a6d5SMarc Bonnici
1716e0b1a6d5SMarc Bonnici mbox->state = MAILBOX_STATE_FULL;
1717e0b1a6d5SMarc Bonnici
1718e0b1a6d5SMarc Bonnici if (req->emad_count != 0U) {
1719e0b1a6d5SMarc Bonnici obj->in_use++;
1720e0b1a6d5SMarc Bonnici }
1721e0b1a6d5SMarc Bonnici
17227e804f96SMarc Bonnici /*
17237e804f96SMarc Bonnici * If the hypervisor is retrieving (no EMADs in the request), build the
17247e804f96SMarc Bonnici * single-EMAD descriptor; else convert for v1.0 callers or copy directly.
17257e804f96SMarc Bonnici */
17267d34c9bbSAndrei Homescu if (req->emad_count == 0U) {
17277d34c9bbSAndrei Homescu /*
17287d34c9bbSAndrei Homescu * We should only get here from the hypervisor per
17297d34c9bbSAndrei Homescu * the checks above, but verify once again to be sure.
17307d34c9bbSAndrei Homescu */
17317d34c9bbSAndrei Homescu assert(!secure_origin);
17327d34c9bbSAndrei Homescu
17337d34c9bbSAndrei Homescu ret = spmc_populate_ffa_hyp_descriptor(resp, obj, buf_size, ffa_version,
17347d34c9bbSAndrei Homescu ©_size, &out_desc_size);
17357d34c9bbSAndrei Homescu if (ret != 0U) {
17367d34c9bbSAndrei Homescu ERROR("%s: Failed to process descriptor.\n", __func__);
17377d34c9bbSAndrei Homescu goto err_unlock_all;
17387d34c9bbSAndrei Homescu }
17397d34c9bbSAndrei Homescu } else if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
17407e804f96SMarc Bonnici ret = spmc_populate_ffa_v1_0_descriptor(resp, obj, buf_size, 0,
17417e804f96SMarc Bonnici ©_size,
17427e804f96SMarc Bonnici &out_desc_size);
17437e804f96SMarc Bonnici if (ret != 0U) {
17447e804f96SMarc Bonnici ERROR("%s: Failed to process descriptor.\n", __func__);
17457e804f96SMarc Bonnici goto err_unlock_all;
17467e804f96SMarc Bonnici }
17477e804f96SMarc Bonnici } else {
1748e0b1a6d5SMarc Bonnici copy_size = MIN(obj->desc_size, buf_size);
17497e804f96SMarc Bonnici out_desc_size = obj->desc_size;
1750e0b1a6d5SMarc Bonnici
1751e0b1a6d5SMarc Bonnici memcpy(resp, &obj->desc, copy_size);
17527e804f96SMarc Bonnici }
1753e0b1a6d5SMarc Bonnici
1754c55b519eSAndrei Homescu /* Update the RX fragment state */
1755c55b519eSAndrei Homescu mbox->last_rx_fragment_offset = 0;
1756c55b519eSAndrei Homescu mbox->next_rx_fragment_offset = copy_size;
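/*
 * Remembering both the last and the next offset lets FFA_MEM_FRAG_RX
 * accept either a retransmission of the previous fragment or the next
 * fragment in sequence, and reject anything else.
 */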
1757c55b519eSAndrei Homescu
1758cb2e5746SAndrei Homescu /* Update the NS bit in the response if applicable. */
1759cb2e5746SAndrei Homescu spmc_ffa_mem_retrieve_update_ns_bit(resp, sp_ctx, secure_origin);
17600560b53eSMarc Bonnici
1761e0b1a6d5SMarc Bonnici spin_unlock(&spmc_shmem_obj_state.lock);
1762e0b1a6d5SMarc Bonnici spin_unlock(&mbox->lock);
1763e0b1a6d5SMarc Bonnici
17647e804f96SMarc Bonnici SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, out_desc_size,
1765e0b1a6d5SMarc Bonnici copy_size, 0, 0, 0, 0, 0);
1766e0b1a6d5SMarc Bonnici
1767e0b1a6d5SMarc Bonnici err_unlock_all:
1768e0b1a6d5SMarc Bonnici spin_unlock(&spmc_shmem_obj_state.lock);
1769e0b1a6d5SMarc Bonnici err_unlock_mailbox:
1770e0b1a6d5SMarc Bonnici spin_unlock(&mbox->lock);
1771e0b1a6d5SMarc Bonnici return spmc_ffa_error_return(handle, ret);
1772e0b1a6d5SMarc Bonnici }
1773e0b1a6d5SMarc Bonnici
1774e0b1a6d5SMarc Bonnici /**
1775e0b1a6d5SMarc Bonnici * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
1776e0b1a6d5SMarc Bonnici * @client: Client state.
1777e0b1a6d5SMarc Bonnici * @handle_low: Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
1778e0b1a6d5SMarc Bonnici * @handle_high: Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
1779e0b1a6d5SMarc Bonnici * @fragment_offset: Byte offset in descriptor to resume at.
1780e0b1a6d5SMarc Bonnici * @sender_id: Bit[31:16]: Endpoint id of sender if client is a
1781e0b1a6d5SMarc Bonnici * hypervisor. 0 otherwise.
1782e0b1a6d5SMarc Bonnici * @smc_handle: Handle passed to smc call. Used to return
1783e0b1a6d5SMarc Bonnici * FFA_MEM_FRAG_TX.
1784e0b1a6d5SMarc Bonnici *
1785e0b1a6d5SMarc Bonnici * Return: @smc_handle on success, error code on failure.
1786e0b1a6d5SMarc Bonnici */
spmc_ffa_mem_frag_rx(uint32_t smc_fid,bool secure_origin,uint32_t handle_low,uint32_t handle_high,uint32_t fragment_offset,uint32_t sender_id,void * cookie,void * handle,uint64_t flags)1787e0b1a6d5SMarc Bonnici long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
1788e0b1a6d5SMarc Bonnici bool secure_origin,
1789e0b1a6d5SMarc Bonnici uint32_t handle_low,
1790e0b1a6d5SMarc Bonnici uint32_t handle_high,
1791e0b1a6d5SMarc Bonnici uint32_t fragment_offset,
1792e0b1a6d5SMarc Bonnici uint32_t sender_id,
1793e0b1a6d5SMarc Bonnici void *cookie,
1794e0b1a6d5SMarc Bonnici void *handle,
1795e0b1a6d5SMarc Bonnici uint64_t flags)
1796e0b1a6d5SMarc Bonnici {
1797e0b1a6d5SMarc Bonnici int ret;
1798e0b1a6d5SMarc Bonnici void *src;
1799e0b1a6d5SMarc Bonnici size_t buf_size;
1800e0b1a6d5SMarc Bonnici size_t copy_size;
1801e0b1a6d5SMarc Bonnici size_t full_copy_size;
1802e0b1a6d5SMarc Bonnici uint32_t desc_sender_id;
1803e0b1a6d5SMarc Bonnici struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1804e0b1a6d5SMarc Bonnici uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1805e0b1a6d5SMarc Bonnici struct spmc_shmem_obj *obj;
18067e804f96SMarc Bonnici uint32_t ffa_version = get_partition_ffa_version(secure_origin);
18077d34c9bbSAndrei Homescu uint32_t actual_fragment_offset;
1808e0b1a6d5SMarc Bonnici
1809e0b1a6d5SMarc Bonnici spin_lock(&spmc_shmem_obj_state.lock);
1810e0b1a6d5SMarc Bonnici
1811e0b1a6d5SMarc Bonnici obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1812e0b1a6d5SMarc Bonnici if (obj == NULL) {
1813e0b1a6d5SMarc Bonnici WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
1814e0b1a6d5SMarc Bonnici __func__, mem_handle);
1815e0b1a6d5SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1816e0b1a6d5SMarc Bonnici goto err_unlock_shmem;
1817e0b1a6d5SMarc Bonnici }
1818e0b1a6d5SMarc Bonnici
1819e0b1a6d5SMarc Bonnici desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
1820e0b1a6d5SMarc Bonnici if (sender_id != 0U && sender_id != desc_sender_id) {
1821e0b1a6d5SMarc Bonnici WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
1822e0b1a6d5SMarc Bonnici sender_id, desc_sender_id);
1823e0b1a6d5SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1824e0b1a6d5SMarc Bonnici goto err_unlock_shmem;
1825e0b1a6d5SMarc Bonnici }
1826e0b1a6d5SMarc Bonnici
18277d34c9bbSAndrei Homescu actual_fragment_offset = fragment_offset;
18287d34c9bbSAndrei Homescu if (!secure_origin) {
18297d34c9bbSAndrei Homescu /* Apply the hypervisor shift if the request came from NS */
18307d34c9bbSAndrei Homescu actual_fragment_offset += obj->hyp_shift;
18317d34c9bbSAndrei Homescu }
18327d34c9bbSAndrei Homescu
18337d34c9bbSAndrei Homescu if (actual_fragment_offset >= obj->desc_size) {
18347d34c9bbSAndrei Homescu WARN("%s: invalid fragment_offset 0x%x actual 0x%x >= 0x%zx\n",
18357d34c9bbSAndrei Homescu __func__, fragment_offset, actual_fragment_offset, obj->desc_size);
1836e0b1a6d5SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1837e0b1a6d5SMarc Bonnici goto err_unlock_shmem;
1838e0b1a6d5SMarc Bonnici }
1839e0b1a6d5SMarc Bonnici
1840e0b1a6d5SMarc Bonnici spin_lock(&mbox->lock);
1841e0b1a6d5SMarc Bonnici
1842e0b1a6d5SMarc Bonnici if (mbox->rxtx_page_count == 0U) {
1843e0b1a6d5SMarc Bonnici WARN("%s: buffer pair not registered.\n", __func__);
1844e0b1a6d5SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1845e0b1a6d5SMarc Bonnici goto err_unlock_all;
1846e0b1a6d5SMarc Bonnici }
1847e0b1a6d5SMarc Bonnici
1848e0b1a6d5SMarc Bonnici if (mbox->state != MAILBOX_STATE_EMPTY) {
1849e0b1a6d5SMarc Bonnici WARN("%s: RX Buffer is full!\n", __func__);
1850e0b1a6d5SMarc Bonnici ret = FFA_ERROR_DENIED;
1851e0b1a6d5SMarc Bonnici goto err_unlock_all;
1852e0b1a6d5SMarc Bonnici }
1853e0b1a6d5SMarc Bonnici
1854c55b519eSAndrei Homescu if (fragment_offset != mbox->last_rx_fragment_offset &&
1855c55b519eSAndrei Homescu fragment_offset != mbox->next_rx_fragment_offset) {
1856c55b519eSAndrei Homescu WARN("%s: invalid fragment_offset 0x%x expected 0x%x or 0x%x\n",
1857c55b519eSAndrei Homescu __func__, fragment_offset, mbox->last_rx_fragment_offset,
1858c55b519eSAndrei Homescu mbox->next_rx_fragment_offset);
1859c55b519eSAndrei Homescu ret = FFA_ERROR_INVALID_PARAMETER;
1860c55b519eSAndrei Homescu goto err_unlock_all;
1861c55b519eSAndrei Homescu }
1862c55b519eSAndrei Homescu
1863e0b1a6d5SMarc Bonnici buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1864e0b1a6d5SMarc Bonnici
1865e0b1a6d5SMarc Bonnici mbox->state = MAILBOX_STATE_FULL;
1866e0b1a6d5SMarc Bonnici
18677e804f96SMarc Bonnici /*
18687d34c9bbSAndrei Homescu * If we are handling the "Support for retrieval by hypervisor" case,
18697d34c9bbSAndrei Homescu * return the specially constructed single-EMAD descriptor. In all other cases,
18707d34c9bbSAndrei Homescu * if the caller is v1.0 convert the descriptor, otherwise copy directly.
18717e804f96SMarc Bonnici */
18727d34c9bbSAndrei Homescu if (!secure_origin && fragment_offset == 0U) {
18737d34c9bbSAndrei Homescu size_t out_desc_size;
18747d34c9bbSAndrei Homescu
18757d34c9bbSAndrei Homescu /*
18767d34c9bbSAndrei Homescu * The caller requested a retransmit of the initial fragment.
18777d34c9bbSAndrei Homescu * Rebuild it here from scratch since we do not have
18787d34c9bbSAndrei Homescu * it stored anywhere.
18797d34c9bbSAndrei Homescu */
18807d34c9bbSAndrei Homescu ret = spmc_populate_ffa_hyp_descriptor(mbox->rx_buffer, obj,
18817d34c9bbSAndrei Homescu buf_size, ffa_version,
18827d34c9bbSAndrei Homescu ©_size, &out_desc_size);
18837d34c9bbSAndrei Homescu if (ret != 0U) {
18847d34c9bbSAndrei Homescu ERROR("%s: Failed to process descriptor.\n", __func__);
18857d34c9bbSAndrei Homescu goto err_unlock_all;
18867d34c9bbSAndrei Homescu }
18877d34c9bbSAndrei Homescu } else if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
18887e804f96SMarc Bonnici size_t out_desc_size;
18897e804f96SMarc Bonnici
18907e804f96SMarc Bonnici ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
18917e804f96SMarc Bonnici buf_size,
18927d34c9bbSAndrei Homescu actual_fragment_offset,
18937e804f96SMarc Bonnici ©_size,
18947e804f96SMarc Bonnici &out_desc_size);
18957e804f96SMarc Bonnici if (ret != 0U) {
18967e804f96SMarc Bonnici ERROR("%s: Failed to process descriptor.\n", __func__);
18977e804f96SMarc Bonnici goto err_unlock_all;
18987e804f96SMarc Bonnici }
18997e804f96SMarc Bonnici } else {
19007d34c9bbSAndrei Homescu full_copy_size = obj->desc_size - actual_fragment_offset;
1901e0b1a6d5SMarc Bonnici copy_size = MIN(full_copy_size, buf_size);
1902e0b1a6d5SMarc Bonnici
1903e0b1a6d5SMarc Bonnici src = &obj->desc;
1904e0b1a6d5SMarc Bonnici
19057d34c9bbSAndrei Homescu memcpy(mbox->rx_buffer, src + actual_fragment_offset, copy_size);
19067e804f96SMarc Bonnici }
1907e0b1a6d5SMarc Bonnici
1908c55b519eSAndrei Homescu mbox->last_rx_fragment_offset = fragment_offset;
1909c55b519eSAndrei Homescu mbox->next_rx_fragment_offset = fragment_offset + copy_size;
1910c55b519eSAndrei Homescu
1911e0b1a6d5SMarc Bonnici spin_unlock(&mbox->lock);
1912e0b1a6d5SMarc Bonnici spin_unlock(&spmc_shmem_obj_state.lock);
1913e0b1a6d5SMarc Bonnici
1914e0b1a6d5SMarc Bonnici SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
1915e0b1a6d5SMarc Bonnici copy_size, sender_id, 0, 0, 0);
1916e0b1a6d5SMarc Bonnici
1917e0b1a6d5SMarc Bonnici err_unlock_all:
1918e0b1a6d5SMarc Bonnici spin_unlock(&mbox->lock);
1919e0b1a6d5SMarc Bonnici err_unlock_shmem:
1920e0b1a6d5SMarc Bonnici spin_unlock(&spmc_shmem_obj_state.lock);
1921e0b1a6d5SMarc Bonnici return spmc_ffa_error_return(handle, ret);
1922e0b1a6d5SMarc Bonnici }
1923e0b1a6d5SMarc Bonnici
1924e0b1a6d5SMarc Bonnici /**
1925e0b1a6d5SMarc Bonnici * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
1926e0b1a6d5SMarc Bonnici * @client: Client state.
1927e0b1a6d5SMarc Bonnici *
1928e0b1a6d5SMarc Bonnici * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
1929e0b1a6d5SMarc Bonnici * Used by secure os to release previously shared memory to non-secure os.
1930e0b1a6d5SMarc Bonnici *
1931e0b1a6d5SMarc Bonnici * The handle to release must be in the client's (secure os's) transmit buffer.
1932e0b1a6d5SMarc Bonnici *
1933e0b1a6d5SMarc Bonnici * Return: 0 on success, error code on failure.
1934e0b1a6d5SMarc Bonnici */
spmc_ffa_mem_relinquish(uint32_t smc_fid,bool secure_origin,uint32_t handle_low,uint32_t handle_high,uint32_t fragment_offset,uint32_t sender_id,void * cookie,void * handle,uint64_t flags)1935e0b1a6d5SMarc Bonnici int spmc_ffa_mem_relinquish(uint32_t smc_fid,
1936e0b1a6d5SMarc Bonnici bool secure_origin,
1937e0b1a6d5SMarc Bonnici uint32_t handle_low,
1938e0b1a6d5SMarc Bonnici uint32_t handle_high,
1939e0b1a6d5SMarc Bonnici uint32_t fragment_offset,
1940e0b1a6d5SMarc Bonnici uint32_t sender_id,
1941e0b1a6d5SMarc Bonnici void *cookie,
1942e0b1a6d5SMarc Bonnici void *handle,
1943e0b1a6d5SMarc Bonnici uint64_t flags)
1944e0b1a6d5SMarc Bonnici {
1945e0b1a6d5SMarc Bonnici int ret;
1946e0b1a6d5SMarc Bonnici struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1947e0b1a6d5SMarc Bonnici struct spmc_shmem_obj *obj;
1948e0b1a6d5SMarc Bonnici const struct ffa_mem_relinquish_descriptor *req;
1949b4c3621eSMarc Bonnici struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
1950e0b1a6d5SMarc Bonnici
1951e0b1a6d5SMarc Bonnici if (!secure_origin) {
1952e0b1a6d5SMarc Bonnici WARN("%s: unsupported relinquish direction.\n", __func__);
1953e0b1a6d5SMarc Bonnici return spmc_ffa_error_return(handle,
1954e0b1a6d5SMarc Bonnici FFA_ERROR_INVALID_PARAMETER);
1955e0b1a6d5SMarc Bonnici }
1956e0b1a6d5SMarc Bonnici
1957e0b1a6d5SMarc Bonnici spin_lock(&mbox->lock);
1958e0b1a6d5SMarc Bonnici
1959e0b1a6d5SMarc Bonnici if (mbox->rxtx_page_count == 0U) {
1960e0b1a6d5SMarc Bonnici WARN("%s: buffer pair not registered.\n", __func__);
1961e0b1a6d5SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1962e0b1a6d5SMarc Bonnici goto err_unlock_mailbox;
1963e0b1a6d5SMarc Bonnici }
1964e0b1a6d5SMarc Bonnici
1965e0b1a6d5SMarc Bonnici req = mbox->tx_buffer;
1966e0b1a6d5SMarc Bonnici
1967e0b1a6d5SMarc Bonnici if (req->flags != 0U) {
1968e0b1a6d5SMarc Bonnici WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
1969e0b1a6d5SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1970e0b1a6d5SMarc Bonnici goto err_unlock_mailbox;
1971e0b1a6d5SMarc Bonnici }
1972e0b1a6d5SMarc Bonnici
1973f0244e5dSMarc Bonnici if (req->endpoint_count == 0) {
1974f0244e5dSMarc Bonnici WARN("%s: endpoint count cannot be 0.\n", __func__);
1975f0244e5dSMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1976f0244e5dSMarc Bonnici goto err_unlock_mailbox;
1977f0244e5dSMarc Bonnici }
1978f0244e5dSMarc Bonnici
1979e0b1a6d5SMarc Bonnici spin_lock(&spmc_shmem_obj_state.lock);
1980e0b1a6d5SMarc Bonnici
1981e0b1a6d5SMarc Bonnici obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1982e0b1a6d5SMarc Bonnici if (obj == NULL) {
1983e0b1a6d5SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1984e0b1a6d5SMarc Bonnici goto err_unlock_all;
1985e0b1a6d5SMarc Bonnici }
1986e0b1a6d5SMarc Bonnici
1987b4c3621eSMarc Bonnici /*
1988b4c3621eSMarc Bonnici * Validate the endpoint ID was populated correctly. We don't currently
1989b4c3621eSMarc Bonnici * support proxy endpoints so the endpoint count should always be 1.
1990b4c3621eSMarc Bonnici */
1991b4c3621eSMarc Bonnici if (req->endpoint_count != 1U) {
1992b4c3621eSMarc Bonnici WARN("%s: unsupported endpoint count %u != 1\n", __func__,
1993b4c3621eSMarc Bonnici req->endpoint_count);
1994e0b1a6d5SMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
1995e0b1a6d5SMarc Bonnici goto err_unlock_all;
1996e0b1a6d5SMarc Bonnici }
1997f0244e5dSMarc Bonnici
1998b4c3621eSMarc Bonnici /* Validate provided endpoint ID matches the partition ID. */
1999b4c3621eSMarc Bonnici if (req->endpoint_array[0] != sp_ctx->sp_id) {
2000b4c3621eSMarc Bonnici WARN("%s: invalid endpoint ID %u != %u\n", __func__,
2001b4c3621eSMarc Bonnici req->endpoint_array[0], sp_ctx->sp_id);
2002b4c3621eSMarc Bonnici ret = FFA_ERROR_INVALID_PARAMETER;
2003b4c3621eSMarc Bonnici goto err_unlock_all;
2004f0244e5dSMarc Bonnici }
2005f0244e5dSMarc Bonnici
	/* Validate that the caller is a participant in the transaction. */
	if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
		WARN("%s: endpoint ID 0x%x is not a valid participant.\n",
		     __func__, req->endpoint_array[0]);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}

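	/*
	 * Each successful FFA_MEM_RETRIEVE_REQ increments in_use, so a
	 * relinquish must find at least one outstanding retrieval to
	 * balance against.
	 */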
	if (obj->in_use == 0U) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock_all;
	}
	obj->in_use--;

	spin_unlock(&spmc_shmem_obj_state.lock);
	spin_unlock(&mbox->lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);

err_unlock_all:
	spin_unlock(&spmc_shmem_obj_state.lock);
err_unlock_mailbox:
	spin_unlock(&mbox->lock);
	return spmc_ffa_error_return(handle, ret);
}

/**
 * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
 * @smc_fid:       FID of the SMC invoked.
 * @secure_origin: true if the call originated from the secure world.
 * @handle_low:    Unique handle of the shared memory object to reclaim.
 *                 Bits [31:0].
 * @handle_high:   Unique handle of the shared memory object to reclaim.
 *                 Bits [63:32].
 * @mem_flags:     Memory transaction flags. No flags are currently
 *                 supported, so this must be zero.
 * @x4:            Unused.
 * @cookie:        Unused.
 * @handle:        Context handle used by the SMC_RET* macros.
 * @flags:         Unused.
 *
 * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
 * Used by the non-secure OS to reclaim memory previously shared with the
 * secure OS.
 *
 * Return: 0 on success, error code on failure.
 */
int spmc_ffa_mem_reclaim(uint32_t smc_fid,
			 bool secure_origin,
			 uint32_t handle_low,
			 uint32_t handle_high,
			 uint32_t mem_flags,
			 uint64_t x4,
			 void *cookie,
			 void *handle,
			 uint64_t flags)
{
	int ret;
	struct spmc_shmem_obj *obj;
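	/* Reassemble the 64-bit handle from its two 32-bit SMC arguments. */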
	uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);

	if (secure_origin) {
		WARN("%s: unsupported reclaim direction.\n", __func__);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	if (mem_flags != 0U) {
		WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&spmc_shmem_obj_state.lock);

	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
	if (obj == NULL) {
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}
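	/*
	 * Deny the reclaim while a borrower still holds an outstanding
	 * retrieval of this object.
	 */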
	if (obj->in_use != 0U) {
		ret = FFA_ERROR_DENIED;
		goto err_unlock;
	}

	if (obj->desc_filled != obj->desc_size) {
		WARN("%s: incomplete object desc filled %zu < size %zu\n",
		     __func__, obj->desc_filled, obj->desc_size);
		ret = FFA_ERROR_INVALID_PARAMETER;
		goto err_unlock;
	}

	/* Allow for platform specific operations to be performed. */
	ret = plat_spmc_shmem_reclaim(&obj->desc);
	if (ret != 0) {
		goto err_unlock;
	}
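	/*
	 * A minimal sketch of that hook for a port needing no extra
	 * cleanup (an illustrative assumption, not this file's
	 * definition; check the platform port for the real one):
	 *
	 * int plat_spmc_shmem_reclaim(struct ffa_mtd *desc)
	 * {
	 *	return 0;
	 * }
	 */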

	spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
	spin_unlock(&spmc_shmem_obj_state.lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);

err_unlock:
	spin_unlock(&spmc_shmem_obj_state.lock);
	return spmc_ffa_error_return(handle, ret);
}