1 /*
2 * Copyright (c) 2022-2023, ARM Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6 #include <assert.h>
7 #include <errno.h>
8 #include <inttypes.h>
9
10 #include <common/debug.h>
11 #include <common/runtime_svc.h>
12 #include <lib/object_pool.h>
13 #include <lib/spinlock.h>
14 #include <lib/xlat_tables/xlat_tables_v2.h>
15 #include <services/ffa_svc.h>
16 #include "spmc.h"
17 #include "spmc_shared_mem.h"
18
19 #include <platform_def.h>
20
21 /**
22 * struct spmc_shmem_obj - Shared memory object.
23 * @desc_size: Size of @desc.
24 * @desc_filled: Size of @desc already received.
25 * @in_use: Number of clients that have called ffa_mem_retrieve_req
26 * without a matching ffa_mem_relinquish call.
27 * @hyp_shift: If the last ffa_mem_retrieve_req came from a hypervisor
28 * on its own behalf, shift the fragment offset in the
29 * descriptor forward by this amount to get the correct
30 * position of the next fragment.
31 * @desc: FF-A memory region descriptor passed in ffa_mem_share.
32 */
33 struct spmc_shmem_obj {
34 size_t desc_size;
35 size_t desc_filled;
36 size_t in_use;
37 ssize_t hyp_shift;
38 struct ffa_mtd desc;
39 };
40
41 /*
42 * Declare our data structure to store the metadata of memory share requests.
43 * The main datastore is allocated on a per platform basis to ensure enough
44 * storage can be made available.
45 * The address of the data store will be populated by the SPMC during its
46 * initialization.
47 */
48
49 struct spmc_shmem_obj_state spmc_shmem_obj_state = {
50 /* Set start value for handle so top 32 bits are needed quickly. */
51 .next_handle = 0xffffffc0U,
52 };
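
/*
 * Illustrative sketch only: during SPMC initialisation the platform is
 * expected to provide the backing buffer, e.g. (hook name and error
 * handling are platform specific and assumed here):
 *
 *   uint8_t *datastore;
 *   size_t size;
 *
 *   if (plat_spmc_shmem_datastore_get(&datastore, &size) == 0) {
 *           spmc_shmem_obj_state.data = datastore;
 *           spmc_shmem_obj_state.data_size = size;
 *   }
 */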
53
54 /**
55 * spmc_shmem_obj_size - Convert from descriptor size to object size.
56 * @desc_size: Size of struct ffa_memory_region_descriptor object.
57 *
58 * Return: Size of struct spmc_shmem_obj object.
59 */
static size_t spmc_shmem_obj_size(size_t desc_size)
61 {
62 return desc_size + offsetof(struct spmc_shmem_obj, desc);
63 }
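
/*
 * For illustration: a shared memory object is the fixed header of
 * struct spmc_shmem_obj followed by desc_size bytes of descriptor, so
 * spmc_shmem_obj_size(0x40) returns offsetof(struct spmc_shmem_obj, desc)
 * + 0x40. Objects are packed back to back in state->data, which is why the
 * lookup and iteration helpers below walk the buffer in
 * spmc_shmem_obj_size() sized steps.
 */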
64
65 /**
66 * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
67 * @state: Global state.
68 * @desc_size: Size of struct ffa_memory_region_descriptor object that
69 * allocated object will hold.
70 *
 * Return: Pointer to newly allocated object, or %NULL if there is not enough
 * space left. The returned pointer is only valid while @state is locked; to
 * use it again after unlocking @state, spmc_shmem_obj_lookup must be called.
 */
75 */
76 static struct spmc_shmem_obj *
spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
78 {
79 struct spmc_shmem_obj *obj;
80 size_t free = state->data_size - state->allocated;
81 size_t obj_size;
82
83 if (state->data == NULL) {
84 ERROR("Missing shmem datastore!\n");
85 return NULL;
86 }
87
88 /* Ensure that descriptor size is aligned */
89 if (!is_aligned(desc_size, 16)) {
90 WARN("%s(0x%zx) desc_size not 16-byte aligned\n",
91 __func__, desc_size);
92 return NULL;
93 }
94
95 obj_size = spmc_shmem_obj_size(desc_size);
96
97 /* Ensure the obj size has not overflowed. */
98 if (obj_size < desc_size) {
99 WARN("%s(0x%zx) desc_size overflow\n",
100 __func__, desc_size);
101 return NULL;
102 }
103
104 if (obj_size > free) {
105 WARN("%s(0x%zx) failed, free 0x%zx\n",
106 __func__, desc_size, free);
107 return NULL;
108 }
109 obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
110 obj->desc = (struct ffa_mtd) {0};
111 obj->desc_size = desc_size;
112 obj->desc_filled = 0;
113 obj->in_use = 0;
114 obj->hyp_shift = 0;
115 state->allocated += obj_size;
116 return obj;
117 }
118
119 /**
120 * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
121 * @state: Global state.
122 * @obj: Object to free.
123 *
124 * Release memory used by @obj. Other objects may move, so on return all
125 * pointers to struct spmc_shmem_obj object should be considered invalid, not
126 * just @obj.
127 *
128 * The current implementation always compacts the remaining objects to simplify
129 * the allocator and to avoid fragmentation.
130 */
131
static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
133 struct spmc_shmem_obj *obj)
134 {
135 size_t free_size = spmc_shmem_obj_size(obj->desc_size);
136 uint8_t *shift_dest = (uint8_t *)obj;
137 uint8_t *shift_src = shift_dest + free_size;
138 size_t shift_size = state->allocated - (shift_src - state->data);
139
140 if (shift_size != 0U) {
141 memmove(shift_dest, shift_src, shift_size);
142 }
143 state->allocated -= free_size;
144 }
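
/*
 * Illustrative layout of the compaction performed above, assuming three
 * allocated objects A, B and C and a free of B:
 *
 *   before: | A | B | C | free space |
 *   after:  | A | C | free space     |
 *
 * C is moved down by memmove(), so any pointer previously obtained for C
 * (or any other object) must be re-acquired via spmc_shmem_obj_lookup().
 */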
145
146 /**
147 * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
148 * @state: Global state.
149 * @handle: Unique handle of object to return.
150 *
 * Return: struct spmc_shmem_obj object with handle matching @handle.
 *         %NULL, if no object in @state->data has a matching handle.
153 */
154 static struct spmc_shmem_obj *
spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
156 {
157 uint8_t *curr = state->data;
158
159 while (curr - state->data < state->allocated) {
160 struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
161
162 if (obj->desc.handle == handle) {
163 return obj;
164 }
165 curr += spmc_shmem_obj_size(obj->desc_size);
166 }
167 return NULL;
168 }
169
170 /**
 * spmc_shmem_obj_get_next - Get the next memory object from an offset.
 * @state: Global state.
 * @offset: Offset used to track which objects have previously been
 *          returned.
 *
 * Return: the next struct spmc_shmem_obj object from the provided
 *         offset.
177 * %NULL, if there are no more objects.
178 */
179 static struct spmc_shmem_obj *
spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
181 {
182 uint8_t *curr = state->data + *offset;
183
184 if (curr - state->data < state->allocated) {
185 struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
186
187 *offset += spmc_shmem_obj_size(obj->desc_size);
188
189 return obj;
190 }
191 return NULL;
192 }
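
/*
 * Typical iteration pattern over the datastore (see
 * spmc_shmem_check_state_obj() below for a real user):
 *
 *   size_t offset = 0;
 *   struct spmc_shmem_obj *it;
 *
 *   while ((it = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
 *                                        &offset)) != NULL) {
 *           ... inspect it ...
 *   }
 */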
193
194 /*******************************************************************************
195 * FF-A memory descriptor helper functions.
196 ******************************************************************************/
197 /**
198 * spmc_shmem_obj_get_emad - Get the emad from a given index depending on the
 * client's FF-A version.
200 * @desc: The memory transaction descriptor.
201 * @index: The index of the emad element to be accessed.
202 * @ffa_version: FF-A version of the provided structure.
203 * @emad_size: Will be populated with the size of the returned emad
204 * descriptor.
205 * Return: A pointer to the requested emad structure.
206 */
207 static void *
spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
209 uint32_t ffa_version, size_t *emad_size)
210 {
211 uint8_t *emad;
212
213 assert(index < desc->emad_count);
214
215 /*
216 * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
217 * format, otherwise assume it is a v1.1 format.
218 */
219 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
220 emad = (uint8_t *)desc + offsetof(struct ffa_mtd_v1_0, emad);
221 *emad_size = sizeof(struct ffa_emad_v1_0);
222 } else {
223 assert(is_aligned(desc->emad_offset, 16));
224 emad = ((uint8_t *) desc + desc->emad_offset);
225 *emad_size = desc->emad_size;
226 }
227
228 assert(((uint64_t)index * (uint64_t)*emad_size) <= UINT32_MAX);
229 return (emad + (*emad_size * index));
230 }
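
/*
 * For reference, the two layouts handled above differ only in how the
 * EMAD array is located and strided:
 *   - FF-A v1.0: the array immediately follows struct ffa_mtd_v1_0 and
 *     each entry is sizeof(struct ffa_emad_v1_0) bytes.
 *   - FF-A v1.1+: the array lives at desc->emad_offset and each entry is
 *     desc->emad_size bytes (both fields are provided by the sender and
 *     validated in spmc_validate_mtd_start()).
 */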
231
232 /**
233 * spmc_shmem_obj_get_comp_mrd - Get comp_mrd from a mtd struct based on the
234 * FF-A version of the descriptor.
 * @obj:         Object containing ffa_memory_region_descriptor.
 * @ffa_version: FF-A version of the provided descriptor.
236 *
237 * Return: struct ffa_comp_mrd object corresponding to the composite memory
238 * region descriptor.
239 */
240 static struct ffa_comp_mrd *
spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
242 {
243 size_t emad_size;
244 /*
245 * The comp_mrd_offset field of the emad descriptor remains consistent
246 * between FF-A versions therefore we can use the v1.0 descriptor here
247 * in all cases.
248 */
249 struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
250 ffa_version,
251 &emad_size);
252
253 /* Ensure the composite descriptor offset is aligned. */
254 if (!is_aligned(emad->comp_mrd_offset, 8)) {
255 WARN("Unaligned composite memory region descriptor offset.\n");
256 return NULL;
257 }
258
259 return (struct ffa_comp_mrd *)
260 ((uint8_t *)(&obj->desc) + emad->comp_mrd_offset);
261 }
262
263 /**
264 * spmc_shmem_obj_validate_id - Validate a partition ID is participating in
265 * a given memory transaction.
266 * @sp_id: Partition ID to validate.
267 * @obj: The shared memory object containing the descriptor
268 * of the memory transaction.
269 * Return: true if ID is valid, else false.
270 */
bool spmc_shmem_obj_validate_id(struct spmc_shmem_obj *obj, uint16_t sp_id)
272 {
273 bool found = false;
274 struct ffa_mtd *desc = &obj->desc;
275 size_t desc_size = obj->desc_size;
276
277 /* Validate the partition is a valid participant. */
278 for (unsigned int i = 0U; i < desc->emad_count; i++) {
279 size_t emad_size;
280 struct ffa_emad_v1_0 *emad;
281
282 emad = spmc_shmem_obj_get_emad(desc, i,
283 MAKE_FFA_VERSION(1, 1),
284 &emad_size);
285 /*
286 * Validate the calculated emad address resides within the
287 * descriptor.
288 */
289 if ((emad == NULL) || (uintptr_t) emad >=
290 (uintptr_t)((uint8_t *) desc + desc_size)) {
291 VERBOSE("Invalid emad.\n");
292 break;
293 }
294 if (sp_id == emad->mapd.endpoint_id) {
295 found = true;
296 break;
297 }
298 }
299 return found;
300 }
301
302 /*
303 * Compare two memory regions to determine if any range overlaps with another
304 * ongoing memory transaction.
305 */
306 static bool
overlapping_memory_regions(struct ffa_comp_mrd *region1,
308 struct ffa_comp_mrd *region2)
309 {
310 uint64_t region1_start;
311 uint64_t region1_size;
312 uint64_t region1_end;
313 uint64_t region2_start;
314 uint64_t region2_size;
315 uint64_t region2_end;
316
317 assert(region1 != NULL);
318 assert(region2 != NULL);
319
320 if (region1 == region2) {
321 return true;
322 }
323
324 /*
325 * Check each memory region in the request against existing
326 * transactions.
327 */
328 for (size_t i = 0; i < region1->address_range_count; i++) {
329
330 region1_start = region1->address_range_array[i].address;
331 region1_size =
332 region1->address_range_array[i].page_count *
333 PAGE_SIZE_4KB;
334 region1_end = region1_start + region1_size;
335
336 for (size_t j = 0; j < region2->address_range_count; j++) {
337
338 region2_start = region2->address_range_array[j].address;
339 region2_size =
340 region2->address_range_array[j].page_count *
341 PAGE_SIZE_4KB;
342 region2_end = region2_start + region2_size;
343
344 /* Check if regions are not overlapping. */
345 if (!((region2_end <= region1_start) ||
346 (region1_end <= region2_start))) {
347 WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
348 region1_start, region1_end,
349 region2_start, region2_end);
350 return true;
351 }
352 }
353 }
354 return false;
355 }
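
/*
 * The overlap test above uses the usual half-open interval rule: ranges
 * [s1, e1) and [s2, e2) are disjoint iff e2 <= s1 || e1 <= s2. For example,
 * pages [0x8000, 0x9000) and [0x9000, 0xa000) do not overlap, while
 * [0x8000, 0xa000) and [0x9000, 0xb000) do.
 */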
356
357 /*******************************************************************************
358 * FF-A v1.0 Memory Descriptor Conversion Helpers.
359 ******************************************************************************/
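
/*
 * For reference, the two wire formats handled by the helpers below share
 * the same overall shape; only the header layout and how the EMAD array is
 * located differ:
 *
 *   v1.0: | ffa_mtd_v1_0 | emad[emad_count]       | comp_mrd | cons_mrd[] |
 *   v1.1: | ffa_mtd      | emad[] at emad_offset  | comp_mrd | cons_mrd[] |
 *
 * The size helpers therefore sum the header, the EMAD array, one
 * struct ffa_comp_mrd and address_range_count constituent descriptors.
 */
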
360 /**
361 * spmc_shm_get_v1_1_descriptor_size - Calculate the required size for a v1.1
362 * converted descriptor.
363 * @orig: The original v1.0 memory transaction descriptor.
364 * @desc_size: The size of the original v1.0 memory transaction descriptor.
365 *
 * Return: the size required to store the descriptor in the v1.1 format.
367 */
368 static uint64_t
spmc_shm_get_v1_1_descriptor_size(struct ffa_mtd_v1_0 *orig, size_t desc_size)
370 {
371 uint64_t size = 0;
372 struct ffa_comp_mrd *mrd;
373 struct ffa_emad_v1_0 *emad_array = orig->emad;
374
375 /* Get the size of the v1.1 descriptor. */
376 size += sizeof(struct ffa_mtd);
377
378 /* Add the size of the emad descriptors. */
379 size += orig->emad_count * sizeof(struct ffa_emad_v1_0);
380
381 /* Add the size of the composite mrds. */
382 size += sizeof(struct ffa_comp_mrd);
383
384 /* Add the size of the constituent mrds. */
385 mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
386 emad_array[0].comp_mrd_offset);
387
388 /* Add the size of the memory region descriptors. */
389 size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);
390
391 return size;
392 }
393
394 /**
395 * spmc_shm_get_v1_0_descriptor_size - Calculate the required size for a v1.0
396 * converted descriptor.
397 * @orig: The original v1.1 memory transaction descriptor.
398 * @desc_size: The size of the original v1.1 memory transaction descriptor.
399 *
 * Return: the size required to store the descriptor in the v1.0 format.
401 */
402 static size_t
spmc_shm_get_v1_0_descriptor_size(struct ffa_mtd *orig, size_t desc_size)
404 {
405 size_t size = 0;
406 struct ffa_comp_mrd *mrd;
407 struct ffa_emad_v1_0 *emad_array = (struct ffa_emad_v1_0 *)
408 ((uint8_t *) orig +
409 orig->emad_offset);
410
411 /* Get the size of the v1.0 descriptor. */
412 size += sizeof(struct ffa_mtd_v1_0);
413
414 /* Add the size of the v1.0 emad descriptors. */
415 size += orig->emad_count * sizeof(struct ffa_emad_v1_0);
416
417 /* Add the size of the composite mrds. */
418 size += sizeof(struct ffa_comp_mrd);
419
420 /* Add the size of the constituent mrds. */
421 mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
422 emad_array[0].comp_mrd_offset);
423
424 /* Check the calculated address is within the memory descriptor. */
425 if (((uintptr_t) mrd + sizeof(struct ffa_comp_mrd)) >
426 (uintptr_t)((uint8_t *) orig + desc_size)) {
427 return 0;
428 }
429 size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);
430
431 return size;
432 }
433
434 /**
435 * spmc_shm_convert_shmem_obj_from_v1_0 - Converts a given v1.0 memory object.
436 * @out_obj: The shared memory object to populate the converted descriptor.
437 * @orig: The shared memory object containing the v1.0 descriptor.
438 *
439 * Return: true if the conversion is successful else false.
440 */
441 static bool
spmc_shm_convert_shmem_obj_from_v1_0(struct spmc_shmem_obj *out_obj,
443 struct spmc_shmem_obj *orig)
444 {
445 struct ffa_mtd_v1_0 *mtd_orig = (struct ffa_mtd_v1_0 *) &orig->desc;
446 struct ffa_mtd *out = &out_obj->desc;
447 struct ffa_emad_v1_0 *emad_array_in;
448 struct ffa_emad_v1_0 *emad_array_out;
449 struct ffa_comp_mrd *mrd_in;
450 struct ffa_comp_mrd *mrd_out;
451
452 size_t mrd_in_offset;
453 size_t mrd_out_offset;
454 size_t mrd_size = 0;
455
456 /* Populate the new descriptor format from the v1.0 struct. */
457 out->sender_id = mtd_orig->sender_id;
458 out->memory_region_attributes = mtd_orig->memory_region_attributes;
459 out->flags = mtd_orig->flags;
460 out->handle = mtd_orig->handle;
461 out->tag = mtd_orig->tag;
462 out->emad_count = mtd_orig->emad_count;
463 out->emad_size = sizeof(struct ffa_emad_v1_0);
464
465 /*
466 * We will locate the emad descriptors directly after the ffa_mtd
467 * struct. This will be 8-byte aligned.
468 */
469 out->emad_offset = sizeof(struct ffa_mtd);
470
471 emad_array_in = mtd_orig->emad;
472 emad_array_out = (struct ffa_emad_v1_0 *)
473 ((uint8_t *) out + out->emad_offset);
474
475 /* Copy across the emad structs. */
476 for (unsigned int i = 0U; i < out->emad_count; i++) {
477 /* Bound check for emad array. */
478 if (((uint8_t *)emad_array_in + sizeof(struct ffa_emad_v1_0)) >
479 ((uint8_t *) mtd_orig + orig->desc_size)) {
480 VERBOSE("%s: Invalid mtd structure.\n", __func__);
481 return false;
482 }
483 memcpy(&emad_array_out[i], &emad_array_in[i],
484 sizeof(struct ffa_emad_v1_0));
485 }
486
487 /* Place the mrd descriptors after the end of the emad descriptors.*/
488 mrd_in_offset = emad_array_in->comp_mrd_offset;
489 mrd_out_offset = out->emad_offset + (out->emad_size * out->emad_count);
490 mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);
491
492 /* Add the size of the composite memory region descriptor. */
493 mrd_size += sizeof(struct ffa_comp_mrd);
494
495 /* Find the mrd descriptor. */
496 mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);
497
498 /* Add the size of the constituent memory region descriptors. */
499 mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);
500
501 /*
502 * Update the offset in the emads by the delta between the input and
503 * output addresses.
504 */
505 for (unsigned int i = 0U; i < out->emad_count; i++) {
506 emad_array_out[i].comp_mrd_offset =
507 emad_array_in[i].comp_mrd_offset +
508 (mrd_out_offset - mrd_in_offset);
509 }
510
511 /* Verify that we stay within bound of the memory descriptors. */
512 if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
513 (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
514 ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
515 (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
516 ERROR("%s: Invalid mrd structure.\n", __func__);
517 return false;
518 }
519
520 /* Copy the mrd descriptors directly. */
521 memcpy(mrd_out, mrd_in, mrd_size);
522
523 return true;
524 }
525
526 /**
527 * spmc_shm_convert_mtd_to_v1_0 - Converts a given v1.1 memory object to
528 * v1.0 memory object.
529 * @out_obj: The shared memory object to populate the v1.0 descriptor.
530 * @orig: The shared memory object containing the v1.1 descriptor.
531 *
532 * Return: true if the conversion is successful else false.
533 */
534 static bool
spmc_shm_convert_mtd_to_v1_0(struct spmc_shmem_obj *out_obj,
536 struct spmc_shmem_obj *orig)
537 {
538 struct ffa_mtd *mtd_orig = &orig->desc;
539 struct ffa_mtd_v1_0 *out = (struct ffa_mtd_v1_0 *) &out_obj->desc;
540 struct ffa_emad_v1_0 *emad_in;
541 struct ffa_emad_v1_0 *emad_array_in;
542 struct ffa_emad_v1_0 *emad_array_out;
543 struct ffa_comp_mrd *mrd_in;
544 struct ffa_comp_mrd *mrd_out;
545
546 size_t mrd_in_offset;
547 size_t mrd_out_offset;
548 size_t emad_out_array_size;
549 size_t mrd_size = 0;
550 size_t orig_desc_size = orig->desc_size;
551
552 /* Populate the v1.0 descriptor format from the v1.1 struct. */
553 out->sender_id = mtd_orig->sender_id;
554 out->memory_region_attributes = mtd_orig->memory_region_attributes;
555 out->flags = mtd_orig->flags;
556 out->handle = mtd_orig->handle;
557 out->tag = mtd_orig->tag;
558 out->emad_count = mtd_orig->emad_count;
559
560 /* Determine the location of the emad array in both descriptors. */
561 emad_array_in = (struct ffa_emad_v1_0 *)
562 ((uint8_t *) mtd_orig + mtd_orig->emad_offset);
563 emad_array_out = out->emad;
564
565 /* Copy across the emad structs. */
566 emad_in = emad_array_in;
567 for (unsigned int i = 0U; i < out->emad_count; i++) {
568 /* Bound check for emad array. */
569 if (((uint8_t *)emad_in + sizeof(struct ffa_emad_v1_0)) >
570 ((uint8_t *) mtd_orig + orig_desc_size)) {
571 VERBOSE("%s: Invalid mtd structure.\n", __func__);
572 return false;
573 }
574 memcpy(&emad_array_out[i], emad_in,
575 sizeof(struct ffa_emad_v1_0));
576
577 emad_in += mtd_orig->emad_size;
578 }
579
580 /* Place the mrd descriptors after the end of the emad descriptors. */
581 emad_out_array_size = sizeof(struct ffa_emad_v1_0) * out->emad_count;
582
583 mrd_out_offset = (uint8_t *) out->emad - (uint8_t *) out +
584 emad_out_array_size;
585
586 mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);
587
588 mrd_in_offset = mtd_orig->emad_offset +
589 (mtd_orig->emad_size * mtd_orig->emad_count);
590
591 /* Add the size of the composite memory region descriptor. */
592 mrd_size += sizeof(struct ffa_comp_mrd);
593
594 /* Find the mrd descriptor. */
595 mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);
596
597 /* Add the size of the constituent memory region descriptors. */
598 mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);
599
600 /*
601 * Update the offset in the emads by the delta between the input and
602 * output addresses.
603 */
604 emad_in = emad_array_in;
605
606 for (unsigned int i = 0U; i < out->emad_count; i++) {
607 emad_array_out[i].comp_mrd_offset = emad_in->comp_mrd_offset +
608 (mrd_out_offset -
609 mrd_in_offset);
610 emad_in += mtd_orig->emad_size;
611 }
612
613 /* Verify that we stay within bound of the memory descriptors. */
614 if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
615 (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
616 ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
617 (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
618 ERROR("%s: Invalid mrd structure.\n", __func__);
619 return false;
620 }
621
622 /* Copy the mrd descriptors directly. */
623 memcpy(mrd_out, mrd_in, mrd_size);
624
625 return true;
626 }
627
628 /**
629 * spmc_populate_ffa_v1_0_descriptor - Converts a given v1.1 memory object to
630 * the v1.0 format and populates the
631 * provided buffer.
632 * @dst: Buffer to populate v1.0 ffa_memory_region_descriptor.
633 * @orig_obj: Object containing v1.1 ffa_memory_region_descriptor.
634 * @buf_size: Size of the buffer to populate.
635 * @offset: The offset of the converted descriptor to copy.
636 * @copy_size: Will be populated with the number of bytes copied.
 * @v1_0_desc_size: Will be populated with the total size of the v1.0
 *                  descriptor.
639 *
640 * Return: 0 if conversion and population succeeded.
 * Note: This function invalidates the reference to @orig_obj, therefore
642 * `spmc_shmem_obj_lookup` must be called if further usage is required.
643 */
644 static uint32_t
spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
646 size_t buf_size, size_t offset,
647 size_t *copy_size, size_t *v1_0_desc_size)
648 {
649 struct spmc_shmem_obj *v1_0_obj;
650
651 /* Calculate the size that the v1.0 descriptor will require. */
652 *v1_0_desc_size = spmc_shm_get_v1_0_descriptor_size(
653 &orig_obj->desc, orig_obj->desc_size);
654
655 if (*v1_0_desc_size == 0) {
656 ERROR("%s: cannot determine size of descriptor.\n",
657 __func__);
658 return FFA_ERROR_INVALID_PARAMETER;
659 }
660
661 /* Get a new obj to store the v1.0 descriptor. */
662 v1_0_obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
663 *v1_0_desc_size);
664
665 if (!v1_0_obj) {
666 return FFA_ERROR_NO_MEMORY;
667 }
668
669 /* Perform the conversion from v1.1 to v1.0. */
670 if (!spmc_shm_convert_mtd_to_v1_0(v1_0_obj, orig_obj)) {
671 spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
672 return FFA_ERROR_INVALID_PARAMETER;
673 }
674
675 *copy_size = MIN(v1_0_obj->desc_size - offset, buf_size);
676 memcpy(dst, (uint8_t *) &v1_0_obj->desc + offset, *copy_size);
677
678 /*
679 * We're finished with the v1.0 descriptor for now so free it.
680 * Note that this will invalidate any references to the v1.1
681 * descriptor.
682 */
683 spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
684
685 return 0;
686 }
687
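/**
 * spmc_compatible_version - Check a reported FF-A version against a
 *                           required major/minor version.
 * @ffa_version: Version word as reported via FFA_VERSION (bit 31 MBZ).
 * @major:       Required major version.
 * @minor:       Minimum required minor version.
 *
 * Return: true if bit 31 is clear, the major version matches @major and the
 * minor version is at least @minor. For example, a caller reporting v1.2
 * satisfies a (1, 1) requirement, while one reporting v2.0 does not.
 */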
bool spmc_compatible_version(uint32_t ffa_version, uint16_t major,
689 uint16_t minor)
690 {
691 bool bit31_set = ffa_version & FFA_VERSION_BIT31_MASK;
692 uint16_t majv = (ffa_version >> FFA_VERSION_MAJOR_SHIFT) &
693 FFA_VERSION_MAJOR_MASK;
694 uint16_t minv = (ffa_version >> FFA_VERSION_MINOR_SHIFT) &
695 FFA_VERSION_MINOR_MASK;
696
697 return !bit31_set && majv == major && minv >= minor;
698 }
699
700 static int
spmc_validate_mtd_start(struct ffa_mtd *desc, uint32_t ffa_version,
702 size_t fragment_length, size_t total_length)
703 {
704 unsigned long long emad_end;
705 unsigned long long emad_size;
706 unsigned long long emad_offset;
707 unsigned int min_desc_size;
708
709 /* Determine the appropriate minimum descriptor size. */
710 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
711 min_desc_size = sizeof(struct ffa_mtd_v1_0);
712 } else if (spmc_compatible_version(ffa_version, 1, 1)) {
713 min_desc_size = sizeof(struct ffa_mtd);
714 } else {
715 return FFA_ERROR_INVALID_PARAMETER;
716 }
717 if (fragment_length < min_desc_size) {
718 WARN("%s: invalid length %zu < %u\n", __func__, fragment_length,
719 min_desc_size);
720 return FFA_ERROR_INVALID_PARAMETER;
721 }
722
723 if (desc->emad_count == 0U) {
724 WARN("%s: unsupported attribute desc count %u.\n",
725 __func__, desc->emad_count);
726 return FFA_ERROR_INVALID_PARAMETER;
727 }
728
729 /*
730 * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
731 * format, otherwise assume it is a v1.1 format.
732 */
733 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
734 emad_offset = emad_size = sizeof(struct ffa_emad_v1_0);
735 } else {
736 if (!is_aligned(desc->emad_offset, 16)) {
737 WARN("%s: Emad offset %" PRIx32 " is not 16-byte aligned.\n",
738 __func__, desc->emad_offset);
739 return FFA_ERROR_INVALID_PARAMETER;
740 }
741 if (desc->emad_offset < sizeof(struct ffa_mtd)) {
742 WARN("%s: Emad offset too small: 0x%" PRIx32 " < 0x%zx.\n",
743 __func__, desc->emad_offset,
744 sizeof(struct ffa_mtd));
745 return FFA_ERROR_INVALID_PARAMETER;
746 }
747 emad_offset = desc->emad_offset;
748 if (desc->emad_size < sizeof(struct ffa_emad_v1_0)) {
749 WARN("%s: Bad emad size (%" PRIu32 " < %zu).\n", __func__,
750 desc->emad_size, sizeof(struct ffa_emad_v1_0));
751 return FFA_ERROR_INVALID_PARAMETER;
752 }
753 if (!is_aligned(desc->emad_size, 16)) {
754 WARN("%s: Emad size 0x%" PRIx32 " is not 16-byte aligned.\n",
755 __func__, desc->emad_size);
756 return FFA_ERROR_INVALID_PARAMETER;
757 }
758 emad_size = desc->emad_size;
759 }
760
761 /*
762 * Overflow is impossible: the arithmetic happens in at least 64-bit
763 * precision, but all of the operands are bounded by UINT32_MAX, and
764 * ((2^32 - 1) * (2^32 - 1) + (2^32 - 1) + (2^32 - 1))
765 * = ((2^32 - 1) * ((2^32 - 1) + 1 + 1))
766 * = ((2^32 - 1) * (2^32 + 1))
767 * = (2^64 - 1).
768 */
769 CASSERT(sizeof(desc->emad_count) == 4, assert_emad_count_max_too_large);
770 emad_end = (desc->emad_count * (unsigned long long)emad_size) +
771 (unsigned long long)sizeof(struct ffa_comp_mrd) +
772 (unsigned long long)emad_offset;
773
774 if (emad_end > total_length) {
775 WARN("%s: Composite memory region extends beyond descriptor: 0x%llx > 0x%zx\n",
776 __func__, emad_end, total_length);
777 return FFA_ERROR_INVALID_PARAMETER;
778 }
779
780 return 0;
781 }
782
783 static inline const struct ffa_emad_v1_0 *
emad_advance(const struct ffa_emad_v1_0 *emad, size_t offset)
785 {
786 return (const struct ffa_emad_v1_0 *)((const uint8_t *)emad + offset);
787 }
788
789 /**
790 * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
791 * @obj: Object containing ffa_memory_region_descriptor.
792 * @ffa_version: FF-A version of the provided descriptor.
793 *
794 * Return: 0 if object is valid, FFA_ERROR_INVALID_PARAMETER if
795 * constituent_memory_region_descriptor offset or count is invalid.
796 */
static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
798 uint32_t ffa_version)
799 {
800 unsigned long long total_page_count;
801 const struct ffa_emad_v1_0 *first_emad;
802 const struct ffa_emad_v1_0 *end_emad;
803 size_t emad_size;
804 uint32_t comp_mrd_offset;
805 size_t header_emad_size;
806 size_t size;
807 size_t count;
808 size_t expected_size;
809 const struct ffa_comp_mrd *comp;
810
811 if (obj->desc_filled != obj->desc_size) {
812 ERROR("BUG: %s called on incomplete object (%zu != %zu)\n",
813 __func__, obj->desc_filled, obj->desc_size);
814 panic();
815 }
816
817 if (spmc_validate_mtd_start(&obj->desc, ffa_version,
818 obj->desc_filled, obj->desc_size)) {
819 ERROR("BUG: %s called on object with corrupt memory region descriptor\n",
820 __func__);
821 panic();
822 }
823
824 first_emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
825 ffa_version, &emad_size);
826 end_emad = emad_advance(first_emad, obj->desc.emad_count * emad_size);
827 comp_mrd_offset = first_emad->comp_mrd_offset;
828
829 /* Loop through the endpoint descriptors, validating each of them. */
830 for (const struct ffa_emad_v1_0 *emad = first_emad; emad < end_emad;) {
831 ffa_endpoint_id16_t ep_id;
832
833 /*
834 * If a partition ID resides in the secure world validate that
835 * the partition ID is for a known partition. Ignore any
836 * partition ID belonging to the normal world as it is assumed
837 * the Hypervisor will have validated these.
838 */
839 ep_id = emad->mapd.endpoint_id;
840 if (ffa_is_secure_world_id(ep_id)) {
841 if (spmc_get_sp_ctx(ep_id) == NULL) {
842 WARN("%s: Invalid receiver id 0x%x\n",
843 __func__, ep_id);
844 return FFA_ERROR_INVALID_PARAMETER;
845 }
846 }
847
848 /*
849 * The offset provided to the composite memory region descriptor
850 * should be consistent across endpoint descriptors.
851 */
852 if (comp_mrd_offset != emad->comp_mrd_offset) {
853 ERROR("%s: mismatching offsets provided, %u != %u\n",
854 __func__, emad->comp_mrd_offset, comp_mrd_offset);
855 return FFA_ERROR_INVALID_PARAMETER;
856 }
857
858 /* Advance to the next endpoint descriptor */
859 emad = emad_advance(emad, emad_size);
860
861 /*
862 * Ensure neither this emad nor any subsequent emads have
863 * the same partition ID as the previous emad.
864 */
865 for (const struct ffa_emad_v1_0 *other_emad = emad;
866 other_emad < end_emad;
867 other_emad = emad_advance(other_emad, emad_size)) {
868 if (ep_id == other_emad->mapd.endpoint_id) {
869 WARN("%s: Duplicated endpoint id 0x%x\n",
__func__, ep_id);
871 return FFA_ERROR_INVALID_PARAMETER;
872 }
873 }
874 }
875
876 header_emad_size = (size_t)((const uint8_t *)end_emad -
877 (const uint8_t *)&obj->desc);
878
879 /*
880 * Check that the composite descriptor
881 * is after the endpoint descriptors.
882 */
883 if (comp_mrd_offset < header_emad_size) {
884 WARN("%s: invalid object, offset %u < header + emad %zu\n",
885 __func__, comp_mrd_offset, header_emad_size);
886 return FFA_ERROR_INVALID_PARAMETER;
887 }
888
889 /* Ensure the composite descriptor offset is aligned. */
890 if (!is_aligned(comp_mrd_offset, 16)) {
891 WARN("%s: invalid object, unaligned composite memory "
892 "region descriptor offset %u.\n",
893 __func__, comp_mrd_offset);
894 return FFA_ERROR_INVALID_PARAMETER;
895 }
896
897 size = obj->desc_size;
898
899 /* Check that the composite descriptor is in bounds. */
900 if (comp_mrd_offset > size) {
901 WARN("%s: invalid object, offset %u > total size %zu\n",
902 __func__, comp_mrd_offset, obj->desc_size);
903 return FFA_ERROR_INVALID_PARAMETER;
904 }
905 size -= comp_mrd_offset;
906
907 /* Check that there is enough space for the composite descriptor. */
908 if (size < sizeof(struct ffa_comp_mrd)) {
909 WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
910 __func__, comp_mrd_offset, obj->desc_size);
911 return FFA_ERROR_INVALID_PARAMETER;
912 }
913 size -= sizeof(*comp);
914
915 count = size / sizeof(struct ffa_cons_mrd);
916
917 comp = (const struct ffa_comp_mrd *)
918 ((const uint8_t *)(&obj->desc) + comp_mrd_offset);
919
920 if (comp->address_range_count != count) {
921 WARN("%s: invalid object, desc count %u != %zu\n",
922 __func__, comp->address_range_count, count);
923 return FFA_ERROR_INVALID_PARAMETER;
924 }
925
926 /* Ensure that the expected and actual sizes are equal. */
927 expected_size = comp_mrd_offset + sizeof(*comp) +
928 count * sizeof(struct ffa_cons_mrd);
929
930 if (expected_size != obj->desc_size) {
931 WARN("%s: invalid object, computed size %zu != size %zu\n",
932 __func__, expected_size, obj->desc_size);
933 return FFA_ERROR_INVALID_PARAMETER;
934 }
935
936 total_page_count = 0;
937
938 /*
939 * comp->address_range_count is 32-bit, so 'count' must fit in a
940 * uint32_t at this point.
941 */
942 for (size_t i = 0; i < count; i++) {
943 const struct ffa_cons_mrd *mrd = comp->address_range_array + i;
944
945 if (!is_aligned(mrd->address, PAGE_SIZE)) {
946 WARN("%s: invalid object, address in region descriptor "
947 "%zu not 4K aligned (got 0x%016llx)",
948 __func__, i, (unsigned long long)mrd->address);
949 }
950
951 /*
952 * No overflow possible: total_page_count can hold at
 * least 2^64 - 1, but will have at most 2^32 - 1
954 * values added to it, each of which cannot exceed 2^32 - 1.
955 */
956 total_page_count += mrd->page_count;
957 }
958
959 if (comp->total_page_count != total_page_count) {
960 WARN("%s: invalid object, desc total_page_count %u != %llu\n",
961 __func__, comp->total_page_count, total_page_count);
962 return FFA_ERROR_INVALID_PARAMETER;
963 }
964
965 return 0;
966 }
967
968 /**
969 * spmc_shmem_check_state_obj - Check if the descriptor describes memory
970 * regions that are currently involved with an
 * existing memory transaction. This implies that
972 * the memory is not in a valid state for lending.
973 * @obj: Object containing ffa_memory_region_descriptor.
974 *
975 * Return: 0 if object is valid, FFA_ERROR_INVALID_PARAMETER if invalid memory
976 * state.
977 */
static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj,
979 uint32_t ffa_version)
980 {
981 size_t obj_offset = 0;
982 struct spmc_shmem_obj *inflight_obj;
983
984 struct ffa_comp_mrd *other_mrd;
985 struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj,
986 ffa_version);
987
988 if (requested_mrd == NULL) {
989 return FFA_ERROR_INVALID_PARAMETER;
990 }
991
992 inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
993 &obj_offset);
994
995 while (inflight_obj != NULL) {
996 /*
997 * Don't compare the transaction to itself or to partially
998 * transmitted descriptors.
999 */
1000 if ((obj->desc.handle != inflight_obj->desc.handle) &&
1001 (obj->desc_size == obj->desc_filled)) {
1002 other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj,
1003 FFA_VERSION_COMPILED);
1004 if (other_mrd == NULL) {
1005 return FFA_ERROR_INVALID_PARAMETER;
1006 }
1007 if (overlapping_memory_regions(requested_mrd,
1008 other_mrd)) {
1009 return FFA_ERROR_INVALID_PARAMETER;
1010 }
1011 }
1012
1013 inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
1014 &obj_offset);
1015 }
1016 return 0;
1017 }
1018
static long spmc_ffa_fill_desc(struct mailbox *mbox,
1020 struct spmc_shmem_obj *obj,
1021 uint32_t fragment_length,
1022 ffa_mtd_flag32_t mtd_flag,
1023 uint32_t ffa_version,
1024 void *smc_handle)
1025 {
1026 int ret;
1027 uint32_t handle_low;
1028 uint32_t handle_high;
1029
1030 if (mbox->rxtx_page_count == 0U) {
1031 WARN("%s: buffer pair not registered.\n", __func__);
1032 ret = FFA_ERROR_INVALID_PARAMETER;
1033 goto err_arg;
1034 }
1035
1036 CASSERT(sizeof(mbox->rxtx_page_count) == 4, assert_bogus_page_count);
1037 if (fragment_length > (uint64_t)mbox->rxtx_page_count * PAGE_SIZE_4KB) {
1038 WARN("%s: bad fragment size %u > %" PRIu64 " buffer size\n", __func__,
1039 fragment_length, (uint64_t)mbox->rxtx_page_count * PAGE_SIZE_4KB);
1040 ret = FFA_ERROR_INVALID_PARAMETER;
1041 goto err_arg;
1042 }
1043
1044 if (fragment_length > obj->desc_size - obj->desc_filled) {
1045 WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
1046 fragment_length, obj->desc_size - obj->desc_filled);
1047 ret = FFA_ERROR_INVALID_PARAMETER;
1048 goto err_arg;
1049 }
1050
1051 memcpy((uint8_t *)&obj->desc + obj->desc_filled,
1052 (uint8_t *) mbox->tx_buffer, fragment_length);
1053
1054 /* Ensure that the sender ID resides in the normal world. */
1055 if (ffa_is_secure_world_id(obj->desc.sender_id)) {
1056 WARN("%s: Invalid sender ID 0x%x.\n",
1057 __func__, obj->desc.sender_id);
1058 ret = FFA_ERROR_DENIED;
1059 goto err_arg;
1060 }
1061
1062 /*
1063 * Ensure the NS bit is set to 0. Only perform this check
1064 * for the first fragment, because the bit will be set for
1065 * all the later fragments.
1066 */
1067 if (obj->desc_filled == 0U &&
1068 (obj->desc.memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
1069 WARN("%s: NS mem attributes flags MBZ.\n", __func__);
1070 ret = FFA_ERROR_INVALID_PARAMETER;
1071 goto err_arg;
1072 }
1073 /*
1074 * Ensure the NS bit is set to 1 since we only allow non-secure senders.
1075 * The specification requires that the NS bit is MBZ for
1076 * FFA_MEM_{DONATE,LEND,SHARE,RETRIEVE_REQ}, but we set the bit here
1077 * for internal bookkeeping to mark that the transaction did come
1078 * from the normal world.
1079 */
1080 obj->desc.memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
1081
1082 /*
1083 * We don't currently support any optional flags so ensure none are
1084 * requested.
1085 */
1086 if (obj->desc.flags != 0U && mtd_flag != 0U &&
1087 (obj->desc.flags != mtd_flag)) {
1088 WARN("%s: invalid memory transaction flags %u != %u\n",
1089 __func__, obj->desc.flags, mtd_flag);
1090 ret = FFA_ERROR_INVALID_PARAMETER;
1091 goto err_arg;
1092 }
1093
1094 if (obj->desc_filled == 0U) {
1095 /* First fragment, descriptor header has been copied */
1096 ret = spmc_validate_mtd_start(&obj->desc, ffa_version,
1097 fragment_length, obj->desc_size);
1098 if (ret != 0) {
1099 goto err_bad_desc;
1100 }
1101
1102 obj->desc.handle = spmc_shmem_obj_state.next_handle++;
1103 obj->desc.flags |= mtd_flag;
1104 }
1105
1106 obj->desc_filled += fragment_length;
1107
1108 handle_low = (uint32_t)obj->desc.handle;
1109 handle_high = obj->desc.handle >> 32;
1110
1111 if (obj->desc_filled != obj->desc_size) {
1112 SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
1113 handle_high, obj->desc_filled,
1114 (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
1115 }
1116
1117 /* The full descriptor has been received, perform any final checks. */
1118
1119 ret = spmc_shmem_check_obj(obj, ffa_version);
1120 if (ret != 0) {
1121 goto err_bad_desc;
1122 }
1123
1124 ret = spmc_shmem_check_state_obj(obj, ffa_version);
1125 if (ret) {
1126 ERROR("%s: invalid memory region descriptor.\n", __func__);
1127 goto err_bad_desc;
1128 }
1129
1130 /*
1131 * Everything checks out, if the sender was using FF-A v1.0, convert
1132 * the descriptor format to use the v1.1 structures.
1133 */
1134 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1135 struct spmc_shmem_obj *v1_1_obj;
1136 uint64_t mem_handle;
1137
/* Calculate the size that the v1.1 descriptor will require. */
1139 uint64_t v1_1_desc_size =
1140 spmc_shm_get_v1_1_descriptor_size((void *) &obj->desc,
1141 obj->desc_size);
1142
1143 if (v1_1_desc_size > UINT32_MAX) {
1144 ret = FFA_ERROR_NO_MEMORY;
1145 goto err_arg;
1146 }
1147
1148 /* Get a new obj to store the v1.1 descriptor. */
1149 v1_1_obj =
1150 spmc_shmem_obj_alloc(&spmc_shmem_obj_state, (size_t)v1_1_desc_size);
1151
1152 if (!v1_1_obj) {
1153 ret = FFA_ERROR_NO_MEMORY;
1154 goto err_arg;
1155 }
1156
1157 /* Perform the conversion from v1.0 to v1.1. */
1158 v1_1_obj->desc_size = (uint32_t)v1_1_desc_size;
1159 v1_1_obj->desc_filled = (uint32_t)v1_1_desc_size;
if (!spmc_shm_convert_shmem_obj_from_v1_0(v1_1_obj, obj)) {
ERROR("%s: Could not convert mtd!\n", __func__);
spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_1_obj);
ret = FFA_ERROR_INVALID_PARAMETER;
goto err_arg;
}
1165
1166 /*
1167 * We're finished with the v1.0 descriptor so free it
1168 * and continue our checks with the new v1.1 descriptor.
1169 */
1170 mem_handle = obj->desc.handle;
1171 spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1172 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1173 if (obj == NULL) {
1174 ERROR("%s: Failed to find converted descriptor.\n",
1175 __func__);
1176 ret = FFA_ERROR_INVALID_PARAMETER;
1177 return spmc_ffa_error_return(smc_handle, ret);
1178 }
1179 }
1180
1181 /* Allow for platform specific operations to be performed. */
1182 ret = plat_spmc_shmem_begin(&obj->desc);
1183 if (ret != 0) {
1184 goto err_arg;
1185 }
1186
1187 SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
1188 0, 0, 0);
1189
1190 err_bad_desc:
1191 err_arg:
1192 spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
1193 return spmc_ffa_error_return(smc_handle, ret);
1194 }
1195
1196 /**
1197 * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
1198 * @client: Client state.
1199 * @total_length: Total length of shared memory descriptor.
1200 * @fragment_length: Length of fragment of shared memory descriptor passed in
1201 * this call.
1202 * @address: Not supported, must be 0.
1203 * @page_count: Not supported, must be 0.
1204 * @smc_handle: Handle passed to smc call. Used to return
1205 * FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
1206 *
1207 * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
1208 * to share or lend memory from non-secure os to secure os (with no stream
1209 * endpoints).
1210 *
1211 * Return: 0 on success, error code on failure.
1212 */
long spmc_ffa_mem_send(uint32_t smc_fid,
1214 bool secure_origin,
1215 uint64_t total_length,
1216 uint32_t fragment_length,
1217 uint64_t address,
1218 uint32_t page_count,
1219 void *cookie,
1220 void *handle,
1221 uint64_t flags)
1222
1223 {
1224 long ret;
1225 struct spmc_shmem_obj *obj;
1226 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1227 ffa_mtd_flag32_t mtd_flag;
1228 uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1229 size_t min_desc_size;
1230
1231 if (address != 0U || page_count != 0U) {
1232 WARN("%s: custom memory region for message not supported.\n",
1233 __func__);
1234 return spmc_ffa_error_return(handle,
1235 FFA_ERROR_INVALID_PARAMETER);
1236 }
1237
1238 if (secure_origin) {
1239 WARN("%s: unsupported share direction.\n", __func__);
1240 return spmc_ffa_error_return(handle,
1241 FFA_ERROR_INVALID_PARAMETER);
1242 }
1243
1244 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1245 min_desc_size = sizeof(struct ffa_mtd_v1_0);
1246 } else if (spmc_compatible_version(ffa_version, 1, 1)) {
1247 min_desc_size = sizeof(struct ffa_mtd);
1248 } else {
1249 WARN("%s: bad FF-A version.\n", __func__);
1250 return spmc_ffa_error_return(handle,
1251 FFA_ERROR_INVALID_PARAMETER);
1252 }
1253
1254 /* Check if the descriptor is too small for the FF-A version. */
1255 if (fragment_length < min_desc_size) {
1256 WARN("%s: bad first fragment size %u < %zu\n",
__func__, fragment_length, min_desc_size);
1258 return spmc_ffa_error_return(handle,
1259 FFA_ERROR_INVALID_PARAMETER);
1260 }
1261
1262 if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
1263 mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
1264 } else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
1265 mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
1266 } else {
1267 WARN("%s: invalid memory management operation.\n", __func__);
1268 return spmc_ffa_error_return(handle,
1269 FFA_ERROR_INVALID_PARAMETER);
1270 }
1271
1272 spin_lock(&spmc_shmem_obj_state.lock);
1273 obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
1274 if (obj == NULL) {
1275 ret = FFA_ERROR_NO_MEMORY;
1276 goto err_unlock;
1277 }
1278
1279 spin_lock(&mbox->lock);
1280 ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag,
1281 ffa_version, handle);
1282 spin_unlock(&mbox->lock);
1283
1284 spin_unlock(&spmc_shmem_obj_state.lock);
1285 return ret;
1286
1287 err_unlock:
1288 spin_unlock(&spmc_shmem_obj_state.lock);
1289 return spmc_ffa_error_return(handle, ret);
1290 }
1291
1292 /**
1293 * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
1294 * @client: Client state.
1295 * @handle_low: Handle_low value returned from FFA_MEM_FRAG_RX.
1296 * @handle_high: Handle_high value returned from FFA_MEM_FRAG_RX.
1297 * @fragment_length: Length of fragments transmitted.
1298 * @sender_id: Vmid of sender in bits [31:16]
1299 * @smc_handle: Handle passed to smc call. Used to return
1300 * FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
1301 *
1302 * Return: @smc_handle on success, error code on failure.
1303 */
long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
1305 bool secure_origin,
1306 uint64_t handle_low,
1307 uint64_t handle_high,
1308 uint32_t fragment_length,
1309 uint32_t sender_id,
1310 void *cookie,
1311 void *handle,
1312 uint64_t flags)
1313 {
1314 long ret;
1315 uint32_t desc_sender_id;
1316 uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1317 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1318
1319 struct spmc_shmem_obj *obj;
1320 uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1321
1322 spin_lock(&spmc_shmem_obj_state.lock);
1323
1324 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1325 if (obj == NULL) {
1326 WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
1327 __func__, mem_handle);
1328 ret = FFA_ERROR_INVALID_PARAMETER;
1329 goto err_unlock;
1330 }
1331
1332 desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
1333 if (sender_id != desc_sender_id) {
1334 WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
1335 sender_id, desc_sender_id);
1336 ret = FFA_ERROR_INVALID_PARAMETER;
1337 goto err_unlock;
1338 }
1339
1340 if (obj->desc_filled == obj->desc_size) {
1341 WARN("%s: object desc already filled, %zu\n", __func__,
1342 obj->desc_filled);
1343 ret = FFA_ERROR_INVALID_PARAMETER;
1344 goto err_unlock;
1345 }
1346
1347 spin_lock(&mbox->lock);
1348 ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, ffa_version,
1349 handle);
1350 spin_unlock(&mbox->lock);
1351
1352 spin_unlock(&spmc_shmem_obj_state.lock);
1353 return ret;
1354
1355 err_unlock:
1356 spin_unlock(&spmc_shmem_obj_state.lock);
1357 return spmc_ffa_error_return(handle, ret);
1358 }
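
/*
 * For reference, a fragmented share as handled by spmc_ffa_mem_send() and
 * spmc_ffa_mem_frag_tx() above proceeds roughly as follows (sketch, sizes
 * are illustrative only):
 *
 *   NWd:  FFA_MEM_SHARE(total_length = 8KB, fragment_length = 4KB)
 *   SPMC: FFA_MEM_FRAG_RX(handle, offset = 4KB)
 *   NWd:  FFA_MEM_FRAG_TX(handle, fragment_length = 4KB)
 *   SPMC: FFA_SUCCESS(handle)    <- descriptor complete and validated
 */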
1359
1360 /**
1361 * spmc_populate_ffa_hyp_descriptor - Populate the given buffer with a descriptor
1362 * for retrieval by the hypervisor.
1363 * @dst: Buffer to populate hypervisor ffa_memory_region_descriptor.
1364 * @orig_obj: Object containing original ffa_memory_region_descriptor.
1365 * @buf_size: Size of the buffer to populate.
1366 * @ffa_version: FF-A version of the caller.
1367 * @copy_size: Will be populated with the number of bytes copied.
1368 * @desc_size: Will be populated with the total size of the descriptor.
1369 */
1370 static uint32_t
spmc_populate_ffa_hyp_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
1372 size_t buf_size, uint32_t ffa_version,
1373 size_t *copy_size, size_t *desc_size)
1374 {
1375 size_t mtd_size;
1376 size_t emad_size;
1377 size_t mrd_size;
1378 struct ffa_emad_v1_0 hyp_emad = {0};
1379 struct ffa_comp_mrd *orig_mrd;
1380 size_t orig_mrd_offset;
1381 size_t hyp_mrd_offset;
1382 size_t mrd_copy_size;
1383
1384 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1385 struct ffa_mtd_v1_0 mtd = {0};
1386
1387 mtd_size = sizeof(mtd);
1388 emad_size = sizeof(struct ffa_emad_v1_0);
1389 /* The composite MRD starts immediately after our single EMAD */
1390 hyp_mrd_offset = mtd_size + emad_size;
1391 if (hyp_mrd_offset > buf_size) {
1392 return FFA_ERROR_INVALID_PARAMETER;
1393 }
1394
1395 mtd.sender_id = orig_obj->desc.sender_id;
1396 mtd.handle = orig_obj->desc.handle;
1397 mtd.emad_count = 1;
1398 memcpy(dst, &mtd, mtd_size);
1399 } else {
1400 struct ffa_mtd mtd = {0};
1401
1402 mtd_size = sizeof(mtd);
1403 emad_size = sizeof(struct ffa_emad_v1_0);
1404 /* The composite MRD starts immediately after our single EMAD */
1405 hyp_mrd_offset = mtd_size + emad_size;
1406 if (hyp_mrd_offset > buf_size) {
1407 return FFA_ERROR_INVALID_PARAMETER;
1408 }
1409
1410 mtd.sender_id = orig_obj->desc.sender_id;
1411 mtd.handle = orig_obj->desc.handle;
1412 mtd.emad_size = emad_size;
1413 mtd.emad_count = 1;
1414 mtd.emad_offset = mtd_size;
1415 memcpy(dst, &mtd, mtd_size);
1416 }
1417
1418 orig_mrd = spmc_shmem_obj_get_comp_mrd(orig_obj, FFA_VERSION_COMPILED);
1419 orig_mrd_offset = (uint8_t *)orig_mrd - (uint8_t *)(&orig_obj->desc);
1420 mrd_size = sizeof(struct ffa_comp_mrd);
1421 mrd_size += orig_mrd->address_range_count * sizeof(struct ffa_cons_mrd);
1422
/*
 * Compute the hypervisor fragment shift that is added to the fragment
 * offset to get the actual position inside obj->desc. The composite MRD
 * starts at obj->desc + orig_mrd_offset, but at a possibly smaller offset
 * within the buffer populated by this function, because the hypervisor
 * view contains only a single EMAD.
 */
1429 orig_obj->hyp_shift = orig_mrd_offset - hyp_mrd_offset;
1430
1431 mrd_copy_size = MIN(mrd_size, buf_size - hyp_mrd_offset);
1432 *copy_size = hyp_mrd_offset + mrd_copy_size;
1433 *desc_size = hyp_mrd_offset + mrd_size;
1434
1435 hyp_emad.comp_mrd_offset = hyp_mrd_offset;
1436 memcpy((uint8_t *)dst + mtd_size, &hyp_emad, emad_size);
1437 memcpy((uint8_t *)dst + hyp_mrd_offset, orig_mrd, mrd_copy_size);
1438
1439 return 0;
1440 }
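
/*
 * Illustrative example of the shift computed above: if the original
 * descriptor places its composite MRD at offset X while the single-EMAD
 * hypervisor view places it at a smaller offset Y, then hyp_shift = X - Y.
 * A later fragment offset into the hypervisor view can then be shifted
 * forward by hyp_shift to find the corresponding bytes inside obj->desc.
 */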
1441
1442 /**
1443 * spmc_ffa_mem_retrieve_update_ns_bit - Update the NS bit in the response descriptor
1444 * if the caller implements a version smaller
1445 * than FF-A 1.1 and if they have not requested
1446 * the functionality, or the caller is the
1447 * non-secure world.
1448 * @resp: Descriptor populated in callers RX buffer.
1449 * @sp_ctx: Context of the calling SP.
1450 */
void spmc_ffa_mem_retrieve_update_ns_bit(struct ffa_mtd *resp,
1452 struct secure_partition_desc *sp_ctx,
1453 bool secure_origin)
1454 {
1455 uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1456
1457 if (secure_origin &&
1458 sp_ctx->ffa_version < MAKE_FFA_VERSION(1, 1) &&
1459 !sp_ctx->ns_bit_requested) {
1460 resp->memory_region_attributes &= ~FFA_MEM_ATTR_NS_BIT;
1461 } else if (!secure_origin) {
1462 /*
1463 * The NS bit is set by the SPMC in the corresponding invocation
1464 * of the FFA_MEM_RETRIEVE_RESP ABI at the Non-secure physical
1465 * FF-A instance as follows.
1466 */
1467 if (ffa_version > MAKE_FFA_VERSION(1, 0)) {
1468 /*
1469 * The bit is set to b’1 if the version of the Framework
1470 * implemented by the Hypervisor is greater than v1.0
1471 */
1472 resp->memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
1473 } else {
1474 /*
1475 * The bit is set to b’0 if the version of the Framework
1476 * implemented by the Hypervisor is v1.0
1477 */
1478 resp->memory_region_attributes &= ~FFA_MEM_ATTR_NS_BIT;
1479 }
1480 }
1481 }
1482
1483 /**
1484 * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
1485 * @smc_fid: FID of SMC
1486 * @total_length: Total length of retrieve request descriptor if this is
1487 * the first call. Otherwise (unsupported) must be 0.
1488 * @fragment_length: Length of fragment of retrieve request descriptor passed
 * in this call. Only @fragment_length == @total_length is
 * supported by this implementation.
1491 * @address: Not supported, must be 0.
1492 * @page_count: Not supported, must be 0.
1493 * @smc_handle: Handle passed to smc call. Used to return
1494 * FFA_MEM_RETRIEVE_RESP.
1495 *
1496 * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
1497 * Used by secure os to retrieve memory already shared by non-secure os,
1498 * or by the hypervisor to retrieve the memory region for a specific handle.
1499 * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
1500 * the client must call FFA_MEM_FRAG_RX until the full response has been
1501 * received.
1502 *
1503 * Return: @handle on success, error code on failure.
1504 */
1505 long
spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
1507 bool secure_origin,
1508 uint32_t total_length,
1509 uint32_t fragment_length,
1510 uint64_t address,
1511 uint32_t page_count,
1512 void *cookie,
1513 void *handle,
1514 uint64_t flags)
1515 {
1516 int ret;
1517 size_t buf_size;
1518 size_t copy_size = 0;
1519 size_t min_desc_size;
1520 size_t out_desc_size = 0;
1521
1522 /*
1523 * Currently we are only accessing fields that are the same in both the
1524 * v1.0 and v1.1 mtd struct therefore we can use a v1.1 struct directly
1525 * here. We only need validate against the appropriate struct size.
1526 */
1527 struct ffa_mtd *resp;
1528 const struct ffa_mtd *req;
1529 struct spmc_shmem_obj *obj = NULL;
1530 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1531 uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1532 struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
1533
1534 if (address != 0U || page_count != 0U) {
1535 WARN("%s: custom memory region not supported.\n", __func__);
1536 return spmc_ffa_error_return(handle,
1537 FFA_ERROR_INVALID_PARAMETER);
1538 }
1539
1540 spin_lock(&mbox->lock);
1541
1542 req = mbox->tx_buffer;
1543 resp = mbox->rx_buffer;
1544 buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1545
1546 if (mbox->rxtx_page_count == 0U) {
1547 WARN("%s: buffer pair not registered.\n", __func__);
1548 ret = FFA_ERROR_INVALID_PARAMETER;
1549 goto err_unlock_mailbox;
1550 }
1551
1552 if (mbox->state != MAILBOX_STATE_EMPTY) {
1553 WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
1554 ret = FFA_ERROR_DENIED;
1555 goto err_unlock_mailbox;
1556 }
1557
1558 if (fragment_length != total_length) {
1559 WARN("%s: fragmented retrieve request not supported.\n",
1560 __func__);
1561 ret = FFA_ERROR_INVALID_PARAMETER;
1562 goto err_unlock_mailbox;
1563 }
1564
1565 /* req->emad_count is not set for retrieve by hypervisor */
1566 if ((secure_origin && req->emad_count == 0U) ||
1567 (!secure_origin && req->emad_count != 0U)) {
1568 WARN("%s: unsupported attribute desc count %u.\n",
__func__, req->emad_count);
1570 ret = FFA_ERROR_INVALID_PARAMETER;
1571 goto err_unlock_mailbox;
1572 }
1573
1574 /* Determine the appropriate minimum descriptor size. */
1575 if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1576 min_desc_size = sizeof(struct ffa_mtd_v1_0);
1577 } else {
1578 min_desc_size = sizeof(struct ffa_mtd);
1579 }
1580 if (total_length < min_desc_size) {
1581 WARN("%s: invalid length %u < %zu\n", __func__, total_length,
1582 min_desc_size);
1583 ret = FFA_ERROR_INVALID_PARAMETER;
1584 goto err_unlock_mailbox;
1585 }
1586
1587 spin_lock(&spmc_shmem_obj_state.lock);
1588
1589 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1590 if (obj == NULL) {
1591 ret = FFA_ERROR_INVALID_PARAMETER;
1592 goto err_unlock_all;
1593 }
1594
1595 if (obj->desc_filled != obj->desc_size) {
1596 WARN("%s: incomplete object desc filled %zu < size %zu\n",
1597 __func__, obj->desc_filled, obj->desc_size);
1598 ret = FFA_ERROR_INVALID_PARAMETER;
1599 goto err_unlock_all;
1600 }
1601
1602 if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
1603 WARN("%s: wrong sender id 0x%x != 0x%x\n",
1604 __func__, req->sender_id, obj->desc.sender_id);
1605 ret = FFA_ERROR_INVALID_PARAMETER;
1606 goto err_unlock_all;
1607 }
1608
1609 if (req->emad_count != 0U && req->tag != obj->desc.tag) {
1610 WARN("%s: wrong tag 0x%lx != 0x%lx\n",
1611 __func__, req->tag, obj->desc.tag);
1612 ret = FFA_ERROR_INVALID_PARAMETER;
1613 goto err_unlock_all;
1614 }
1615
1616 if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
WARN("%s: mismatch of endpoint counts %u != %u\n",
1618 __func__, req->emad_count, obj->desc.emad_count);
1619 ret = FFA_ERROR_INVALID_PARAMETER;
1620 goto err_unlock_all;
1621 }
1622
1623 /* Ensure the NS bit is set to 0 in the request. */
1624 if ((req->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
1625 WARN("%s: NS mem attributes flags MBZ.\n", __func__);
1626 ret = FFA_ERROR_INVALID_PARAMETER;
1627 goto err_unlock_all;
1628 }
1629
1630 if (req->flags != 0U) {
1631 if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
1632 (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
1633 			/*
1634 			 * If the retrieve request specifies the memory
1635 			 * transaction type, ensure it matches what we expect.
1636 			 */
1637 WARN("%s: wrong mem transaction flags %x != %x\n",
1638 __func__, req->flags, obj->desc.flags);
1639 ret = FFA_ERROR_INVALID_PARAMETER;
1640 goto err_unlock_all;
1641 }
1642
1643 if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
1644 req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
1645 			/*
1646 			 * The current implementation does not support donate
1647 			 * and supports no other flags.
1648 			 */
1649 WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
1650 ret = FFA_ERROR_INVALID_PARAMETER;
1651 goto err_unlock_all;
1652 }
1653 }
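
	/*
	 * Illustrative note: for an object created by FFA_MEM_SHARE a
	 * borrower may leave req->flags as 0 or set it to
	 * FFA_MTD_FLAG_TYPE_SHARE_MEMORY; passing
	 * FFA_MTD_FLAG_TYPE_LEND_MEMORY, or any other flag bits, is rejected
	 * by the checks above.
	 */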
1654
1655 /* Validate the caller is a valid participant. */
1656 if (req->emad_count != 0U &&
1657 !spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
1658 WARN("%s: Invalid endpoint ID (0x%x).\n",
1659 __func__, sp_ctx->sp_id);
1660 ret = FFA_ERROR_INVALID_PARAMETER;
1661 goto err_unlock_all;
1662 }
1663
1664 	/* Validate that each provided emad offset and structure is valid. */
1665 for (size_t i = 0; i < req->emad_count; i++) {
1666 size_t emad_size;
1667 struct ffa_emad_v1_0 *emad;
1668
1669 emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
1670 &emad_size);
1671
1672 if ((uintptr_t) emad >= (uintptr_t)
1673 ((uint8_t *) req + total_length)) {
1674 WARN("Invalid emad access.\n");
1675 ret = FFA_ERROR_INVALID_PARAMETER;
1676 goto err_unlock_all;
1677 }
1678 }
1679
1680 	/*
1681 	 * Validate that all the endpoints match in the case of multiple
1682 	 * borrowers. We don't mandate that the order of the borrowers must
1683 	 * match in the two descriptors, so check whether the endpoints
1684 	 * match in any order.
1685 	 */
1686 for (size_t i = 0; i < req->emad_count; i++) {
1687 bool found = false;
1688 size_t emad_size;
1689 struct ffa_emad_v1_0 *emad;
1690 struct ffa_emad_v1_0 *other_emad;
1691
1692 emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
1693 &emad_size);
1694
1695 for (size_t j = 0; j < obj->desc.emad_count; j++) {
1696 other_emad = spmc_shmem_obj_get_emad(
1697 &obj->desc, j, MAKE_FFA_VERSION(1, 1),
1698 &emad_size);
1699
1700 if (req->emad_count &&
1701 emad->mapd.endpoint_id ==
1702 other_emad->mapd.endpoint_id) {
1703 found = true;
1704 break;
1705 }
1706 }
1707
1708 if (!found) {
1709 WARN("%s: invalid receiver id (0x%x).\n",
1710 __func__, emad->mapd.endpoint_id);
1711 ret = FFA_ERROR_INVALID_PARAMETER;
1712 goto err_unlock_all;
1713 }
1714 }
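
	/*
	 * Illustrative example (hypothetical endpoint IDs): if the stored
	 * object lists borrowers {0x8001, 0x8002}, a retrieve request naming
	 * {0x8002, 0x8001} passes the loop above since ordering is ignored,
	 * while one naming {0x8001, 0x8003} fails because 0x8003 is not a
	 * participant in the transaction.
	 */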
1715
1716 mbox->state = MAILBOX_STATE_FULL;
1717
1718 if (req->emad_count != 0U) {
1719 obj->in_use++;
1720 }
1721
1722 	/*
1723 	 * For a hypervisor retrieval build the special descriptor; if the
1724 	 * caller is v1.0 convert the descriptor, otherwise copy directly.
1725 	 */
1726 if (req->emad_count == 0U) {
1727 /*
1728 * We should only get here from the hypervisor per
1729 * the checks above, but verify once again to be sure.
1730 */
1731 assert(!secure_origin);
1732
1733 ret = spmc_populate_ffa_hyp_descriptor(resp, obj, buf_size, ffa_version,
1734 						       &copy_size, &out_desc_size);
1735 if (ret != 0U) {
1736 ERROR("%s: Failed to process descriptor.\n", __func__);
1737 goto err_unlock_all;
1738 }
1739 } else if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1740 ret = spmc_populate_ffa_v1_0_descriptor(resp, obj, buf_size, 0,
1741 							&copy_size,
1742 &out_desc_size);
1743 if (ret != 0U) {
1744 ERROR("%s: Failed to process descriptor.\n", __func__);
1745 goto err_unlock_all;
1746 }
1747 } else {
1748 copy_size = MIN(obj->desc_size, buf_size);
1749 out_desc_size = obj->desc_size;
1750
1751 memcpy(resp, &obj->desc, copy_size);
1752 }
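
	/*
	 * Worked example for the direct-copy case above (hypothetical
	 * sizes): with obj->desc_size == 0x1a0 and a single-page RX/TX
	 * buffer pair, buf_size == 0x1000, so copy_size == 0x1a0 and the
	 * whole descriptor fits in one fragment. If obj->desc_size were
	 * 0x1800 instead, copy_size would be capped at 0x1000 and the
	 * receiver would fetch the remaining 0x800 bytes via
	 * FFA_MEM_FRAG_RX, as indicated by out_desc_size > copy_size in the
	 * FFA_MEM_RETRIEVE_RESP returned below.
	 */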
1753
1754 /* Update the RX fragment state */
1755 mbox->last_rx_fragment_offset = 0;
1756 mbox->next_rx_fragment_offset = copy_size;
1757
1758 /* Update the NS bit in the response if applicable. */
1759 spmc_ffa_mem_retrieve_update_ns_bit(resp, sp_ctx, secure_origin);
1760
1761 spin_unlock(&spmc_shmem_obj_state.lock);
1762 spin_unlock(&mbox->lock);
1763
1764 SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, out_desc_size,
1765 copy_size, 0, 0, 0, 0, 0);
1766
1767 err_unlock_all:
1768 spin_unlock(&spmc_shmem_obj_state.lock);
1769 err_unlock_mailbox:
1770 spin_unlock(&mbox->lock);
1771 return spmc_ffa_error_return(handle, ret);
1772 }
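
/*
 * Illustrative caller-side sketch (not part of this handler): how a borrower
 * might drive FFA_MEM_RETRIEVE_REQ against the code above. The ffa_smc()
 * wrapper, its result struct and the helper names below are hypothetical
 * stand-ins for the caller's own FF-A ABI glue.
 *
 *	// Write a v1.1 retrieve request into the caller's TX buffer.
 *	struct ffa_mtd *req = tx_buffer;
 *	memset(req, 0, sizeof(*req));
 *	req->handle = mem_handle;	// handle returned by FFA_MEM_SHARE
 *	req->sender_id = lender_id;	// must match the stored object
 *	req->tag = tag;			// must match the stored object
 *	req->emad_count = 1U;		// must match the object's borrower count
 *	// ... fill in the single emad with this endpoint's ID ...
 *
 *	uint32_t len = retrieve_req_size;	// bytes written above
 *	struct ffa_smc_result res = ffa_smc(FFA_MEM_RETRIEVE_REQ, len, len,
 *					    0, 0, 0, 0, 0);
 *	// On success x0 == FFA_MEM_RETRIEVE_RESP, x1 is the total descriptor
 *	// size and x2 the number of bytes placed in the RX buffer.
 */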
1773
1774 /**
1775 * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
1776  * @smc_fid:            SMC function id.
1777 * @handle_low: Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
1778 * @handle_high: Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
1779 * @fragment_offset: Byte offset in descriptor to resume at.
1780 * @sender_id: Bit[31:16]: Endpoint id of sender if client is a
1781 * hypervisor. 0 otherwise.
1782  * @handle:             Handle passed to smc call. Used to return
1783  *                      FFA_MEM_FRAG_TX.
1784 *
1785  * Return: @handle on success, error code on failure.
1786 */
1787 long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
1788 bool secure_origin,
1789 uint32_t handle_low,
1790 uint32_t handle_high,
1791 uint32_t fragment_offset,
1792 uint32_t sender_id,
1793 void *cookie,
1794 void *handle,
1795 uint64_t flags)
1796 {
1797 int ret;
1798 void *src;
1799 size_t buf_size;
1800 size_t copy_size;
1801 size_t full_copy_size;
1802 uint32_t desc_sender_id;
1803 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1804 uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
1805 struct spmc_shmem_obj *obj;
1806 uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1807 uint32_t actual_fragment_offset;
1808
1809 spin_lock(&spmc_shmem_obj_state.lock);
1810
1811 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
1812 if (obj == NULL) {
1813 		WARN("%s: invalid handle 0x%lx.\n",
1814 __func__, mem_handle);
1815 ret = FFA_ERROR_INVALID_PARAMETER;
1816 goto err_unlock_shmem;
1817 }
1818
1819 desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
1820 if (sender_id != 0U && sender_id != desc_sender_id) {
1821 WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
1822 sender_id, desc_sender_id);
1823 ret = FFA_ERROR_INVALID_PARAMETER;
1824 goto err_unlock_shmem;
1825 }
1826
1827 actual_fragment_offset = fragment_offset;
1828 if (!secure_origin) {
1829 /* Apply the hypervisor shift if the request came from NS */
1830 actual_fragment_offset += obj->hyp_shift;
1831 }
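
	/*
	 * Worked example (hypothetical values): the first fragment returned
	 * to the hypervisor is specially constructed and need not have the
	 * same size as the corresponding prefix of obj->desc, hence the
	 * stored shift. If obj->hyp_shift == 0x50, a non-secure request for
	 * fragment_offset 0x40 resumes at offset 0x90 within obj->desc,
	 * while offsets from secure callers are used unshifted.
	 */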
1832
1833 if (actual_fragment_offset >= obj->desc_size) {
1834 WARN("%s: invalid fragment_offset 0x%x actual 0x%x >= 0x%zx\n",
1835 __func__, fragment_offset, actual_fragment_offset, obj->desc_size);
1836 ret = FFA_ERROR_INVALID_PARAMETER;
1837 goto err_unlock_shmem;
1838 }
1839
1840 spin_lock(&mbox->lock);
1841
1842 if (mbox->rxtx_page_count == 0U) {
1843 WARN("%s: buffer pair not registered.\n", __func__);
1844 ret = FFA_ERROR_INVALID_PARAMETER;
1845 goto err_unlock_all;
1846 }
1847
1848 if (mbox->state != MAILBOX_STATE_EMPTY) {
1849 WARN("%s: RX Buffer is full!\n", __func__);
1850 ret = FFA_ERROR_DENIED;
1851 goto err_unlock_all;
1852 }
1853
1854 if (fragment_offset != mbox->last_rx_fragment_offset &&
1855 fragment_offset != mbox->next_rx_fragment_offset) {
1856 WARN("%s: invalid fragment_offset 0x%x expected 0x%x or 0x%x\n",
1857 __func__, fragment_offset, mbox->last_rx_fragment_offset,
1858 mbox->next_rx_fragment_offset);
1859 ret = FFA_ERROR_INVALID_PARAMETER;
1860 goto err_unlock_all;
1861 }
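
	/*
	 * Example (hypothetical offsets): if the previous fragment started
	 * at offset 0x0 and carried 0x1000 bytes, then
	 * last_rx_fragment_offset == 0x0 and next_rx_fragment_offset ==
	 * 0x1000. Requesting offset 0x0 re-sends the previous fragment,
	 * requesting 0x1000 advances to the next one, and any other offset
	 * is rejected above.
	 */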
1862
1863 buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1864
1865 mbox->state = MAILBOX_STATE_FULL;
1866
1867 	/*
1868 	 * When handling the "Support for retrieval by hypervisor" case,
1869 	 * return the specially constructed single-EMAD descriptor. In all
1870 	 * other cases, if the caller is v1.0 convert it, else copy directly.
1871 	 */
1872 if (!secure_origin && fragment_offset == 0U) {
1873 size_t out_desc_size;
1874
1875 /*
1876 * The caller requested a retransmit of the initial fragment.
1877 * Rebuild it here from scratch since we do not have
1878 * it stored anywhere.
1879 */
1880 ret = spmc_populate_ffa_hyp_descriptor(mbox->rx_buffer, obj,
1881 buf_size, ffa_version,
1882 						       &copy_size, &out_desc_size);
1883 if (ret != 0U) {
1884 ERROR("%s: Failed to process descriptor.\n", __func__);
1885 goto err_unlock_all;
1886 }
1887 } else if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
1888 size_t out_desc_size;
1889
1890 ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
1891 buf_size,
1892 actual_fragment_offset,
1893 							&copy_size,
1894 &out_desc_size);
1895 if (ret != 0U) {
1896 ERROR("%s: Failed to process descriptor.\n", __func__);
1897 goto err_unlock_all;
1898 }
1899 } else {
1900 full_copy_size = obj->desc_size - actual_fragment_offset;
1901 copy_size = MIN(full_copy_size, buf_size);
1902
1903 src = &obj->desc;
1904
1905 memcpy(mbox->rx_buffer, src + actual_fragment_offset, copy_size);
1906 }
1907
1908 mbox->last_rx_fragment_offset = fragment_offset;
1909 mbox->next_rx_fragment_offset = fragment_offset + copy_size;
1910
1911 spin_unlock(&mbox->lock);
1912 spin_unlock(&spmc_shmem_obj_state.lock);
1913
1914 SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
1915 copy_size, sender_id, 0, 0, 0);
1916
1917 err_unlock_all:
1918 spin_unlock(&mbox->lock);
1919 err_unlock_shmem:
1920 spin_unlock(&spmc_shmem_obj_state.lock);
1921 return spmc_ffa_error_return(handle, ret);
1922 }
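
/*
 * Illustrative caller-side sketch (not part of this handler): fetching the
 * remaining fragments of a retrieved descriptor. The ffa_smc() wrapper and
 * its result struct are hypothetical stand-ins for the caller's FF-A glue.
 *
 *	uint32_t offset = first_frag_len;	// x2 of FFA_MEM_RETRIEVE_RESP
 *
 *	while (offset < total_desc_len) {	// x1 of FFA_MEM_RETRIEVE_RESP
 *		struct ffa_smc_result res;
 *
 *		// Consume the current RX buffer contents and release the
 *		// buffer (e.g. via FFA_RX_RELEASE) so it is empty again.
 *		res = ffa_smc(FFA_MEM_FRAG_RX,
 *			      (uint32_t)mem_handle,		// handle_low
 *			      (uint32_t)(mem_handle >> 32),	// handle_high
 *			      offset, 0, 0, 0, 0);
 *		// On success x0 == FFA_MEM_FRAG_TX and x3 holds the number
 *		// of descriptor bytes written to the RX buffer.
 *		offset += res.x3;
 *	}
 */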
1923
1924 /**
1925 * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
1926  * @smc_fid:            SMC function id.
1927  *
1928  * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
1929  * Used by the secure OS to release memory shared with it by the non-secure OS.
1930  *
1931  * The handle to release must be in the caller's (secure OS's) transmit buffer.
1932 *
1933 * Return: 0 on success, error code on failure.
1934 */
1935 int spmc_ffa_mem_relinquish(uint32_t smc_fid,
1936 bool secure_origin,
1937 uint32_t handle_low,
1938 uint32_t handle_high,
1939 uint32_t fragment_offset,
1940 uint32_t sender_id,
1941 void *cookie,
1942 void *handle,
1943 uint64_t flags)
1944 {
1945 int ret;
1946 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1947 struct spmc_shmem_obj *obj;
1948 const struct ffa_mem_relinquish_descriptor *req;
1949 struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
1950
1951 if (!secure_origin) {
1952 WARN("%s: unsupported relinquish direction.\n", __func__);
1953 return spmc_ffa_error_return(handle,
1954 FFA_ERROR_INVALID_PARAMETER);
1955 }
1956
1957 spin_lock(&mbox->lock);
1958
1959 if (mbox->rxtx_page_count == 0U) {
1960 WARN("%s: buffer pair not registered.\n", __func__);
1961 ret = FFA_ERROR_INVALID_PARAMETER;
1962 goto err_unlock_mailbox;
1963 }
1964
1965 req = mbox->tx_buffer;
1966
1967 if (req->flags != 0U) {
1968 WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
1969 ret = FFA_ERROR_INVALID_PARAMETER;
1970 goto err_unlock_mailbox;
1971 }
1972
1973 if (req->endpoint_count == 0) {
1974 WARN("%s: endpoint count cannot be 0.\n", __func__);
1975 ret = FFA_ERROR_INVALID_PARAMETER;
1976 goto err_unlock_mailbox;
1977 }
1978
1979 spin_lock(&spmc_shmem_obj_state.lock);
1980
1981 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
1982 if (obj == NULL) {
1983 ret = FFA_ERROR_INVALID_PARAMETER;
1984 goto err_unlock_all;
1985 }
1986
1987 /*
1988 * Validate the endpoint ID was populated correctly. We don't currently
1989 * support proxy endpoints so the endpoint count should always be 1.
1990 */
1991 if (req->endpoint_count != 1U) {
1992 WARN("%s: unsupported endpoint count %u != 1\n", __func__,
1993 req->endpoint_count);
1994 ret = FFA_ERROR_INVALID_PARAMETER;
1995 goto err_unlock_all;
1996 }
1997
1998 /* Validate provided endpoint ID matches the partition ID. */
1999 if (req->endpoint_array[0] != sp_ctx->sp_id) {
2000 WARN("%s: invalid endpoint ID %u != %u\n", __func__,
2001 req->endpoint_array[0], sp_ctx->sp_id);
2002 ret = FFA_ERROR_INVALID_PARAMETER;
2003 goto err_unlock_all;
2004 }
2005
2006 /* Validate the caller is a valid participant. */
2007 if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
2008 WARN("%s: Invalid endpoint ID (0x%x).\n",
2009 __func__, req->endpoint_array[0]);
2010 ret = FFA_ERROR_INVALID_PARAMETER;
2011 goto err_unlock_all;
2012 }
2013
2014 if (obj->in_use == 0U) {
2015 ret = FFA_ERROR_INVALID_PARAMETER;
2016 goto err_unlock_all;
2017 }
2018 obj->in_use--;
2019
2020 spin_unlock(&spmc_shmem_obj_state.lock);
2021 spin_unlock(&mbox->lock);
2022
2023 SMC_RET1(handle, FFA_SUCCESS_SMC32);
2024
2025 err_unlock_all:
2026 spin_unlock(&spmc_shmem_obj_state.lock);
2027 err_unlock_mailbox:
2028 spin_unlock(&mbox->lock);
2029 return spmc_ffa_error_return(handle, ret);
2030 }
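
/*
 * Illustrative caller-side sketch (not part of this handler): how an SP
 * might build the relinquish descriptor expected in its TX buffer. The
 * ffa_smc() wrapper is a hypothetical stand-in.
 *
 *	struct ffa_mem_relinquish_descriptor *rel = tx_buffer;
 *
 *	rel->handle = mem_handle;	// handle of the retrieved region
 *	rel->flags = 0U;		// no flags are supported here
 *	rel->endpoint_count = 1U;	// exactly one endpoint: ourselves
 *	rel->endpoint_array[0] = own_sp_id;
 *
 *	(void)ffa_smc(FFA_MEM_RELINQUISH, 0, 0, 0, 0, 0, 0, 0);
 *	// On success the handler above returns FFA_SUCCESS and decrements
 *	// the object's in_use count; the lender may FFA_MEM_RECLAIM once
 *	// every borrower has relinquished.
 */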
2031
2032 /**
2033 * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
2034  * @smc_fid:     SMC function id.
2035 * @handle_low: Unique handle of shared memory object to reclaim. Bit[31:0].
2036 * @handle_high: Unique handle of shared memory object to reclaim.
2037 * Bit[63:32].
2038 * @flags: Unsupported, ignored.
2039 *
2040 * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
2041  * Used by the non-secure OS to reclaim memory shared with the secure OS.
2042 *
2043 * Return: 0 on success, error code on failure.
2044 */
2045 int spmc_ffa_mem_reclaim(uint32_t smc_fid,
2046 bool secure_origin,
2047 uint32_t handle_low,
2048 uint32_t handle_high,
2049 uint32_t mem_flags,
2050 uint64_t x4,
2051 void *cookie,
2052 void *handle,
2053 uint64_t flags)
2054 {
2055 int ret;
2056 struct spmc_shmem_obj *obj;
2057 uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
2058
2059 if (secure_origin) {
2060 WARN("%s: unsupported reclaim direction.\n", __func__);
2061 return spmc_ffa_error_return(handle,
2062 FFA_ERROR_INVALID_PARAMETER);
2063 }
2064
2065 if (mem_flags != 0U) {
2066 WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
2067 return spmc_ffa_error_return(handle,
2068 FFA_ERROR_INVALID_PARAMETER);
2069 }
2070
2071 spin_lock(&spmc_shmem_obj_state.lock);
2072
2073 obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
2074 if (obj == NULL) {
2075 ret = FFA_ERROR_INVALID_PARAMETER;
2076 goto err_unlock;
2077 }
2078 if (obj->in_use != 0U) {
2079 ret = FFA_ERROR_DENIED;
2080 goto err_unlock;
2081 }
2082
2083 if (obj->desc_filled != obj->desc_size) {
2084 WARN("%s: incomplete object desc filled %zu < size %zu\n",
2085 __func__, obj->desc_filled, obj->desc_size);
2086 ret = FFA_ERROR_INVALID_PARAMETER;
2087 goto err_unlock;
2088 }
2089
2090 /* Allow for platform specific operations to be performed. */
2091 ret = plat_spmc_shmem_reclaim(&obj->desc);
2092 if (ret != 0) {
2093 goto err_unlock;
2094 }
2095
2096 spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
2097 spin_unlock(&spmc_shmem_obj_state.lock);
2098
2099 SMC_RET1(handle, FFA_SUCCESS_SMC32);
2100
2101 err_unlock:
2102 spin_unlock(&spmc_shmem_obj_state.lock);
2103 return spmc_ffa_error_return(handle, ret);
2104 }
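
/*
 * Illustrative caller-side sketch (not part of this handler): reclaiming a
 * shared region from the non-secure side once every borrower has
 * relinquished it. The ffa_smc() wrapper and result struct are hypothetical
 * stand-ins.
 *
 *	struct ffa_smc_result res;
 *
 *	res = ffa_smc(FFA_MEM_RECLAIM,
 *		      (uint32_t)mem_handle,		// handle_low
 *		      (uint32_t)(mem_handle >> 32),	// handle_high
 *		      0,				// flags must be zero
 *		      0, 0, 0, 0);
 *	// x0 == FFA_SUCCESS on success; FFA_ERROR with FFA_ERROR_DENIED if a
 *	// borrower still holds the region, or FFA_ERROR_INVALID_PARAMETER
 *	// for an unknown handle.
 */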
2105