xref: /optee_os/core/arch/arm/kernel/thread_spmc.c (revision ac1c95dd8c14b60fcb032301f9d60ae20c57f09c)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2020-2023, Linaro Limited.
4  * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
5  */
6 
7 #include <assert.h>
8 #include <ffa.h>
9 #include <initcall.h>
10 #include <io.h>
11 #include <kernel/interrupt.h>
12 #include <kernel/notif.h>
13 #include <kernel/panic.h>
14 #include <kernel/secure_partition.h>
15 #include <kernel/spinlock.h>
16 #include <kernel/spmc_sp_handler.h>
17 #include <kernel/tee_misc.h>
18 #include <kernel/thread.h>
19 #include <kernel/thread_private.h>
20 #include <kernel/thread_spmc.h>
21 #include <kernel/virtualization.h>
22 #include <mm/core_mmu.h>
23 #include <mm/mobj.h>
24 #include <optee_ffa.h>
25 #include <optee_msg.h>
26 #include <optee_rpc_cmd.h>
27 #include <sm/optee_smc.h>
28 #include <string.h>
29 #include <sys/queue.h>
30 #include <tee/entry_std.h>
31 #include <tee/uuid.h>
32 #include <util.h>
33 
34 #if defined(CFG_CORE_SEL1_SPMC)
35 struct mem_share_state {
36 	struct mobj_ffa *mf;
37 	unsigned int page_count;
38 	unsigned int region_count;
39 	unsigned int current_page_idx;
40 };
41 
42 struct mem_frag_state {
43 	struct mem_share_state share;
44 	tee_mm_entry_t *mm;
45 	unsigned int frag_offset;
46 	SLIST_ENTRY(mem_frag_state) link;
47 };
48 #endif
49 
50 static unsigned int spmc_notif_lock = SPINLOCK_UNLOCK;
51 static int do_bottom_half_value = -1;
52 static uint16_t notif_vm_id;
53 static bool spmc_notif_is_ready;
54 
55 /* Initialized in spmc_init() below */
56 uint16_t optee_endpoint_id __nex_bss;
57 uint16_t spmc_id __nex_bss;
58 #ifdef CFG_CORE_SEL1_SPMC
59 uint16_t spmd_id __nex_bss;
60 static const uint32_t my_part_props = FFA_PART_PROP_DIRECT_REQ_RECV |
61 				      FFA_PART_PROP_DIRECT_REQ_SEND |
62 #ifdef CFG_NS_VIRTUALIZATION
63 				      FFA_PART_PROP_NOTIF_CREATED |
64 				      FFA_PART_PROP_NOTIF_DESTROYED |
65 #endif
66 #ifdef ARM64
67 				      FFA_PART_PROP_AARCH64_STATE |
68 #endif
69 				      FFA_PART_PROP_IS_PE_ID;
70 
71 static uint32_t my_uuid_words[] = {
72 	/*
73 	 * - if the SPMC is in S-EL2 this UUID describes OP-TEE as a S-EL1
74 	 *   SP, or
75 	 * - if the SPMC is in S-EL1 then this UUID is for OP-TEE as a
76 	 *   logical partition, residing in the same exception level as the
77 	 *   SPMC
78 	 * UUID 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
79 	 */
80 	0xe0786148, 0xe311f8e7, 0x02005ebc, 0x1bc5d5a5,
81 };
82 
83 /*
 84  * If struct ffa_rxtx::size is 0, RX/TX buffers are not mapped or initialized.
 85  *
 86  * struct ffa_rxtx::spinlock protects the variables below from concurrent
 87  * access; this includes use of the content of struct ffa_rxtx::rx and
 88  * @frag_state_head.
 89  *
 90  * struct ffa_rxtx::tx_is_mine is true when we may write to struct
 91  * ffa_rxtx::tx and false when it is owned by normal world.
 92  *
 93  * Note that we can't prevent normal world from updating the content of
 94  * these buffers, so we must always be careful when reading, even while we
 95  * hold the lock.
96  */
97 
98 static struct ffa_rxtx my_rxtx __nex_bss;
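
/*
 * Typical usage pattern for my_rxtx in the handlers below (a sketch of
 * what the code already does, not a new API):
 *
 *	cpu_spin_lock(&rxtx->spinlock);
 *	... read from rxtx->rx, write to rxtx->tx while tx_is_mine ...
 *	cpu_spin_unlock(&rxtx->spinlock);
 */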
99 
100 static bool is_nw_buf(struct ffa_rxtx *rxtx)
101 {
102 	return rxtx == &my_rxtx;
103 }
104 
105 static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
106 	SLIST_HEAD_INITIALIZER(&frag_state_head);
107 
108 static uint64_t notif_pending_bitmap;
109 static uint64_t notif_bound_bitmap;
110 static bool notif_vm_id_valid;
111 static int notif_intid = -1;
112 #else
113 static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
114 static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
115 static struct ffa_rxtx my_rxtx = {
116 	.rx = __rx_buf,
117 	.tx = __tx_buf,
118 	.size = sizeof(__rx_buf),
119 };
120 #endif
121 
122 static uint32_t swap_src_dst(uint32_t src_dst)
123 {
124 	return (src_dst >> 16) | (src_dst << 16);
125 }
126 
127 static uint16_t get_sender_id(uint32_t src_dst)
128 {
129 	return src_dst >> 16;
130 }
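
/*
 * Illustrative example with hypothetical endpoint IDs: a direct request
 * from sender 0x8001 to receiver 0x8002 carries src_dst = 0x80018002 in
 * w1. swap_src_dst() turns that into 0x80028001 for the direct response
 * and get_sender_id() extracts 0x8001.
 */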
131 
132 void spmc_set_args(struct thread_smc_args *args, uint32_t fid, uint32_t src_dst,
133 		   uint32_t w2, uint32_t w3, uint32_t w4, uint32_t w5)
134 {
135 	*args = (struct thread_smc_args){ .a0 = fid,
136 					  .a1 = src_dst,
137 					  .a2 = w2,
138 					  .a3 = w3,
139 					  .a4 = w4,
140 					  .a5 = w5, };
141 }
142 
143 static void set_simple_ret_val(struct thread_smc_args *args, int ffa_ret)
144 {
145 	if (ffa_ret)
146 		spmc_set_args(args, FFA_ERROR, 0, ffa_ret, 0, 0, 0);
147 	else
148 		spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0);
149 }
150 
151 uint32_t spmc_exchange_version(uint32_t vers, struct ffa_rxtx *rxtx)
152 {
153 	/*
154 	 * No locking, if the caller does concurrent calls to this it's
155 	 * only making a mess for itself. We must be able to renegotiate
156 	 * the FF-A version in order to support differing versions between
157 	 * the loader and the driver.
158 	 */
159 	if (vers < FFA_VERSION_1_1)
160 		rxtx->ffa_vers = FFA_VERSION_1_0;
161 	else
162 		rxtx->ffa_vers = FFA_VERSION_1_1;
163 
164 	return rxtx->ffa_vers;
165 }
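
/*
 * Sketch of the negotiation implemented above: a caller requesting
 * FFA_VERSION_1_0 (or anything older) keeps 1.0, while a request for 1.1
 * or any later version is capped to FFA_VERSION_1_1, the highest version
 * this implementation supports.
 */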
166 
167 static bool is_ffa_success(uint32_t fid)
168 {
169 #ifdef ARM64
170 	if (fid == FFA_SUCCESS_64)
171 		return true;
172 #endif
173 	return fid == FFA_SUCCESS_32;
174 }
175 
176 static int32_t get_ffa_ret_code(const struct thread_smc_args *args)
177 {
178 	if (is_ffa_success(args->a0))
179 		return FFA_OK;
180 	if (args->a0 == FFA_ERROR && args->a2)
181 		return args->a2;
182 	return FFA_NOT_SUPPORTED;
183 }
184 
185 static int ffa_simple_call(uint32_t fid, unsigned long a1, unsigned long a2,
186 			   unsigned long a3, unsigned long a4)
187 {
188 	struct thread_smc_args args = {
189 		.a0 = fid,
190 		.a1 = a1,
191 		.a2 = a2,
192 		.a3 = a3,
193 		.a4 = a4,
194 	};
195 
196 	thread_smccc(&args);
197 
198 	return get_ffa_ret_code(&args);
199 }
200 
201 static int __maybe_unused ffa_features(uint32_t id)
202 {
203 	return ffa_simple_call(FFA_FEATURES, id, 0, 0, 0);
204 }
205 
206 static int __maybe_unused ffa_set_notification(uint16_t dst, uint16_t src,
207 					       uint32_t flags, uint64_t bitmap)
208 {
209 	return ffa_simple_call(FFA_NOTIFICATION_SET,
210 			       SHIFT_U32(src, 16) | dst, flags,
211 			       low32_from_64(bitmap), high32_from_64(bitmap));
212 }
213 
214 #if defined(CFG_CORE_SEL1_SPMC)
215 static void handle_features(struct thread_smc_args *args)
216 {
217 	uint32_t ret_fid = FFA_ERROR;
218 	uint32_t ret_w2 = FFA_NOT_SUPPORTED;
219 
220 	switch (args->a1) {
221 	case FFA_FEATURE_SCHEDULE_RECV_INTR:
222 		if (spmc_notif_is_ready) {
223 			ret_fid = FFA_SUCCESS_32;
224 			ret_w2 = notif_intid;
225 		}
226 		break;
227 
228 #ifdef ARM64
229 	case FFA_RXTX_MAP_64:
230 #endif
231 	case FFA_RXTX_MAP_32:
232 		ret_fid = FFA_SUCCESS_32;
233 		ret_w2 = 0; /* 4kB minimum buffer size and alignment boundary */
234 		break;
235 #ifdef ARM64
236 	case FFA_MEM_SHARE_64:
237 #endif
238 	case FFA_MEM_SHARE_32:
239 		ret_fid = FFA_SUCCESS_32;
240 		/*
241 		 * Partition manager supports transmission of a memory
242 		 * transaction descriptor in a buffer dynamically allocated
243 		 * by the endpoint.
244 		 */
245 		ret_w2 = BIT(0);
246 		break;
247 
248 	case FFA_ERROR:
249 	case FFA_VERSION:
250 	case FFA_SUCCESS_32:
251 #ifdef ARM64
252 	case FFA_SUCCESS_64:
253 #endif
254 	case FFA_FEATURES:
255 	case FFA_SPM_ID_GET:
256 	case FFA_MEM_FRAG_TX:
257 	case FFA_MEM_RECLAIM:
258 	case FFA_MSG_SEND_DIRECT_REQ_64:
259 	case FFA_MSG_SEND_DIRECT_REQ_32:
260 	case FFA_INTERRUPT:
261 	case FFA_PARTITION_INFO_GET:
262 	case FFA_RXTX_UNMAP:
263 	case FFA_RX_RELEASE:
264 	case FFA_FEATURE_MANAGED_EXIT_INTR:
265 	case FFA_NOTIFICATION_BITMAP_CREATE:
266 	case FFA_NOTIFICATION_BITMAP_DESTROY:
267 	case FFA_NOTIFICATION_BIND:
268 	case FFA_NOTIFICATION_UNBIND:
269 	case FFA_NOTIFICATION_SET:
270 	case FFA_NOTIFICATION_GET:
271 	case FFA_NOTIFICATION_INFO_GET_32:
272 #ifdef ARM64
273 	case FFA_NOTIFICATION_INFO_GET_64:
274 #endif
275 		ret_fid = FFA_SUCCESS_32;
276 		ret_w2 = FFA_PARAM_MBZ;
277 		break;
278 	default:
279 		break;
280 	}
281 
282 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
283 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
284 }
285 
286 static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
287 {
288 	tee_mm_entry_t *mm = NULL;
289 
290 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
291 		return FFA_INVALID_PARAMETERS;
292 
293 	mm = tee_mm_alloc(&core_virt_shm_pool, sz);
294 	if (!mm)
295 		return FFA_NO_MEMORY;
296 
297 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
298 					  sz / SMALL_PAGE_SIZE,
299 					  MEM_AREA_NSEC_SHM)) {
300 		tee_mm_free(mm);
301 		return FFA_INVALID_PARAMETERS;
302 	}
303 
304 	*va_ret = (void *)tee_mm_get_smem(mm);
305 	return 0;
306 }
307 
308 void spmc_handle_spm_id_get(struct thread_smc_args *args)
309 {
310 	spmc_set_args(args, FFA_SUCCESS_32, FFA_PARAM_MBZ, spmc_id,
311 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
312 }
313 
314 static void unmap_buf(void *va, size_t sz)
315 {
316 	tee_mm_entry_t *mm = tee_mm_find(&core_virt_shm_pool, (vaddr_t)va);
317 
318 	assert(mm);
319 	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
320 	tee_mm_free(mm);
321 }
322 
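/*
 * Register usage handled below (as read by this code, from the caller's
 * point of view): w1 = caller TX buffer address, w2 = caller RX buffer
 * address, w3 = buffer size in 4k pages (1..63 here). What the caller
 * transmits on is what we receive on, hence the TX/RX swap when w1/w2
 * are read. Hypothetical example: w1 = 0x88000000, w2 = 0x88001000,
 * w3 = 1 maps one page per buffer.
 */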
323 void spmc_handle_rxtx_map(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
324 {
325 	int rc = 0;
326 	unsigned int sz = 0;
327 	paddr_t rx_pa = 0;
328 	paddr_t tx_pa = 0;
329 	void *rx = NULL;
330 	void *tx = NULL;
331 
332 	cpu_spin_lock(&rxtx->spinlock);
333 
334 	if (args->a3 & GENMASK_64(63, 6)) {
335 		rc = FFA_INVALID_PARAMETERS;
336 		goto out;
337 	}
338 
339 	sz = args->a3 * SMALL_PAGE_SIZE;
340 	if (!sz) {
341 		rc = FFA_INVALID_PARAMETERS;
342 		goto out;
343 	}
344 	/* TX/RX are swapped compared to the caller */
345 	tx_pa = args->a2;
346 	rx_pa = args->a1;
347 
348 	if (rxtx->size) {
349 		rc = FFA_DENIED;
350 		goto out;
351 	}
352 
353 	/*
354 	 * If the buffer comes from an SP, the address is virtual and already
355 	 * mapped.
356 	 */
357 	if (is_nw_buf(rxtx)) {
358 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
359 			enum teecore_memtypes mt = MEM_AREA_NEX_NSEC_SHM;
360 			bool tx_alloced = false;
361 
362 			/*
363 			 * With virtualization we establish this mapping in
364 			 * the nexus mapping which then is replicated to
365 			 * each partition.
366 			 *
367 			 * This means that this mapping must be done before
368 			 * any partition is created and then must not be
369 			 * changed.
370 			 */
371 
372 			/*
373 			 * core_mmu_add_mapping() may reuse previous
374 			 * mappings. First check if there's any mappings to
375 			 * reuse so we know how to clean up in case of
376 			 * failure.
377 			 */
378 			tx = phys_to_virt(tx_pa, mt, sz);
379 			rx = phys_to_virt(rx_pa, mt, sz);
380 			if (!tx) {
381 				tx = core_mmu_add_mapping(mt, tx_pa, sz);
382 				if (!tx) {
383 					rc = FFA_NO_MEMORY;
384 					goto out;
385 				}
386 				tx_alloced = true;
387 			}
388 			if (!rx)
389 				rx = core_mmu_add_mapping(mt, rx_pa, sz);
390 
391 			if (!rx) {
392 				if (tx_alloced && tx)
393 					core_mmu_remove_mapping(mt, tx, sz);
394 				rc = FFA_NO_MEMORY;
395 				goto out;
396 			}
397 		} else {
398 			rc = map_buf(tx_pa, sz, &tx);
399 			if (rc)
400 				goto out;
401 			rc = map_buf(rx_pa, sz, &rx);
402 			if (rc) {
403 				unmap_buf(tx, sz);
404 				goto out;
405 			}
406 		}
407 		rxtx->tx = tx;
408 		rxtx->rx = rx;
409 	} else {
410 		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
411 			rc = FFA_INVALID_PARAMETERS;
412 			goto out;
413 		}
414 
415 		if (!virt_to_phys((void *)tx_pa) ||
416 		    !virt_to_phys((void *)rx_pa)) {
417 			rc = FFA_INVALID_PARAMETERS;
418 			goto out;
419 		}
420 
421 		rxtx->tx = (void *)tx_pa;
422 		rxtx->rx = (void *)rx_pa;
423 	}
424 
425 	rxtx->size = sz;
426 	rxtx->tx_is_mine = true;
427 	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
428 	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
429 out:
430 	cpu_spin_unlock(&rxtx->spinlock);
431 	set_simple_ret_val(args, rc);
432 }
433 
434 void spmc_handle_rxtx_unmap(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
435 {
436 	int rc = FFA_INVALID_PARAMETERS;
437 
438 	cpu_spin_lock(&rxtx->spinlock);
439 
440 	if (!rxtx->size)
441 		goto out;
442 
443 	/*
444 	 * We don't unmap the SP memory as the SP might still use it.
445 	 * We avoid making changes to nexus mappings at this stage since
446 	 * there currently isn't a way to replicate those changes to all
447 	 * partitions.
448 	 */
449 	if (is_nw_buf(rxtx) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
450 		unmap_buf(rxtx->rx, rxtx->size);
451 		unmap_buf(rxtx->tx, rxtx->size);
452 	}
453 	rxtx->size = 0;
454 	rxtx->rx = NULL;
455 	rxtx->tx = NULL;
456 	rc = 0;
457 out:
458 	cpu_spin_unlock(&rxtx->spinlock);
459 	set_simple_ret_val(args, rc);
460 }
461 
462 void spmc_handle_rx_release(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
463 {
464 	int rc = 0;
465 
466 	cpu_spin_lock(&rxtx->spinlock);
467 	/* The sender's RX is our TX */
468 	if (!rxtx->size || rxtx->tx_is_mine) {
469 		rc = FFA_DENIED;
470 	} else {
471 		rc = 0;
472 		rxtx->tx_is_mine = true;
473 	}
474 	cpu_spin_unlock(&rxtx->spinlock);
475 
476 	set_simple_ret_val(args, rc);
477 }
478 
479 static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
480 {
481 	return !w0 && !w1 && !w2 && !w3;
482 }
483 
484 static bool is_my_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
485 {
486 	/*
487 	 * This depends on which UUID we have been assigned.
488 	 * TODO add a generic mechanism to obtain our UUID.
489 	 *
490 	 * The test below is for the hard-coded UUID
491 	 * 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
492 	 */
493 	return w0 == my_uuid_words[0] && w1 == my_uuid_words[1] &&
494 	       w2 == my_uuid_words[2] && w3 == my_uuid_words[3];
495 }
496 
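/*
 * Size sketch for the entries written below: FF-A 1.0 callers get
 * sizeof(struct ffa_partition_info_x) bytes per entry, FF-A 1.1 and
 * later also get the UUID (FFA_UUID_SIZE bytes) appended, so entry @idx
 * lands at offset idx * fpi_size in the destination buffer.
 */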
497 TEE_Result spmc_fill_partition_entry(uint32_t ffa_vers, void *buf, size_t blen,
498 				     size_t idx, uint16_t endpoint_id,
499 				     uint16_t execution_context,
500 				     uint32_t part_props,
501 				     const uint32_t uuid_words[4])
502 {
503 	struct ffa_partition_info_x *fpi = NULL;
504 	size_t fpi_size = sizeof(*fpi);
505 
506 	if (ffa_vers >= FFA_VERSION_1_1)
507 		fpi_size += FFA_UUID_SIZE;
508 
509 	if ((idx + 1) * fpi_size > blen)
510 		return TEE_ERROR_OUT_OF_MEMORY;
511 
512 	fpi = (void *)((vaddr_t)buf + idx * fpi_size);
513 	fpi->id = endpoint_id;
514 	/* Number of execution contexts implemented by this partition */
515 	fpi->execution_context = execution_context;
516 
517 	fpi->partition_properties = part_props;
518 
519 	if (ffa_vers >= FFA_VERSION_1_1) {
520 		if (uuid_words)
521 			memcpy(fpi->uuid, uuid_words, FFA_UUID_SIZE);
522 		else
523 			memset(fpi->uuid, 0, FFA_UUID_SIZE);
524 	}
525 
526 	return TEE_SUCCESS;
527 }
528 
529 static int handle_partition_info_get_all(size_t *elem_count,
530 					 struct ffa_rxtx *rxtx, bool count_only)
531 {
532 	if (!count_only) {
533 		/* Add OP-TEE SP */
534 		if (spmc_fill_partition_entry(rxtx->ffa_vers, rxtx->tx,
535 					      rxtx->size, 0, optee_endpoint_id,
536 					      CFG_TEE_CORE_NB_CORE,
537 					      my_part_props, my_uuid_words))
538 			return FFA_NO_MEMORY;
539 	}
540 	*elem_count = 1;
541 
542 	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
543 		if (sp_partition_info_get(rxtx->ffa_vers, rxtx->tx, rxtx->size,
544 					  NULL, elem_count, count_only))
545 			return FFA_NO_MEMORY;
546 	}
547 
548 	return FFA_OK;
549 }
550 
551 void spmc_handle_partition_info_get(struct thread_smc_args *args,
552 				    struct ffa_rxtx *rxtx)
553 {
554 	TEE_Result res = TEE_SUCCESS;
555 	uint32_t ret_fid = FFA_ERROR;
556 	uint32_t fpi_size = 0;
557 	uint32_t rc = 0;
558 	bool count_only = args->a5 & FFA_PARTITION_INFO_GET_COUNT_FLAG;
559 
560 	if (!count_only) {
561 		cpu_spin_lock(&rxtx->spinlock);
562 
563 		if (!rxtx->size || !rxtx->tx_is_mine) {
564 			rc = FFA_BUSY;
565 			goto out;
566 		}
567 	}
568 
569 	if (is_nil_uuid(args->a1, args->a2, args->a3, args->a4)) {
570 		size_t elem_count = 0;
571 
572 		ret_fid = handle_partition_info_get_all(&elem_count, rxtx,
573 							count_only);
574 
575 		if (ret_fid) {
576 			rc = ret_fid;
577 			ret_fid = FFA_ERROR;
578 		} else {
579 			ret_fid = FFA_SUCCESS_32;
580 			rc = elem_count;
581 		}
582 
583 		goto out;
584 	}
585 
586 	if (is_my_uuid(args->a1, args->a2, args->a3, args->a4)) {
587 		if (!count_only) {
588 			res = spmc_fill_partition_entry(rxtx->ffa_vers,
589 							rxtx->tx, rxtx->size, 0,
590 							optee_endpoint_id,
591 							CFG_TEE_CORE_NB_CORE,
592 							my_part_props,
593 							my_uuid_words);
594 			if (res) {
595 				ret_fid = FFA_ERROR;
596 				rc = FFA_INVALID_PARAMETERS;
597 				goto out;
598 			}
599 		}
600 		rc = 1;
601 	} else if (IS_ENABLED(CFG_SECURE_PARTITION)) {
602 		uint32_t uuid_array[4] = { 0 };
603 		TEE_UUID uuid = { };
604 		size_t count = 0;
605 
606 		uuid_array[0] = args->a1;
607 		uuid_array[1] = args->a2;
608 		uuid_array[2] = args->a3;
609 		uuid_array[3] = args->a4;
610 		tee_uuid_from_octets(&uuid, (uint8_t *)uuid_array);
611 
612 		res = sp_partition_info_get(rxtx->ffa_vers, rxtx->tx,
613 					    rxtx->size, &uuid, &count,
614 					    count_only);
615 		if (res != TEE_SUCCESS) {
616 			ret_fid = FFA_ERROR;
617 			rc = FFA_INVALID_PARAMETERS;
618 			goto out;
619 		}
620 		rc = count;
621 	} else {
622 		ret_fid = FFA_ERROR;
623 		rc = FFA_INVALID_PARAMETERS;
624 		goto out;
625 	}
626 
627 	ret_fid = FFA_SUCCESS_32;
628 
629 out:
630 	if (ret_fid == FFA_SUCCESS_32 && !count_only &&
631 	    rxtx->ffa_vers >= FFA_VERSION_1_1)
632 		fpi_size = sizeof(struct ffa_partition_info_x) + FFA_UUID_SIZE;
633 
634 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, fpi_size,
635 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
636 	if (!count_only) {
637 		rxtx->tx_is_mine = false;
638 		cpu_spin_unlock(&rxtx->spinlock);
639 	}
640 }
641 
642 static void spmc_handle_run(struct thread_smc_args *args)
643 {
644 	uint16_t endpoint = FFA_TARGET_INFO_GET_SP_ID(args->a1);
645 	uint16_t thread_id = FFA_TARGET_INFO_GET_VCPU_ID(args->a1);
646 	uint32_t rc = FFA_OK;
647 
648 	if (endpoint != optee_endpoint_id) {
649 		/*
650 		 * The endpoint should be an SP, try to resume the SP from
651 		 * preempted into busy state.
652 		 */
653 		rc = spmc_sp_resume_from_preempted(endpoint);
654 		if (rc)
655 			goto out;
656 	}
657 
658 	thread_resume_from_rpc(thread_id, 0, 0, 0, 0);
659 
660 	/* thread_resume_from_rpc() returns only if the thread_id is invalid */
661 	rc = FFA_INVALID_PARAMETERS;
662 
663 out:
664 	set_simple_ret_val(args, rc);
665 }
666 #endif /*CFG_CORE_SEL1_SPMC*/
667 
668 static uint32_t spmc_enable_async_notif(uint32_t bottom_half_value,
669 					uint16_t vm_id)
670 {
671 	uint32_t old_itr_status = 0;
672 
673 	if (!spmc_notif_is_ready) {
674 		/*
675 		 * This should never happen, provided normal world respects the
676 		 * exchanged capabilities.
677 		 */
678 		EMSG("Asynchronous notifications are not ready");
679 		return TEE_ERROR_NOT_IMPLEMENTED;
680 	}
681 
682 	if (bottom_half_value >= OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE) {
683 		EMSG("Invalid bottom half value %"PRIu32, bottom_half_value);
684 		return TEE_ERROR_BAD_PARAMETERS;
685 	}
686 
687 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
688 	do_bottom_half_value = bottom_half_value;
689 	if (!IS_ENABLED(CFG_CORE_SEL1_SPMC))
690 		notif_vm_id = vm_id;
691 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
692 
693 	notif_deliver_atomic_event(NOTIF_EVENT_STARTED);
694 	return TEE_SUCCESS;
695 }
696 
697 static void handle_yielding_call(struct thread_smc_args *args,
698 				 uint32_t direct_resp_fid)
699 {
700 	TEE_Result res = 0;
701 
702 	thread_check_canaries();
703 
704 #ifdef ARM64
705 	/* Saving this for an eventual RPC */
706 	thread_get_core_local()->direct_resp_fid = direct_resp_fid;
707 #endif
708 
709 	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
710 		/* Note connection to struct thread_rpc_arg::ret */
711 		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
712 				       0);
713 		res = TEE_ERROR_BAD_PARAMETERS;
714 	} else {
715 		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
716 				     args->a6, args->a7);
717 		res = TEE_ERROR_BUSY;
718 	}
719 	spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1),
720 		      0, res, 0, 0);
721 }
722 
723 static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
724 {
725 	uint64_t cookie = reg_pair_to_64(a5, a4);
726 	uint32_t res = 0;
727 
728 	res = mobj_ffa_unregister_by_cookie(cookie);
729 	switch (res) {
730 	case TEE_SUCCESS:
731 	case TEE_ERROR_ITEM_NOT_FOUND:
732 		return 0;
733 	case TEE_ERROR_BUSY:
734 		EMSG("res %#"PRIx32, res);
735 		return FFA_BUSY;
736 	default:
737 		EMSG("res %#"PRIx32, res);
738 		return FFA_INVALID_PARAMETERS;
739 	}
740 }
741 
742 static void handle_blocking_call(struct thread_smc_args *args,
743 				 uint32_t direct_resp_fid)
744 {
745 	uint32_t sec_caps = 0;
746 
747 	switch (args->a3) {
748 	case OPTEE_FFA_GET_API_VERSION:
749 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
750 			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
751 			      0);
752 		break;
753 	case OPTEE_FFA_GET_OS_VERSION:
754 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
755 			      CFG_OPTEE_REVISION_MAJOR,
756 			      CFG_OPTEE_REVISION_MINOR,
757 			      TEE_IMPL_GIT_SHA1 >> 32);
758 		break;
759 	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
760 		sec_caps = OPTEE_FFA_SEC_CAP_ARG_OFFSET;
761 		if (spmc_notif_is_ready)
762 			sec_caps |= OPTEE_FFA_SEC_CAP_ASYNC_NOTIF;
763 		spmc_set_args(args, direct_resp_fid,
764 			      swap_src_dst(args->a1), 0, 0,
765 			      THREAD_RPC_MAX_NUM_PARAMS, sec_caps);
766 		break;
767 	case OPTEE_FFA_UNREGISTER_SHM:
768 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
769 			      handle_unregister_shm(args->a4, args->a5), 0, 0);
770 		break;
771 	case OPTEE_FFA_ENABLE_ASYNC_NOTIF:
772 		spmc_set_args(args, direct_resp_fid,
773 			      swap_src_dst(args->a1), 0,
774 			      spmc_enable_async_notif(args->a4,
775 						      FFA_SRC(args->a1)),
776 			      0, 0);
777 		break;
778 	default:
779 		EMSG("Unhandled blocking service ID %#"PRIx32,
780 		     (uint32_t)args->a3);
781 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
782 			      TEE_ERROR_BAD_PARAMETERS, 0, 0);
783 	}
784 }
785 
786 static void handle_framework_direct_request(struct thread_smc_args *args,
787 					    struct ffa_rxtx *rxtx,
788 					    uint32_t direct_resp_fid)
789 {
790 	uint32_t w0 = FFA_ERROR;
791 	uint32_t w1 = FFA_PARAM_MBZ;
792 	uint32_t w2 = FFA_NOT_SUPPORTED;
793 	uint32_t w3 = FFA_PARAM_MBZ;
794 
795 	switch (args->a2 & FFA_MSG_TYPE_MASK) {
796 	case FFA_MSG_SEND_VM_CREATED:
797 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
798 			uint16_t guest_id = args->a5;
799 			TEE_Result res = virt_guest_created(guest_id);
800 
801 			w0 = direct_resp_fid;
802 			w1 = swap_src_dst(args->a1);
803 			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_CREATED;
804 			if (res == TEE_SUCCESS)
805 				w3 = FFA_OK;
806 			else if (res == TEE_ERROR_OUT_OF_MEMORY)
807 				w3 = FFA_DENIED;
808 			else
809 				w3 = FFA_INVALID_PARAMETERS;
810 		}
811 		break;
812 	case FFA_MSG_SEND_VM_DESTROYED:
813 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
814 			uint16_t guest_id = args->a5;
815 			TEE_Result res = virt_guest_destroyed(guest_id);
816 
817 			w0 = direct_resp_fid;
818 			w1 = swap_src_dst(args->a1);
819 			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_DESTROYED;
820 			if (res == TEE_SUCCESS)
821 				w3 = FFA_OK;
822 			else
823 				w3 = FFA_INVALID_PARAMETERS;
824 		}
825 		break;
826 	case FFA_MSG_VERSION_REQ:
827 		w0 = direct_resp_fid;
828 		w1 = swap_src_dst(args->a1);
829 		w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_VERSION_RESP;
830 		w3 = spmc_exchange_version(args->a3, rxtx);
831 		break;
832 	default:
833 		break;
834 	}
835 	spmc_set_args(args, w0, w1, w2, w3, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
836 }
837 
838 static void handle_direct_request(struct thread_smc_args *args,
839 				  struct ffa_rxtx *rxtx)
840 {
841 	uint32_t direct_resp_fid = 0;
842 
843 	if (IS_ENABLED(CFG_SECURE_PARTITION) &&
844 	    FFA_DST(args->a1) != spmc_id &&
845 	    FFA_DST(args->a1) != optee_endpoint_id) {
846 		spmc_sp_start_thread(args);
847 		return;
848 	}
849 
850 	if (OPTEE_SMC_IS_64(args->a0))
851 		direct_resp_fid = FFA_MSG_SEND_DIRECT_RESP_64;
852 	else
853 		direct_resp_fid = FFA_MSG_SEND_DIRECT_RESP_32;
854 
855 	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
856 		handle_framework_direct_request(args, rxtx, direct_resp_fid);
857 		return;
858 	}
859 
860 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
861 	    virt_set_guest(get_sender_id(args->a1))) {
862 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
863 			      TEE_ERROR_ITEM_NOT_FOUND, 0, 0);
864 		return;
865 	}
866 
867 	if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
868 		handle_yielding_call(args, direct_resp_fid);
869 	else
870 		handle_blocking_call(args, direct_resp_fid);
871 
872 	/*
873 	 * Note that handle_yielding_call() typically only returns if a
874 	 * thread cannot be allocated or found. virt_unset_guest() is also
875 	 * called from thread_state_suspend() and thread_state_free().
876 	 */
877 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
878 		virt_unset_guest();
879 }
880 
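/*
 * Layout sketch of the buffer parsed below: a version dependent memory
 * transaction header followed by an array of endpoint memory access
 * descriptors, mem_access_count entries of mem_access_size bytes each,
 * starting at mem_access_offs (for FF-A 1.0 the array immediately
 * follows the header with fixed size entries). Only the bounds of that
 * array are checked here; the composite memory region descriptor it
 * refers to is validated later, for instance in mem_share_init().
 */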
881 int spmc_read_mem_transaction(uint32_t ffa_vers, void *buf, size_t blen,
882 			      struct ffa_mem_transaction_x *trans)
883 {
884 	uint16_t mem_reg_attr = 0;
885 	uint32_t flags = 0;
886 	uint32_t count = 0;
887 	uint32_t offs = 0;
888 	uint32_t size = 0;
889 	size_t n = 0;
890 
891 	if (!IS_ALIGNED_WITH_TYPE(buf, uint64_t))
892 		return FFA_INVALID_PARAMETERS;
893 
894 	if (ffa_vers >= FFA_VERSION_1_1) {
895 		struct ffa_mem_transaction_1_1 *descr = NULL;
896 
897 		if (blen < sizeof(*descr))
898 			return FFA_INVALID_PARAMETERS;
899 
900 		descr = buf;
901 		trans->sender_id = READ_ONCE(descr->sender_id);
902 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
903 		flags = READ_ONCE(descr->flags);
904 		trans->global_handle = READ_ONCE(descr->global_handle);
905 		trans->tag = READ_ONCE(descr->tag);
906 
907 		count = READ_ONCE(descr->mem_access_count);
908 		size = READ_ONCE(descr->mem_access_size);
909 		offs = READ_ONCE(descr->mem_access_offs);
910 	} else {
911 		struct ffa_mem_transaction_1_0 *descr = NULL;
912 
913 		if (blen < sizeof(*descr))
914 			return FFA_INVALID_PARAMETERS;
915 
916 		descr = buf;
917 		trans->sender_id = READ_ONCE(descr->sender_id);
918 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
919 		flags = READ_ONCE(descr->flags);
920 		trans->global_handle = READ_ONCE(descr->global_handle);
921 		trans->tag = READ_ONCE(descr->tag);
922 
923 		count = READ_ONCE(descr->mem_access_count);
924 		size = sizeof(struct ffa_mem_access);
925 		offs = offsetof(struct ffa_mem_transaction_1_0,
926 				mem_access_array);
927 	}
928 
929 	if (mem_reg_attr > UINT8_MAX || flags > UINT8_MAX ||
930 	    size > UINT8_MAX || count > UINT8_MAX || offs > UINT16_MAX)
931 		return FFA_INVALID_PARAMETERS;
932 
933 	/* Check that the endpoint memory access descriptor array fits */
934 	if (MUL_OVERFLOW(size, count, &n) || ADD_OVERFLOW(offs, n, &n) ||
935 	    n > blen)
936 		return FFA_INVALID_PARAMETERS;
937 
938 	trans->mem_reg_attr = mem_reg_attr;
939 	trans->flags = flags;
940 	trans->mem_access_size = size;
941 	trans->mem_access_count = count;
942 	trans->mem_access_offs = offs;
943 	return 0;
944 }
945 
946 #if defined(CFG_CORE_SEL1_SPMC)
947 static int get_acc_perms(vaddr_t mem_acc_base, unsigned int mem_access_size,
948 			 unsigned int mem_access_count, uint8_t *acc_perms,
949 			 unsigned int *region_offs)
950 {
951 	struct ffa_mem_access_perm *descr = NULL;
952 	struct ffa_mem_access *mem_acc = NULL;
953 	unsigned int n = 0;
954 
955 	for (n = 0; n < mem_access_count; n++) {
956 		mem_acc = (void *)(mem_acc_base + mem_access_size * n);
957 		descr = &mem_acc->access_perm;
958 		if (READ_ONCE(descr->endpoint_id) == optee_endpoint_id) {
959 			*acc_perms = READ_ONCE(descr->perm);
960 			*region_offs = READ_ONCE(mem_acc->region_offs);
961 			return 0;
962 		}
963 	}
964 
965 	return FFA_INVALID_PARAMETERS;
966 }
967 
968 static int mem_share_init(struct ffa_mem_transaction_x *mem_trans, void *buf,
969 			  size_t blen, unsigned int *page_count,
970 			  unsigned int *region_count, size_t *addr_range_offs)
971 {
972 	const uint16_t exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
973 	const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
974 	struct ffa_mem_region *region_descr = NULL;
975 	unsigned int region_descr_offs = 0;
976 	uint8_t mem_acc_perm = 0;
977 	size_t n = 0;
978 
979 	if (mem_trans->mem_reg_attr != exp_mem_reg_attr)
980 		return FFA_INVALID_PARAMETERS;
981 
982 	/* Check that the access permissions match what's expected */
983 	if (get_acc_perms((vaddr_t)buf + mem_trans->mem_access_offs,
984 			  mem_trans->mem_access_size,
985 			  mem_trans->mem_access_count,
986 			  &mem_acc_perm, &region_descr_offs) ||
987 	    mem_acc_perm != exp_mem_acc_perm)
988 		return FFA_INVALID_PARAMETERS;
989 
990 	/* Check that the Composite memory region descriptor fits */
991 	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
992 	    n > blen)
993 		return FFA_INVALID_PARAMETERS;
994 
995 	if (!IS_ALIGNED_WITH_TYPE((vaddr_t)buf + region_descr_offs,
996 				  struct ffa_mem_region))
997 		return FFA_INVALID_PARAMETERS;
998 
999 	region_descr = (struct ffa_mem_region *)((vaddr_t)buf +
1000 						 region_descr_offs);
1001 	*page_count = READ_ONCE(region_descr->total_page_count);
1002 	*region_count = READ_ONCE(region_descr->address_range_count);
1003 	*addr_range_offs = n;
1004 	return 0;
1005 }
1006 
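/*
 * Return convention used by add_mem_share_helper(): a negative FFA_*
 * error code on failure, 0 when all address ranges of the share have
 * been consumed, or the number of bytes consumed from @buf when more
 * fragments are still expected (s->region_count remains non-zero).
 */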
1007 static int add_mem_share_helper(struct mem_share_state *s, void *buf,
1008 				size_t flen)
1009 {
1010 	unsigned int region_count = flen / sizeof(struct ffa_address_range);
1011 	struct ffa_address_range *arange = NULL;
1012 	unsigned int n = 0;
1013 
1014 	if (region_count > s->region_count)
1015 		region_count = s->region_count;
1016 
1017 	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range))
1018 		return FFA_INVALID_PARAMETERS;
1019 	arange = buf;
1020 
1021 	for (n = 0; n < region_count; n++) {
1022 		unsigned int page_count = READ_ONCE(arange[n].page_count);
1023 		uint64_t addr = READ_ONCE(arange[n].address);
1024 
1025 		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
1026 					  addr, page_count))
1027 			return FFA_INVALID_PARAMETERS;
1028 	}
1029 
1030 	s->region_count -= region_count;
1031 	if (s->region_count)
1032 		return region_count * sizeof(*arange);
1033 
1034 	if (s->current_page_idx != s->page_count)
1035 		return FFA_INVALID_PARAMETERS;
1036 
1037 	return 0;
1038 }
1039 
1040 static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
1041 {
1042 	int rc = 0;
1043 
1044 	rc = add_mem_share_helper(&s->share, buf, flen);
1045 	if (rc >= 0) {
1046 		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
1047 			/* We're not at the end of the descriptor yet */
1048 			if (s->share.region_count)
1049 				return s->frag_offset;
1050 
1051 			/* We're done */
1052 			rc = 0;
1053 		} else {
1054 			rc = FFA_INVALID_PARAMETERS;
1055 		}
1056 	}
1057 
1058 	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
1059 	if (rc < 0)
1060 		mobj_ffa_sel1_spmc_delete(s->share.mf);
1061 	else
1062 		mobj_ffa_push_to_inactive(s->share.mf);
1063 	free(s);
1064 
1065 	return rc;
1066 }
1067 
1068 static bool is_sp_share(struct ffa_mem_transaction_x *mem_trans,
1069 			void *buf)
1070 {
1071 	struct ffa_mem_access_perm *perm = NULL;
1072 	struct ffa_mem_access *mem_acc = NULL;
1073 
1074 	if (!IS_ENABLED(CFG_SECURE_PARTITION))
1075 		return false;
1076 
1077 	if (mem_trans->mem_access_count < 1)
1078 		return false;
1079 
1080 	mem_acc = (void *)((vaddr_t)buf + mem_trans->mem_access_offs);
1081 	perm = &mem_acc->access_perm;
1082 
1083 	/*
1084 	 * perm->endpoint_id is read here only to check if the endpoint is
1085 	 * OP-TEE. We read it again later on, with additional checks there
1086 	 * to make sure that the data is correct.
1087 	 */
1088 	return READ_ONCE(perm->endpoint_id) != optee_endpoint_id;
1089 }
1090 
1091 static int add_mem_share(struct ffa_mem_transaction_x *mem_trans,
1092 			 tee_mm_entry_t *mm, void *buf, size_t blen,
1093 			 size_t flen, uint64_t *global_handle)
1094 {
1095 	int rc = 0;
1096 	struct mem_share_state share = { };
1097 	size_t addr_range_offs = 0;
1098 	uint64_t cookie = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
1099 	size_t n = 0;
1100 
1101 	rc = mem_share_init(mem_trans, buf, flen, &share.page_count,
1102 			    &share.region_count, &addr_range_offs);
1103 	if (rc)
1104 		return rc;
1105 
1106 	if (!share.page_count || !share.region_count)
1107 		return FFA_INVALID_PARAMETERS;
1108 
1109 	if (MUL_OVERFLOW(share.region_count,
1110 			 sizeof(struct ffa_address_range), &n) ||
1111 	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
1112 		return FFA_INVALID_PARAMETERS;
1113 
1114 	if (mem_trans->global_handle)
1115 		cookie = mem_trans->global_handle;
1116 	share.mf = mobj_ffa_sel1_spmc_new(cookie, share.page_count);
1117 	if (!share.mf)
1118 		return FFA_NO_MEMORY;
1119 
1120 	if (flen != blen) {
1121 		struct mem_frag_state *s = calloc(1, sizeof(*s));
1122 
1123 		if (!s) {
1124 			rc = FFA_NO_MEMORY;
1125 			goto err;
1126 		}
1127 		s->share = share;
1128 		s->mm = mm;
1129 		s->frag_offset = addr_range_offs;
1130 
1131 		SLIST_INSERT_HEAD(&frag_state_head, s, link);
1132 		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
1133 					flen - addr_range_offs);
1134 
1135 		if (rc >= 0)
1136 			*global_handle = mobj_ffa_get_cookie(share.mf);
1137 
1138 		return rc;
1139 	}
1140 
1141 	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
1142 				  flen - addr_range_offs);
1143 	if (rc) {
1144 		/*
1145 		 * A positive number of consumed bytes may be returned here
1146 		 * instead of 0 for done; treat that as an error too.
1147 		 */
1148 		rc = FFA_INVALID_PARAMETERS;
1149 		goto err;
1150 	}
1151 
1152 	*global_handle = mobj_ffa_push_to_inactive(share.mf);
1153 
1154 	return 0;
1155 err:
1156 	mobj_ffa_sel1_spmc_delete(share.mf);
1157 	return rc;
1158 }
1159 
1160 static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
1161 				 unsigned int page_count,
1162 				 uint64_t *global_handle, struct ffa_rxtx *rxtx)
1163 {
1164 	struct ffa_mem_transaction_x mem_trans = { };
1165 	int rc = 0;
1166 	size_t len = 0;
1167 	void *buf = NULL;
1168 	tee_mm_entry_t *mm = NULL;
1169 	vaddr_t offs = pbuf & SMALL_PAGE_MASK;
1170 
1171 	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
1172 		return FFA_INVALID_PARAMETERS;
1173 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
1174 		return FFA_INVALID_PARAMETERS;
1175 
1176 	/*
1177 	 * Check that the length reported in flen is covered by len even
1178 	 * if the offset is taken into account.
1179 	 */
1180 	if (len < flen || len - offs < flen)
1181 		return FFA_INVALID_PARAMETERS;
1182 
1183 	mm = tee_mm_alloc(&core_virt_shm_pool, len);
1184 	if (!mm)
1185 		return FFA_NO_MEMORY;
1186 
1187 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
1188 					  page_count, MEM_AREA_NSEC_SHM)) {
1189 		rc = FFA_INVALID_PARAMETERS;
1190 		goto out;
1191 	}
1192 	buf = (void *)(tee_mm_get_smem(mm) + offs);
1193 
1194 	cpu_spin_lock(&rxtx->spinlock);
1195 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, buf, flen, &mem_trans);
1196 	if (rc)
1197 		goto unlock;
1198 
1199 	if (is_sp_share(&mem_trans, buf)) {
1200 		rc = spmc_sp_add_share(&mem_trans, buf, blen, flen,
1201 				       global_handle, NULL);
1202 		goto unlock;
1203 	}
1204 
1205 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1206 	    virt_set_guest(mem_trans.sender_id)) {
1207 		rc = FFA_DENIED;
1208 		goto unlock;
1209 	}
1210 
1211 	rc = add_mem_share(&mem_trans, mm, buf, blen, flen, global_handle);
1212 
1213 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1214 		virt_unset_guest();
1215 
1216 unlock:
1217 	cpu_spin_unlock(&rxtx->spinlock);
1218 	if (rc > 0)
1219 		return rc;
1220 
1221 	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1222 out:
1223 	tee_mm_free(mm);
1224 	return rc;
1225 }
1226 
1227 static int handle_mem_share_rxbuf(size_t blen, size_t flen,
1228 				  uint64_t *global_handle,
1229 				  struct ffa_rxtx *rxtx)
1230 {
1231 	struct ffa_mem_transaction_x mem_trans = { };
1232 	int rc = FFA_DENIED;
1233 
1234 	cpu_spin_lock(&rxtx->spinlock);
1235 
1236 	if (!rxtx->rx || flen > rxtx->size)
1237 		goto out;
1238 
1239 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, flen,
1240 				       &mem_trans);
1241 	if (rc)
1242 		goto out;
1243 	if (is_sp_share(&mem_trans, rxtx->rx)) {
1244 		rc = spmc_sp_add_share(&mem_trans, rxtx, blen, flen,
1245 				       global_handle, NULL);
1246 		goto out;
1247 	}
1248 
1249 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1250 	    virt_set_guest(mem_trans.sender_id))
1251 		goto out;
1252 
1253 	rc = add_mem_share(&mem_trans, NULL, rxtx->rx, blen, flen,
1254 			   global_handle);
1255 
1256 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1257 		virt_unset_guest();
1258 
1259 out:
1260 	cpu_spin_unlock(&rxtx->spinlock);
1261 
1262 	return rc;
1263 }
1264 
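/*
 * FFA_MEM_SHARE register conventions as implemented below: w1 = total
 * descriptor length, w2 = fragment length, w3 = optional buffer address
 * (0 means the descriptor is in our RX buffer), w4 = page count of that
 * buffer. On completion the 64-bit handle is returned in w2/w3 of
 * FFA_SUCCESS; when more fragments are expected FFA_MEM_FRAG_RX carries
 * the handle in w1/w2 and the offset consumed so far in w3.
 */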
1265 static void handle_mem_share(struct thread_smc_args *args,
1266 			     struct ffa_rxtx *rxtx)
1267 {
1268 	uint32_t tot_len = args->a1;
1269 	uint32_t frag_len = args->a2;
1270 	uint64_t addr = args->a3;
1271 	uint32_t page_count = args->a4;
1272 	uint32_t ret_w1 = 0;
1273 	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
1274 	uint32_t ret_w3 = 0;
1275 	uint32_t ret_fid = FFA_ERROR;
1276 	uint64_t global_handle = 0;
1277 	int rc = 0;
1278 
1279 	/* Check that the MBZs are indeed 0 */
1280 	if (args->a5 || args->a6 || args->a7)
1281 		goto out;
1282 
1283 	/* Check that fragment length doesn't exceed total length */
1284 	if (frag_len > tot_len)
1285 		goto out;
1286 
1287 	/* Check for 32-bit calling convention */
1288 	if (args->a0 == FFA_MEM_SHARE_32)
1289 		addr &= UINT32_MAX;
1290 
1291 	if (!addr) {
1292 		/*
1293 		 * The memory transaction descriptor is passed via our rx
1294 		 * buffer.
1295 		 */
1296 		if (page_count)
1297 			goto out;
1298 		rc = handle_mem_share_rxbuf(tot_len, frag_len, &global_handle,
1299 					    rxtx);
1300 	} else {
1301 		rc = handle_mem_share_tmem(addr, tot_len, frag_len, page_count,
1302 					   &global_handle, rxtx);
1303 	}
1304 	if (rc < 0) {
1305 		ret_w2 = rc;
1306 	} else if (rc > 0) {
1307 		ret_fid = FFA_MEM_FRAG_RX;
1308 		ret_w3 = rc;
1309 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1310 	} else {
1311 		ret_fid = FFA_SUCCESS_32;
1312 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1313 	}
1314 out:
1315 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1316 }
1317 
1318 static struct mem_frag_state *get_frag_state(uint64_t global_handle)
1319 {
1320 	struct mem_frag_state *s = NULL;
1321 
1322 	SLIST_FOREACH(s, &frag_state_head, link)
1323 		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
1324 			return s;
1325 
1326 	return NULL;
1327 }
1328 
1329 static void handle_mem_frag_tx(struct thread_smc_args *args,
1330 			       struct ffa_rxtx *rxtx)
1331 {
1332 	uint64_t global_handle = reg_pair_to_64(args->a2, args->a1);
1333 	size_t flen = args->a3;
1334 	uint32_t endpoint_id = args->a4;
1335 	struct mem_frag_state *s = NULL;
1336 	tee_mm_entry_t *mm = NULL;
1337 	unsigned int page_count = 0;
1338 	void *buf = NULL;
1339 	uint32_t ret_w1 = 0;
1340 	uint32_t ret_w2 = 0;
1341 	uint32_t ret_w3 = 0;
1342 	uint32_t ret_fid = 0;
1343 	int rc = 0;
1344 
1345 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1346 		uint16_t guest_id = endpoint_id >> 16;
1347 
1348 		if (!guest_id || virt_set_guest(guest_id)) {
1349 			rc = FFA_INVALID_PARAMETERS;
1350 			goto out_set_rc;
1351 		}
1352 	}
1353 
1354 	/*
1355 	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
1356 	 * requests.
1357 	 */
1358 
1359 	cpu_spin_lock(&rxtx->spinlock);
1360 
1361 	s = get_frag_state(global_handle);
1362 	if (!s) {
1363 		rc = FFA_INVALID_PARAMETERS;
1364 		goto out;
1365 	}
1366 
1367 	mm = s->mm;
1368 	if (mm) {
1369 		if (flen > tee_mm_get_bytes(mm)) {
1370 			rc = FFA_INVALID_PARAMETERS;
1371 			goto out;
1372 		}
1373 		page_count = s->share.page_count;
1374 		buf = (void *)tee_mm_get_smem(mm);
1375 	} else {
1376 		if (flen > rxtx->size) {
1377 			rc = FFA_INVALID_PARAMETERS;
1378 			goto out;
1379 		}
1380 		buf = rxtx->rx;
1381 	}
1382 
1383 	rc = add_mem_share_frag(s, buf, flen);
1384 out:
1385 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1386 		virt_unset_guest();
1387 
1388 	cpu_spin_unlock(&rxtx->spinlock);
1389 
1390 	if (rc <= 0 && mm) {
1391 		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1392 		tee_mm_free(mm);
1393 	}
1394 
1395 out_set_rc:
1396 	if (rc < 0) {
1397 		ret_fid = FFA_ERROR;
1398 		ret_w2 = rc;
1399 	} else if (rc > 0) {
1400 		ret_fid = FFA_MEM_FRAG_RX;
1401 		ret_w3 = rc;
1402 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1403 	} else {
1404 		ret_fid = FFA_SUCCESS_32;
1405 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1406 	}
1407 
1408 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1409 }
1410 
1411 static void handle_mem_reclaim(struct thread_smc_args *args)
1412 {
1413 	int rc = FFA_INVALID_PARAMETERS;
1414 	uint64_t cookie = 0;
1415 
1416 	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
1417 		goto out;
1418 
1419 	cookie = reg_pair_to_64(args->a2, args->a1);
1420 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1421 		uint16_t guest_id = 0;
1422 
1423 		if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
1424 			guest_id = virt_find_guest_by_cookie(cookie);
1425 		} else {
1426 			guest_id = (cookie >> FFA_MEMORY_HANDLE_PRTN_SHIFT) &
1427 				   FFA_MEMORY_HANDLE_PRTN_MASK;
1428 		}
1429 		if (!guest_id)
1430 			goto out;
1431 		if (virt_set_guest(guest_id)) {
1432 			if (!virt_reclaim_cookie_from_destroyed_guest(guest_id,
1433 								      cookie))
1434 				rc = FFA_OK;
1435 			goto out;
1436 		}
1437 	}
1438 
1439 	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
1440 	case TEE_SUCCESS:
1441 		rc = FFA_OK;
1442 		break;
1443 	case TEE_ERROR_ITEM_NOT_FOUND:
1444 		DMSG("cookie %#"PRIx64" not found", cookie);
1445 		rc = FFA_INVALID_PARAMETERS;
1446 		break;
1447 	default:
1448 		DMSG("cookie %#"PRIx64" busy", cookie);
1449 		rc = FFA_DENIED;
1450 		break;
1451 	}
1452 
1453 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1454 		virt_unset_guest();
1455 
1456 out:
1457 	set_simple_ret_val(args, rc);
1458 }
1459 
1460 static void handle_notification_bitmap_create(struct thread_smc_args *args)
1461 {
1462 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1463 	uint32_t ret_fid = FFA_ERROR;
1464 	uint32_t old_itr_status = 0;
1465 
1466 	if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
1467 	    !args->a5 && !args->a6 && !args->a7) {
1468 		uint16_t vm_id = args->a1;
1469 
1470 		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1471 
1472 		if (notif_vm_id_valid) {
1473 			if (vm_id == notif_vm_id)
1474 				ret_val = FFA_DENIED;
1475 			else
1476 				ret_val = FFA_NO_MEMORY;
1477 		} else {
1478 			notif_vm_id = vm_id;
1479 			notif_vm_id_valid = true;
1480 			ret_val = FFA_OK;
1481 			ret_fid = FFA_SUCCESS_32;
1482 		}
1483 
1484 		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1485 	}
1486 
1487 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1488 }
1489 
1490 static void handle_notification_bitmap_destroy(struct thread_smc_args *args)
1491 {
1492 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1493 	uint32_t ret_fid = FFA_ERROR;
1494 	uint32_t old_itr_status = 0;
1495 
1496 	if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
1497 	    !args->a5 && !args->a6 && !args->a7) {
1498 		uint16_t vm_id = args->a1;
1499 
1500 		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1501 
1502 		if (notif_vm_id_valid && vm_id == notif_vm_id) {
1503 			if (notif_pending_bitmap || notif_bound_bitmap) {
1504 				ret_val = FFA_DENIED;
1505 			} else {
1506 				notif_vm_id_valid = false;
1507 				ret_val = FFA_OK;
1508 				ret_fid = FFA_SUCCESS_32;
1509 			}
1510 		}
1511 
1512 		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1513 	}
1514 
1515 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1516 }
1517 
1518 static void handle_notification_bind(struct thread_smc_args *args)
1519 {
1520 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1521 	uint32_t ret_fid = FFA_ERROR;
1522 	uint32_t old_itr_status = 0;
1523 	uint64_t bitmap = 0;
1524 	uint16_t vm_id = 0;
1525 
1526 	if (args->a5 || args->a6 || args->a7)
1527 		goto out;
1528 	if (args->a2) {
1529 		/* We only deal with global notifications for now */
1530 		ret_val = FFA_NOT_SUPPORTED;
1531 		goto out;
1532 	}
1533 
1534 	/* The destination of the eventual notification */
1535 	vm_id = FFA_DST(args->a1);
1536 	bitmap = reg_pair_to_64(args->a4, args->a3);
1537 
1538 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1539 
1540 	if (notif_vm_id_valid && vm_id == notif_vm_id) {
1541 		if (bitmap & notif_bound_bitmap) {
1542 			ret_val = FFA_DENIED;
1543 		} else {
1544 			notif_bound_bitmap |= bitmap;
1545 			ret_val = FFA_OK;
1546 			ret_fid = FFA_SUCCESS_32;
1547 		}
1548 	}
1549 
1550 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1551 out:
1552 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1553 }
1554 
1555 static void handle_notification_unbind(struct thread_smc_args *args)
1556 {
1557 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1558 	uint32_t ret_fid = FFA_ERROR;
1559 	uint32_t old_itr_status = 0;
1560 	uint64_t bitmap = 0;
1561 	uint16_t vm_id = 0;
1562 
1563 	if (args->a2 || args->a5 || args->a6 || args->a7)
1564 		goto out;
1565 
1566 	/* The destination of the eventual notification */
1567 	vm_id = FFA_DST(args->a1);
1568 	bitmap = reg_pair_to_64(args->a4, args->a3);
1569 
1570 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1571 
1572 	if (notif_vm_id_valid && vm_id == notif_vm_id) {
1573 		/*
1574 		 * Spec says:
1575 		 * At least one notification is bound to another Sender or
1576 		 * is currently pending.
1577 		 *
1578 		 * Not sure what the intention is.
1579 		 */
1580 		if (bitmap & notif_pending_bitmap) {
1581 			ret_val = FFA_DENIED;
1582 		} else {
1583 			notif_bound_bitmap &= ~bitmap;
1584 			ret_val = FFA_OK;
1585 			ret_fid = FFA_SUCCESS_32;
1586 		}
1587 	}
1588 
1589 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1590 out:
1591 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1592 }
1593 
1594 static void handle_notification_get(struct thread_smc_args *args)
1595 {
1596 	uint32_t w2 = FFA_INVALID_PARAMETERS;
1597 	uint32_t ret_fid = FFA_ERROR;
1598 	uint32_t old_itr_status = 0;
1599 	uint16_t vm_id = 0;
1600 	uint32_t w3 = 0;
1601 
1602 	if (args->a5 || args->a6 || args->a7)
1603 		goto out;
1604 	if (!(args->a2 & 0x1)) {
1605 		ret_fid = FFA_SUCCESS_32;
1606 		w2 = 0;
1607 		goto out;
1608 	}
1609 	vm_id = FFA_DST(args->a1);
1610 
1611 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1612 
1613 	if (notif_vm_id_valid && vm_id == notif_vm_id) {
1614 		reg_pair_from_64(notif_pending_bitmap, &w3, &w2);
1615 		notif_pending_bitmap = 0;
1616 		ret_fid = FFA_SUCCESS_32;
1617 	}
1618 
1619 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1620 out:
1621 	spmc_set_args(args, ret_fid, 0, w2, w3, 0, 0);
1622 }
1623 
1624 static void handle_notification_info_get(struct thread_smc_args *args)
1625 {
1626 	uint32_t w2 = FFA_INVALID_PARAMETERS;
1627 	uint32_t ret_fid = FFA_ERROR;
1628 
1629 	if (args->a1 || args->a2 || args->a3 || args->a4 || args->a5 ||
1630 	    args->a6 || args->a7)
1631 		goto out;
1632 
1633 	if (OPTEE_SMC_IS_64(args->a0))
1634 		ret_fid = FFA_SUCCESS_64;
1635 	else
1636 		ret_fid = FFA_SUCCESS_32;
1637 
1638 	/*
1639 	 * Note that we only support a physical OS kernel in normal world
1640 	 * with global notifications.
1641 	 * So there is one ID list (count of lists in BIT[11:7]),
1642 	 * that list holds a single ID (count of IDs is BIT[13:12] + 1),
1643 	 * and the VM ID is always 0.
1644 	 */
1645 	w2 = SHIFT_U32(1, 7);
1646 out:
1647 	spmc_set_args(args, ret_fid, 0, w2, 0, 0, 0);
1648 }
1649 
1650 void thread_spmc_set_async_notif_intid(int intid)
1651 {
1652 	assert(interrupt_can_raise_sgi(interrupt_get_main_chip()));
1653 	notif_intid = intid;
1654 	spmc_notif_is_ready = true;
1655 	DMSG("Asynchronous notifications are ready");
1656 }
1657 
1658 void notif_send_async(uint32_t value)
1659 {
1660 	uint32_t old_itr_status = 0;
1661 
1662 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1663 	assert(value == NOTIF_VALUE_DO_BOTTOM_HALF && spmc_notif_is_ready &&
1664 	       do_bottom_half_value >= 0 && notif_intid >= 0);
1665 	notif_pending_bitmap |= BIT64(do_bottom_half_value);
1666 	interrupt_raise_sgi(interrupt_get_main_chip(), notif_intid,
1667 			    ITR_CPU_MASK_TO_THIS_CPU);
1668 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1669 }
1670 #else
1671 void notif_send_async(uint32_t value)
1672 {
1673 	/* Global notification, delay the notification interrupt */
1674 	uint32_t flags = BIT32(1);
1675 	int res = 0;
1676 
1677 	assert(value == NOTIF_VALUE_DO_BOTTOM_HALF && spmc_notif_is_ready &&
1678 	       do_bottom_half_value >= 0);
1679 	res = ffa_set_notification(notif_vm_id, optee_endpoint_id, flags,
1680 				   BIT64(do_bottom_half_value));
1681 	if (res) {
1682 		EMSG("notification set failed with error %d", res);
1683 		panic();
1684 	}
1685 }
1686 #endif
1687 
1688 /* Only called from assembly */
1689 void thread_spmc_msg_recv(struct thread_smc_args *args);
1690 void thread_spmc_msg_recv(struct thread_smc_args *args)
1691 {
1692 	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
1693 	switch (args->a0) {
1694 #if defined(CFG_CORE_SEL1_SPMC)
1695 	case FFA_FEATURES:
1696 		handle_features(args);
1697 		break;
1698 	case FFA_SPM_ID_GET:
1699 		spmc_handle_spm_id_get(args);
1700 		break;
1701 #ifdef ARM64
1702 	case FFA_RXTX_MAP_64:
1703 #endif
1704 	case FFA_RXTX_MAP_32:
1705 		spmc_handle_rxtx_map(args, &my_rxtx);
1706 		break;
1707 	case FFA_RXTX_UNMAP:
1708 		spmc_handle_rxtx_unmap(args, &my_rxtx);
1709 		break;
1710 	case FFA_RX_RELEASE:
1711 		spmc_handle_rx_release(args, &my_rxtx);
1712 		break;
1713 	case FFA_PARTITION_INFO_GET:
1714 		spmc_handle_partition_info_get(args, &my_rxtx);
1715 		break;
1716 	case FFA_RUN:
1717 		spmc_handle_run(args);
1718 		break;
1719 #endif /*CFG_CORE_SEL1_SPMC*/
1720 	case FFA_INTERRUPT:
1721 		if (IS_ENABLED(CFG_CORE_SEL1_SPMC))
1722 			spmc_set_args(args, FFA_NORMAL_WORLD_RESUME, 0, 0, 0,
1723 				      0, 0);
1724 		else
1725 			spmc_set_args(args, FFA_MSG_WAIT, 0, 0, 0, 0, 0);
1726 		break;
1727 #ifdef ARM64
1728 	case FFA_MSG_SEND_DIRECT_REQ_64:
1729 #endif
1730 	case FFA_MSG_SEND_DIRECT_REQ_32:
1731 		handle_direct_request(args, &my_rxtx);
1732 		break;
1733 #if defined(CFG_CORE_SEL1_SPMC)
1734 #ifdef ARM64
1735 	case FFA_MEM_SHARE_64:
1736 #endif
1737 	case FFA_MEM_SHARE_32:
1738 		handle_mem_share(args, &my_rxtx);
1739 		break;
1740 	case FFA_MEM_RECLAIM:
1741 		if (!IS_ENABLED(CFG_SECURE_PARTITION) ||
1742 		    !ffa_mem_reclaim(args, NULL))
1743 			handle_mem_reclaim(args);
1744 		break;
1745 	case FFA_MEM_FRAG_TX:
1746 		handle_mem_frag_tx(args, &my_rxtx);
1747 		break;
1748 	case FFA_NOTIFICATION_BITMAP_CREATE:
1749 		handle_notification_bitmap_create(args);
1750 		break;
1751 	case FFA_NOTIFICATION_BITMAP_DESTROY:
1752 		handle_notification_bitmap_destroy(args);
1753 		break;
1754 	case FFA_NOTIFICATION_BIND:
1755 		handle_notification_bind(args);
1756 		break;
1757 	case FFA_NOTIFICATION_UNBIND:
1758 		handle_notification_unbind(args);
1759 		break;
1760 	case FFA_NOTIFICATION_GET:
1761 		handle_notification_get(args);
1762 		break;
1763 #ifdef ARM64
1764 	case FFA_NOTIFICATION_INFO_GET_64:
1765 #endif
1766 	case FFA_NOTIFICATION_INFO_GET_32:
1767 		handle_notification_info_get(args);
1768 		break;
1769 #endif /*CFG_CORE_SEL1_SPMC*/
1770 	case FFA_ERROR:
1771 		EMSG("Cannot handle FFA_ERROR(%d)", (int)args->a2);
1772 		if (!IS_ENABLED(CFG_CORE_SEL1_SPMC)) {
1773 			/*
1774 			 * The SPMC will return an FFA_ERROR back, so it's
1775 			 * better to panic() now than to flood the log.
1776 			 */
1777 			panic("FFA_ERROR from SPMC is fatal");
1778 		}
1779 		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
1780 			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1781 		break;
1782 	default:
1783 		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
1784 		set_simple_ret_val(args, FFA_NOT_SUPPORTED);
1785 	}
1786 }
1787 
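/*
 * Expected shared buffer layout for the call below, a sketch derived
 * from the offsets used in this function: the caller's struct
 * optee_msg_arg starts at @offset within the mobj identified by
 * @cookie, and a second struct optee_msg_arg reserved for RPC follows
 * directly after it, sized for THREAD_RPC_MAX_NUM_PARAMS parameters.
 */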
1788 static TEE_Result yielding_call_with_arg(uint64_t cookie, uint32_t offset)
1789 {
1790 	size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1791 	struct thread_ctx *thr = threads + thread_get_id();
1792 	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
1793 	struct optee_msg_arg *arg = NULL;
1794 	struct mobj *mobj = NULL;
1795 	uint32_t num_params = 0;
1796 	size_t sz = 0;
1797 
1798 	mobj = mobj_ffa_get_by_cookie(cookie, 0);
1799 	if (!mobj) {
1800 		EMSG("Can't find cookie %#"PRIx64, cookie);
1801 		return TEE_ERROR_BAD_PARAMETERS;
1802 	}
1803 
1804 	res = mobj_inc_map(mobj);
1805 	if (res)
1806 		goto out_put_mobj;
1807 
1808 	res = TEE_ERROR_BAD_PARAMETERS;
1809 	arg = mobj_get_va(mobj, offset, sizeof(*arg));
1810 	if (!arg)
1811 		goto out_dec_map;
1812 
1813 	num_params = READ_ONCE(arg->num_params);
1814 	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
1815 		goto out_dec_map;
1816 
1817 	sz = OPTEE_MSG_GET_ARG_SIZE(num_params);
1818 
1819 	thr->rpc_arg = mobj_get_va(mobj, offset + sz, sz_rpc);
1820 	if (!thr->rpc_arg)
1821 		goto out_dec_map;
1822 
1823 	virt_on_stdcall();
1824 	res = tee_entry_std(arg, num_params);
1825 
1826 	thread_rpc_shm_cache_clear(&thr->shm_cache);
1827 	thr->rpc_arg = NULL;
1828 
1829 out_dec_map:
1830 	mobj_dec_map(mobj);
1831 out_put_mobj:
1832 	mobj_put(mobj);
1833 	return res;
1834 }
1835 
1836 /*
1837  * Helper routine for the assembly function thread_std_smc_entry()
1838  *
1839  * Note: this function is weak just to make link_dummies_paged.c happy.
1840  */
1841 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
1842 				       uint32_t a2, uint32_t a3,
1843 				       uint32_t a4, uint32_t a5 __unused)
1844 {
1845 	/*
1846 	 * Arguments are supplied from handle_yielding_call() as:
1847 	 * a0 <- w1
1848 	 * a1 <- w3
1849 	 * a2 <- w4
1850 	 * a3 <- w5
1851 	 * a4 <- w6
1852 	 * a5 <- w7
1853 	 */
1854 	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
1855 	if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG)
1856 		return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4);
1857 	return FFA_DENIED;
1858 }
1859 
1860 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
1861 {
1862 	uint64_t offs = tpm->u.memref.offs;
1863 
1864 	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
1865 		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
1866 
1867 	param->u.fmem.offs_low = offs;
1868 	param->u.fmem.offs_high = offs >> 32;
1869 	if (param->u.fmem.offs_high != offs >> 32)
1870 		return false;
1871 
1872 	param->u.fmem.size = tpm->u.memref.size;
1873 	if (tpm->u.memref.mobj) {
1874 		uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj);
1875 
1876 		/* If a mobj is passed, it had better have a valid cookie. */
1877 		if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
1878 			return false;
1879 		param->u.fmem.global_id = cookie;
1880 	} else {
1881 		param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
1882 	}
1883 
1884 	return true;
1885 }
1886 
1887 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
1888 			    struct thread_param *params,
1889 			    struct optee_msg_arg **arg_ret)
1890 {
1891 	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1892 	struct thread_ctx *thr = threads + thread_get_id();
1893 	struct optee_msg_arg *arg = thr->rpc_arg;
1894 
1895 	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
1896 		return TEE_ERROR_BAD_PARAMETERS;
1897 
1898 	if (!arg) {
1899 		EMSG("rpc_arg not set");
1900 		return TEE_ERROR_GENERIC;
1901 	}
1902 
1903 	memset(arg, 0, sz);
1904 	arg->cmd = cmd;
1905 	arg->num_params = num_params;
1906 	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
1907 
1908 	for (size_t n = 0; n < num_params; n++) {
1909 		switch (params[n].attr) {
1910 		case THREAD_PARAM_ATTR_NONE:
1911 			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
1912 			break;
1913 		case THREAD_PARAM_ATTR_VALUE_IN:
1914 		case THREAD_PARAM_ATTR_VALUE_OUT:
1915 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1916 			arg->params[n].attr = params[n].attr -
1917 					      THREAD_PARAM_ATTR_VALUE_IN +
1918 					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
1919 			arg->params[n].u.value.a = params[n].u.value.a;
1920 			arg->params[n].u.value.b = params[n].u.value.b;
1921 			arg->params[n].u.value.c = params[n].u.value.c;
1922 			break;
1923 		case THREAD_PARAM_ATTR_MEMREF_IN:
1924 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1925 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1926 			if (!set_fmem(arg->params + n, params + n))
1927 				return TEE_ERROR_BAD_PARAMETERS;
1928 			break;
1929 		default:
1930 			return TEE_ERROR_BAD_PARAMETERS;
1931 		}
1932 	}
1933 
1934 	if (arg_ret)
1935 		*arg_ret = arg;
1936 
1937 	return TEE_SUCCESS;
1938 }
1939 
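/*
 * Copy output values and updated memref sizes back into @params once
 * normal world has handled the RPC, and return the RPC return code.
 */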
1940 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
1941 				struct thread_param *params)
1942 {
1943 	for (size_t n = 0; n < num_params; n++) {
1944 		switch (params[n].attr) {
1945 		case THREAD_PARAM_ATTR_VALUE_OUT:
1946 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1947 			params[n].u.value.a = arg->params[n].u.value.a;
1948 			params[n].u.value.b = arg->params[n].u.value.b;
1949 			params[n].u.value.c = arg->params[n].u.value.c;
1950 			break;
1951 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1952 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1953 			params[n].u.memref.size = arg->params[n].u.fmem.size;
1954 			break;
1955 		default:
1956 			break;
1957 		}
1958 	}
1959 
1960 	return arg->ret;
1961 }
1962 
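/*
 * Issue an RPC command to normal world: marshal @params into the RPC
 * argument buffer, yield with OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD and
 * unmarshal the results when the thread is resumed.
 */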
1963 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
1964 			struct thread_param *params)
1965 {
1966 	struct thread_rpc_arg rpc_arg = { .call = {
1967 			.w1 = thread_get_tsd()->rpc_target_info,
1968 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1969 		},
1970 	};
1971 	struct optee_msg_arg *arg = NULL;
1972 	uint32_t ret = 0;
1973 
1974 	ret = get_rpc_arg(cmd, num_params, params, &arg);
1975 	if (ret)
1976 		return ret;
1977 
1978 	thread_rpc(&rpc_arg);
1979 
1980 	return get_rpc_arg_res(arg, num_params, params);
1981 }
1982 
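/*
 * Ask normal world to free the shared memory identified by @cookie. The
 * mobj reference is dropped and the FF-A cookie unregistered even if
 * building the RPC request fails.
 */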
1983 static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
1984 {
1985 	struct thread_rpc_arg rpc_arg = { .call = {
1986 			.w1 = thread_get_tsd()->rpc_target_info,
1987 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1988 		},
1989 	};
1990 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
1991 	uint32_t res2 = 0;
1992 	uint32_t res = 0;
1993 
1994 	DMSG("freeing cookie %#"PRIx64, cookie);
1995 
1996 	res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);
1997 
1998 	mobj_put(mobj);
1999 	res2 = mobj_ffa_unregister_by_cookie(cookie);
2000 	if (res2)
2001 		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
2002 		     cookie, res2);
2003 	if (!res)
2004 		thread_rpc(&rpc_arg);
2005 }
2006 
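/*
 * Ask normal world to allocate shared memory of type @bt, then look up
 * the returned FF-A cookie and map the resulting mobj. Returns NULL on
 * any failure.
 */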
2007 static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
2008 {
2009 	struct thread_rpc_arg rpc_arg = { .call = {
2010 			.w1 = thread_get_tsd()->rpc_target_info,
2011 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
2012 		},
2013 	};
2014 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
2015 	struct optee_msg_arg *arg = NULL;
2016 	unsigned int internal_offset = 0;
2017 	struct mobj *mobj = NULL;
2018 	uint64_t cookie = 0;
2019 
2020 	if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
2021 		return NULL;
2022 
2023 	thread_rpc(&rpc_arg);
2024 
2025 	if (arg->num_params != 1 ||
2026 	    arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
2027 		return NULL;
2028 
2029 	internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs);
2030 	cookie = READ_ONCE(arg->params->u.fmem.global_id);
2031 	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
2032 	if (!mobj) {
2033 		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
2034 		     cookie, internal_offset);
2035 		return NULL;
2036 	}
2037 
2038 	assert(mobj_is_nonsec(mobj));
2039 
2040 	if (mobj->size < size) {
2041 		DMSG("Mobj %#"PRIx64": wrong size", cookie);
2042 		mobj_put(mobj);
2043 		return NULL;
2044 	}
2045 
2046 	if (mobj_inc_map(mobj)) {
2047 		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
2048 		mobj_put(mobj);
2049 		return NULL;
2050 	}
2051 
2052 	return mobj;
2053 }
2054 
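/*
 * Illustrative example (not part of the original code) of using the
 * payload helpers below to share a temporary buffer with normal world:
 *
 *	struct mobj *m = thread_rpc_alloc_payload(SMALL_PAGE_SIZE);
 *
 *	if (m) {
 *		void *va = mobj_get_va(m, 0, SMALL_PAGE_SIZE);
 *
 *		if (va)
 *			memset(va, 0, SMALL_PAGE_SIZE);
 *		thread_rpc_free_payload(m);
 *	}
 */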
2055 struct mobj *thread_rpc_alloc_payload(size_t size)
2056 {
2057 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
2058 }
2059 
2060 struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
2061 {
2062 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
2063 }
2064 
2065 void thread_rpc_free_kernel_payload(struct mobj *mobj)
2066 {
2067 	if (mobj)
2068 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL,
2069 				mobj_get_cookie(mobj), mobj);
2070 }
2071 
2072 void thread_rpc_free_payload(struct mobj *mobj)
2073 {
2074 	if (mobj)
2075 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
2076 				mobj);
2077 }
2078 
2079 struct mobj *thread_rpc_alloc_global_payload(size_t size)
2080 {
2081 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
2082 }
2083 
2084 void thread_rpc_free_global_payload(struct mobj *mobj)
2085 {
2086 	if (mobj)
2087 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL,
2088 				mobj_get_cookie(mobj), mobj);
2089 }
2090 
2091 void thread_spmc_register_secondary_ep(vaddr_t ep)
2092 {
2093 	unsigned long ret = 0;
2094 
2095 	/* Let the SPM know the entry point for secondary CPUs */
2096 	ret = thread_smc(FFA_SECONDARY_EP_REGISTER_64, ep, 0, 0);
2097 
2098 	if (ret != FFA_SUCCESS_32 && ret != FFA_SUCCESS_64)
2099 		EMSG("FFA_SECONDARY_EP_REGISTER_64 ret %#lx", ret);
2100 }
2101 
2102 static uint16_t ffa_id_get(void)
2103 {
2104 	/*
2105 	 * Ask the SPM component running at a higher EL to return our FF-A ID.
2106 	 * This can either be the SPMC ID (if the SPMC is enabled in OP-TEE) or
2107 	 * the partition ID (if not).
2108 	 */
2109 	struct thread_smc_args args = {
2110 		.a0 = FFA_ID_GET,
2111 	};
2112 
2113 	thread_smccc(&args);
2114 	if (!is_ffa_success(args.a0)) {
2115 		if (args.a0 == FFA_ERROR)
2116 			EMSG("Get id failed with error %ld", args.a2);
2117 		else
2118 			EMSG("Get id failed");
2119 		panic();
2120 	}
2121 
2122 	return args.a2;
2123 }
2124 
2125 static uint16_t ffa_spm_id_get(void)
2126 {
2127 	/*
2128 	 * Ask the SPM component running at a higher EL to return its ID.
2129 	 * If OP-TEE implements the S-EL1 SPMC, this will get the SPMD ID.
2130 	 * If not, the ID of the SPMC will be returned.
2131 	 */
2132 	struct thread_smc_args args = {
2133 		.a0 = FFA_SPM_ID_GET,
2134 	};
2135 
2136 	thread_smccc(&args);
2137 	if (!is_ffa_success(args.a0)) {
2138 		if (args.a0 == FFA_ERROR)
2139 			EMSG("Get spm id failed with error %ld", args.a2);
2140 		else
2141 			EMSG("Get spm id failed");
2142 		panic();
2143 	}
2144 
2145 	return args.a2;
2146 }
2147 
2148 #if defined(CFG_CORE_SEL1_SPMC)
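/*
 * When OP-TEE is the S-EL1 SPMC: fetch the SPMD ID and our SPMC ID, and
 * pick an OP-TEE endpoint ID in the secure world range that doesn't
 * collide with either of them.
 */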
2149 static TEE_Result spmc_init(void)
2150 {
2151 	spmd_id = ffa_spm_id_get();
2152 	DMSG("SPMD ID %#"PRIx16, spmd_id);
2153 
2154 	spmc_id = ffa_id_get();
2155 	DMSG("SPMC ID %#"PRIx16, spmc_id);
2156 
2157 	optee_endpoint_id = FFA_SWD_ID_MIN;
2158 	while (optee_endpoint_id == spmd_id || optee_endpoint_id == spmc_id)
2159 		optee_endpoint_id++;
2160 
2161 	DMSG("OP-TEE endpoint ID %#"PRIx16, optee_endpoint_id);
2162 
2163 	/*
2164 	 * If the SPMD thinks we are version 1.0 it will report version 1.0
2165 	 * to normal world regardless of what version we query the SPM with.
2166 	 * However, if the SPMD thinks we are version 1.1 it will forward
2167 	 * queries from normal world and let us negotiate the version. So by
2168 	 * setting version 1.0 here we should be compatible either way.
2169 	 *
2170 	 * Note that disagreement on the negotiated version means that we'll
2171 	 * have communication problems with normal world.
2172 	 */
2173 	my_rxtx.ffa_vers = FFA_VERSION_1_0;
2174 
2175 	return TEE_SUCCESS;
2176 }
2177 #else /* !defined(CFG_CORE_SEL1_SPMC) */
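/*
 * Register our RX/TX buffer pair with the SPMC via FFA_RXTX_MAP; a3
 * carries the buffer size as a page count.
 */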
2178 static void spmc_rxtx_map(struct ffa_rxtx *rxtx)
2179 {
2180 	struct thread_smc_args args = {
2181 #ifdef ARM64
2182 		.a0 = FFA_RXTX_MAP_64,
2183 #else
2184 		.a0 = FFA_RXTX_MAP_32,
2185 #endif
2186 		.a1 = virt_to_phys(rxtx->tx),
2187 		.a2 = virt_to_phys(rxtx->rx),
2188 		.a3 = 1,
2189 	};
2190 
2191 	thread_smccc(&args);
2192 	if (!is_ffa_success(args.a0)) {
2193 		if (args.a0 == FFA_ERROR)
2194 			EMSG("rxtx map failed with error %ld", args.a2);
2195 		else
2196 			EMSG("rxtx map failed");
2197 		panic();
2198 	}
2199 }
2200 
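/*
 * Query the SPMC with FFA_VERSION, passing the version we support. A
 * return value with bit 31 set indicates an error and is fatal here.
 */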
2201 static uint32_t get_ffa_version(uint32_t my_version)
2202 {
2203 	struct thread_smc_args args = {
2204 		.a0 = FFA_VERSION,
2205 		.a1 = my_version,
2206 	};
2207 
2208 	thread_smccc(&args);
2209 	if (args.a0 & BIT(31)) {
2210 		EMSG("FF-A version failed with error %ld", args.a0);
2211 		panic();
2212 	}
2213 
2214 	return args.a0;
2215 }
2216 
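/*
 * Send an FFA_MEM_RETRIEVE_REQ for @cookie using the TX buffer. The
 * layout of the transaction descriptor depends on the negotiated FF-A
 * version (1.0 vs 1.1 and later). Returns the RX buffer holding the
 * response, or NULL on failure, with @trans filled in on success.
 */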
2217 static void *spmc_retrieve_req(uint64_t cookie,
2218 			       struct ffa_mem_transaction_x *trans)
2219 {
2220 	struct ffa_mem_access *acc_descr_array = NULL;
2221 	struct ffa_mem_access_perm *perm_descr = NULL;
2222 	struct thread_smc_args args = {
2223 		.a0 = FFA_MEM_RETRIEVE_REQ_32,
2224 		.a3 =	0,	/* Address, Using TX -> MBZ */
2225 		.a4 =   0,	/* Using TX -> MBZ */
2226 	};
2227 	size_t size = 0;
2228 	int rc = 0;
2229 
2230 	if (my_rxtx.ffa_vers == FFA_VERSION_1_0) {
2231 		struct ffa_mem_transaction_1_0 *trans_descr = my_rxtx.tx;
2232 
2233 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
2234 		memset(trans_descr, 0, size);
2235 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
2236 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
2237 		trans_descr->global_handle = cookie;
2238 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
2239 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
2240 		trans_descr->mem_access_count = 1;
2241 		acc_descr_array = trans_descr->mem_access_array;
2242 	} else {
2243 		struct ffa_mem_transaction_1_1 *trans_descr = my_rxtx.tx;
2244 
2245 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
2246 		memset(trans_descr, 0, size);
2247 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
2248 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
2249 		trans_descr->global_handle = cookie;
2250 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
2251 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
2252 		trans_descr->mem_access_count = 1;
2253 		trans_descr->mem_access_offs = sizeof(*trans_descr);
2254 		trans_descr->mem_access_size = sizeof(struct ffa_mem_access);
2255 		acc_descr_array = (void *)((vaddr_t)my_rxtx.tx +
2256 					   sizeof(*trans_descr));
2257 	}
2258 	acc_descr_array->region_offs = 0;
2259 	acc_descr_array->reserved = 0;
2260 	perm_descr = &acc_descr_array->access_perm;
2261 	perm_descr->endpoint_id = optee_endpoint_id;
2262 	perm_descr->perm = FFA_MEM_ACC_RW;
2263 	perm_descr->flags = 0;
2264 
2265 	args.a1 = size; /* Total Length */
2266 	args.a2 = size; /* Frag Length == Total length */
2267 	thread_smccc(&args);
2268 	if (args.a0 != FFA_MEM_RETRIEVE_RESP) {
2269 		if (args.a0 == FFA_ERROR)
2270 			EMSG("Failed to fetch cookie %#"PRIx64" error code %d",
2271 			     cookie, (int)args.a2);
2272 		else
2273 			EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64,
2274 			     cookie, args.a0);
2275 		return NULL;
2276 	}
2277 	rc = spmc_read_mem_transaction(my_rxtx.ffa_vers, my_rxtx.rx,
2278 				       my_rxtx.size, trans);
2279 	if (rc) {
2280 		EMSG("Memory transaction failure for cookie %#"PRIx64" rc %d",
2281 		     cookie, rc);
2282 		return NULL;
2283 	}
2284 
2285 	return my_rxtx.rx;
2286 }
2287 
2288 void thread_spmc_relinquish(uint64_t cookie)
2289 {
2290 	struct ffa_mem_relinquish *relinquish_desc = my_rxtx.tx;
2291 	struct thread_smc_args args = {
2292 		.a0 = FFA_MEM_RELINQUISH,
2293 	};
2294 
2295 	memset(relinquish_desc, 0, sizeof(*relinquish_desc));
2296 	relinquish_desc->handle = cookie;
2297 	relinquish_desc->flags = 0;
2298 	relinquish_desc->endpoint_count = 1;
2299 	relinquish_desc->endpoint_id_array[0] = optee_endpoint_id;
2300 	thread_smccc(&args);
2301 	if (!is_ffa_success(args.a0))
2302 		EMSG("Failed to relinquish cookie %#"PRIx64, cookie);
2303 }
2304 
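/*
 * Add each address range of the retrieved memory region to @mf and check
 * that the ranges cover exactly @num_pages pages.
 */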
2305 static int set_pages(struct ffa_address_range *regions,
2306 		     unsigned int num_regions, unsigned int num_pages,
2307 		     struct mobj_ffa *mf)
2308 {
2309 	unsigned int n = 0;
2310 	unsigned int idx = 0;
2311 
2312 	for (n = 0; n < num_regions; n++) {
2313 		unsigned int page_count = READ_ONCE(regions[n].page_count);
2314 		uint64_t addr = READ_ONCE(regions[n].address);
2315 
2316 		if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count))
2317 			return FFA_INVALID_PARAMETERS;
2318 	}
2319 
2320 	if (idx != num_pages)
2321 		return FFA_INVALID_PARAMETERS;
2322 
2323 	return 0;
2324 }
2325 
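/*
 * Retrieve the shared memory region identified by @cookie from the SPMC
 * and wrap it in a mobj_ffa. The RX buffer is released before returning.
 */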
2326 struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie)
2327 {
2328 	struct mobj_ffa *ret = NULL;
2329 	struct ffa_mem_transaction_x retrieve_desc = { };
2330 	struct ffa_mem_access *descr_array = NULL;
2331 	struct ffa_mem_region *descr = NULL;
2332 	struct mobj_ffa *mf = NULL;
2333 	unsigned int num_pages = 0;
2334 	unsigned int offs = 0;
2335 	void *buf = NULL;
2336 	struct thread_smc_args ffa_rx_release_args = {
2337 		.a0 = FFA_RX_RELEASE
2338 	};
2339 
2340 	/*
2341 	 * OP-TEE only supports a single mem_region, while the
2342 	 * specification allows for more than one.
2343 	 */
2344 	buf = spmc_retrieve_req(cookie, &retrieve_desc);
2345 	if (!buf) {
2346 		EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64,
2347 		     cookie);
2348 		return NULL;
2349 	}
2350 
2351 	descr_array = (void *)((vaddr_t)buf + retrieve_desc.mem_access_offs);
2352 	offs = READ_ONCE(descr_array->region_offs);
2353 	descr = (struct ffa_mem_region *)((vaddr_t)buf + offs);
2354 
2355 	num_pages = READ_ONCE(descr->total_page_count);
2356 	mf = mobj_ffa_spmc_new(cookie, num_pages);
2357 	if (!mf)
2358 		goto out;
2359 
2360 	if (set_pages(descr->address_range_array,
2361 		      READ_ONCE(descr->address_range_count), num_pages, mf)) {
2362 		mobj_ffa_spmc_delete(mf);
2363 		goto out;
2364 	}
2365 
2366 	ret = mf;
2367 
2368 out:
2369 	/* Release RX buffer after the mem retrieve request. */
2370 	thread_smccc(&ffa_rx_release_args);
2371 
2372 	return ret;
2373 }
2374 
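/*
 * When the SPMC is at a higher exception level: negotiate the FF-A
 * version, map the RX/TX buffers and retrieve the SPMC and OP-TEE
 * endpoint IDs.
 */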
2375 static TEE_Result spmc_init(void)
2376 {
2377 	unsigned int major = 0;
2378 	unsigned int minor __maybe_unused = 0;
2379 	uint32_t my_vers = 0;
2380 	uint32_t vers = 0;
2381 
2382 	my_vers = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR);
2383 	vers = get_ffa_version(my_vers);
2384 	major = (vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK;
2385 	minor = (vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK;
2386 	DMSG("SPMC reported version %u.%u", major, minor);
2387 	if (major != FFA_VERSION_MAJOR) {
2388 		EMSG("Incompatible major version %u, expected %u",
2389 		     major, FFA_VERSION_MAJOR);
2390 		panic();
2391 	}
2392 	if (vers < my_vers)
2393 		my_vers = vers;
2394 	DMSG("Using version %u.%u",
2395 	     (my_vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK,
2396 	     (my_vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK);
2397 	my_rxtx.ffa_vers = my_vers;
2398 
2399 	spmc_rxtx_map(&my_rxtx);
2400 
2401 	spmc_id = ffa_spm_id_get();
2402 	DMSG("SPMC ID %#"PRIx16, spmc_id);
2403 
2404 	optee_endpoint_id = ffa_id_get();
2405 	DMSG("OP-TEE endpoint ID %#"PRIx16, optee_endpoint_id);
2406 
2407 	if (!ffa_features(FFA_NOTIFICATION_SET)) {
2408 		spmc_notif_is_ready = true;
2409 		DMSG("Asynchronous notifications are ready");
2410 	}
2411 
2412 	return TEE_SUCCESS;
2413 }
2414 #endif /* !defined(CFG_CORE_SEL1_SPMC) */
2415 
2416 /*
2417  * boot_final() always runs before boot initialization finishes. With
2418  * virtualization enabled, the init-calls only run once an OP-TEE
2419  * partition has been created, so in that case we have to initialize
2420  * via boot_final() to make sure a value is assigned before it is
2421  * used for the first time.
2422  */
2423 #ifdef CFG_NS_VIRTUALIZATION
2424 boot_final(spmc_init);
2425 #else
2426 service_init(spmc_init);
2427 #endif
2428