xref: /optee_os/core/arch/arm/kernel/thread_spmc.c (revision d17db2afa6a46f22659c37116ad409a07fa18ebc)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2020-2023, Linaro Limited.
4  * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
5  */
6 
7 #include <assert.h>
8 #include <ffa.h>
9 #include <initcall.h>
10 #include <io.h>
11 #include <kernel/interrupt.h>
12 #include <kernel/notif.h>
13 #include <kernel/panic.h>
14 #include <kernel/secure_partition.h>
15 #include <kernel/spinlock.h>
16 #include <kernel/spmc_sp_handler.h>
17 #include <kernel/tee_misc.h>
18 #include <kernel/thread.h>
19 #include <kernel/thread_private.h>
20 #include <kernel/thread_spmc.h>
21 #include <kernel/virtualization.h>
22 #include <mm/core_mmu.h>
23 #include <mm/mobj.h>
24 #include <optee_ffa.h>
25 #include <optee_msg.h>
26 #include <optee_rpc_cmd.h>
27 #include <sm/optee_smc.h>
28 #include <string.h>
29 #include <sys/queue.h>
30 #include <tee/entry_std.h>
31 #include <tee/uuid.h>
32 #include <util.h>
33 
34 #if defined(CFG_CORE_SEL1_SPMC)
35 struct mem_share_state {
36 	struct mobj_ffa *mf;
37 	unsigned int page_count;
38 	unsigned int region_count;
39 	unsigned int current_page_idx;
40 };
41 
42 struct mem_frag_state {
43 	struct mem_share_state share;
44 	tee_mm_entry_t *mm;
45 	unsigned int frag_offset;
46 	SLIST_ENTRY(mem_frag_state) link;
47 };
48 #endif
49 
50 struct notif_vm_bitmap {
51 	bool initialized;
52 	int do_bottom_half_value;
53 	uint64_t pending;
54 	uint64_t bound;
55 };
56 
57 static unsigned int spmc_notif_lock __nex_data = SPINLOCK_UNLOCK;
58 static bool spmc_notif_is_ready __nex_bss;
59 static int notif_intid __nex_data __maybe_unused = -1;
60 
61 /* ID used to look up the guest-specific struct notif_vm_bitmap */
62 static unsigned int notif_vm_bitmap_id __nex_bss;
63 /* Notification state when ns-virtualization isn't enabled */
64 static struct notif_vm_bitmap default_notif_vm_bitmap;
65 
66 /* Initialized in spmc_init() below */
67 uint16_t optee_endpoint_id __nex_bss;
68 uint16_t spmc_id __nex_bss;
69 #ifdef CFG_CORE_SEL1_SPMC
70 uint16_t spmd_id __nex_bss;
71 static const uint32_t my_part_props = FFA_PART_PROP_DIRECT_REQ_RECV |
72 				      FFA_PART_PROP_DIRECT_REQ_SEND |
73 #ifdef CFG_NS_VIRTUALIZATION
74 				      FFA_PART_PROP_NOTIF_CREATED |
75 				      FFA_PART_PROP_NOTIF_DESTROYED |
76 #endif
77 #ifdef ARM64
78 				      FFA_PART_PROP_AARCH64_STATE |
79 #endif
80 				      FFA_PART_PROP_IS_PE_ID;
81 
82 static uint32_t my_uuid_words[] = {
83 	/*
84 	 * - if the SPMC is in S-EL2 this UUID describes OP-TEE as an S-EL1
85 	 *   SP, or
86 	 * - if the SPMC is in S-EL1 then this UUID is for OP-TEE as a
87 	 *   logical partition, residing in the same exception level as the
88 	 *   SPMC
89 	 * UUID 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
90 	 */
91 	0xe0786148, 0xe311f8e7, 0x02005ebc, 0x1bc5d5a5,
92 };
93 
94 /*
95  * If struct ffa_rxtx::size is 0, RX/TX buffers are not mapped or initialized.
96  *
97  * struct ffa_rxtx::spinlock protects the variables below from concurrent
98  * access. This includes the use of the content of struct ffa_rxtx::rx and
99  * @frag_state_head.
100  *
101  * struct ffa_rxtx::tx_is_mine is true when we may write to struct
102  * ffa_rxtx::tx and false when it is owned by normal world.
103  *
104  * Note that we can't prevent normal world from updating the content of
105  * these buffers, so we must always read them carefully, even while we
106  * hold the lock.
107  */
108 
109 static struct ffa_rxtx my_rxtx __nex_bss;
110 
111 static bool is_nw_buf(struct ffa_rxtx *rxtx)
112 {
113 	return rxtx == &my_rxtx;
114 }
115 
116 static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
117 	SLIST_HEAD_INITIALIZER(&frag_state_head);
118 
119 #else
120 static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
121 static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
122 static struct ffa_rxtx my_rxtx = {
123 	.rx = __rx_buf,
124 	.tx = __tx_buf,
125 	.size = sizeof(__rx_buf),
126 };
127 #endif
128 
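/*
 * FF-A direct messages carry the sender endpoint ID in the upper 16 bits
 * of w1 and the receiver endpoint ID in the lower 16 bits. swap_src_dst()
 * below builds the w1 value for the response by exchanging the two halves
 * and get_sender_id() extracts the sender. As an illustration (values
 * chosen for the example only), a request with w1 == 0x80010002, that is
 * sender 0x8001 and receiver 0x0002, is answered with w1 == 0x00028001.
 */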
129 static uint32_t swap_src_dst(uint32_t src_dst)
130 {
131 	return (src_dst >> 16) | (src_dst << 16);
132 }
133 
134 static uint16_t get_sender_id(uint32_t src_dst)
135 {
136 	return src_dst >> 16;
137 }
138 
139 void spmc_set_args(struct thread_smc_1_2_regs *args, uint32_t fid,
140 		   uint32_t src_dst, uint32_t w2, uint32_t w3, uint32_t w4,
141 		   uint32_t w5)
142 {
143 	*args = (struct thread_smc_1_2_regs){
144 		.a0 = fid,
145 		.a1 = src_dst,
146 		.a2 = w2,
147 		.a3 = w3,
148 		.a4 = w4,
149 		.a5 = w5,
150 	};
151 }
152 
153 static void set_simple_ret_val(struct thread_smc_1_2_regs *args, int ffa_ret)
154 {
155 	if (ffa_ret)
156 		spmc_set_args(args, FFA_ERROR, 0, ffa_ret, 0, 0, 0);
157 	else
158 		spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0);
159 }
160 
161 uint32_t spmc_exchange_version(uint32_t vers, struct ffa_rxtx *rxtx)
162 {
163 	/*
164 	 * No locking; if the caller makes concurrent calls to this it's
165 	 * only making a mess for itself. We must be able to renegotiate
166 	 * the FF-A version in order to support differing versions between
167 	 * the loader and the driver.
168 	 */
169 	if (vers < FFA_VERSION_1_1)
170 		rxtx->ffa_vers = FFA_VERSION_1_0;
171 	else
172 		rxtx->ffa_vers = FFA_VERSION_1_1;
173 
174 	return rxtx->ffa_vers;
175 }
176 
177 static bool is_ffa_success(uint32_t fid)
178 {
179 #ifdef ARM64
180 	if (fid == FFA_SUCCESS_64)
181 		return true;
182 #endif
183 	return fid == FFA_SUCCESS_32;
184 }
185 
186 static int32_t get_ffa_ret_code(const struct thread_smc_args *args)
187 {
188 	if (is_ffa_success(args->a0))
189 		return FFA_OK;
190 	if (args->a0 == FFA_ERROR && args->a2)
191 		return args->a2;
192 	return FFA_NOT_SUPPORTED;
193 }
194 
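/*
 * Issue a simple FF-A call over SMCCC and translate the returned
 * registers into an FF-A status code: FFA_SUCCESS_32/64 maps to FFA_OK,
 * FFA_ERROR returns the error code carried in w2, and anything else is
 * treated as FFA_NOT_SUPPORTED.
 */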
195 static int ffa_simple_call(uint32_t fid, unsigned long a1, unsigned long a2,
196 			   unsigned long a3, unsigned long a4)
197 {
198 	struct thread_smc_args args = {
199 		.a0 = fid,
200 		.a1 = a1,
201 		.a2 = a2,
202 		.a3 = a3,
203 		.a4 = a4,
204 	};
205 
206 	thread_smccc(&args);
207 
208 	return get_ffa_ret_code(&args);
209 }
210 
211 static int __maybe_unused ffa_features(uint32_t id)
212 {
213 	return ffa_simple_call(FFA_FEATURES, id, 0, 0, 0);
214 }
215 
216 static int __maybe_unused ffa_set_notification(uint16_t dst, uint16_t src,
217 					       uint32_t flags, uint64_t bitmap)
218 {
219 	return ffa_simple_call(FFA_NOTIFICATION_SET,
220 			       SHIFT_U32(src, 16) | dst, flags,
221 			       low32_from_64(bitmap), high32_from_64(bitmap));
222 }
223 
224 #if defined(CFG_CORE_SEL1_SPMC)
225 static void handle_features(struct thread_smc_1_2_regs *args)
226 {
227 	uint32_t ret_fid = FFA_ERROR;
228 	uint32_t ret_w2 = FFA_NOT_SUPPORTED;
229 
230 	switch (args->a1) {
231 	case FFA_FEATURE_SCHEDULE_RECV_INTR:
232 		if (spmc_notif_is_ready) {
233 			ret_fid = FFA_SUCCESS_32;
234 			ret_w2 = notif_intid;
235 		}
236 		break;
237 
238 #ifdef ARM64
239 	case FFA_RXTX_MAP_64:
240 #endif
241 	case FFA_RXTX_MAP_32:
242 		ret_fid = FFA_SUCCESS_32;
243 		ret_w2 = 0; /* 4kB minimum buffer size and alignment boundary */
244 		break;
245 #ifdef ARM64
246 	case FFA_MEM_SHARE_64:
247 #endif
248 	case FFA_MEM_SHARE_32:
249 		ret_fid = FFA_SUCCESS_32;
250 		/*
251 		 * Partition manager supports transmission of a memory
252 		 * transaction descriptor in a buffer dynamically allocated
253 		 * by the endpoint.
254 		 */
255 		ret_w2 = BIT(0);
256 		break;
257 
258 	case FFA_ERROR:
259 	case FFA_VERSION:
260 	case FFA_SUCCESS_32:
261 #ifdef ARM64
262 	case FFA_SUCCESS_64:
263 #endif
264 	case FFA_FEATURES:
265 	case FFA_SPM_ID_GET:
266 	case FFA_MEM_FRAG_TX:
267 	case FFA_MEM_RECLAIM:
268 	case FFA_MSG_SEND_DIRECT_REQ_64:
269 	case FFA_MSG_SEND_DIRECT_REQ_32:
270 	case FFA_INTERRUPT:
271 	case FFA_PARTITION_INFO_GET:
272 	case FFA_RXTX_UNMAP:
273 	case FFA_RX_RELEASE:
274 	case FFA_FEATURE_MANAGED_EXIT_INTR:
275 	case FFA_NOTIFICATION_BITMAP_CREATE:
276 	case FFA_NOTIFICATION_BITMAP_DESTROY:
277 	case FFA_NOTIFICATION_BIND:
278 	case FFA_NOTIFICATION_UNBIND:
279 	case FFA_NOTIFICATION_SET:
280 	case FFA_NOTIFICATION_GET:
281 	case FFA_NOTIFICATION_INFO_GET_32:
282 #ifdef ARM64
283 	case FFA_NOTIFICATION_INFO_GET_64:
284 #endif
285 		ret_fid = FFA_SUCCESS_32;
286 		ret_w2 = FFA_PARAM_MBZ;
287 		break;
288 	default:
289 		break;
290 	}
291 
292 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
293 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
294 }
295 
296 static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
297 {
298 	tee_mm_entry_t *mm = NULL;
299 
300 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
301 		return FFA_INVALID_PARAMETERS;
302 
303 	mm = tee_mm_alloc(&core_virt_shm_pool, sz);
304 	if (!mm)
305 		return FFA_NO_MEMORY;
306 
307 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
308 					  sz / SMALL_PAGE_SIZE,
309 					  MEM_AREA_NSEC_SHM)) {
310 		tee_mm_free(mm);
311 		return FFA_INVALID_PARAMETERS;
312 	}
313 
314 	*va_ret = (void *)tee_mm_get_smem(mm);
315 	return 0;
316 }
317 
318 void spmc_handle_spm_id_get(struct thread_smc_1_2_regs *args)
319 {
320 	spmc_set_args(args, FFA_SUCCESS_32, FFA_PARAM_MBZ, spmc_id,
321 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
322 }
323 
324 static void unmap_buf(void *va, size_t sz)
325 {
326 	tee_mm_entry_t *mm = tee_mm_find(&core_virt_shm_pool, (vaddr_t)va);
327 
328 	assert(mm);
329 	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
330 	tee_mm_free(mm);
331 }
332 
333 void spmc_handle_rxtx_map(struct thread_smc_1_2_regs *args,
334 			  struct ffa_rxtx *rxtx)
335 {
336 	int rc = 0;
337 	unsigned int sz = 0;
338 	paddr_t rx_pa = 0;
339 	paddr_t tx_pa = 0;
340 	void *rx = NULL;
341 	void *tx = NULL;
342 
343 	cpu_spin_lock(&rxtx->spinlock);
344 
345 	if (args->a3 & GENMASK_64(63, 6)) {
346 		rc = FFA_INVALID_PARAMETERS;
347 		goto out;
348 	}
349 
350 	sz = args->a3 * SMALL_PAGE_SIZE;
351 	if (!sz) {
352 		rc = FFA_INVALID_PARAMETERS;
353 		goto out;
354 	}
355 	/* TX/RX are swapped compared to the caller */
356 	tx_pa = args->a2;
357 	rx_pa = args->a1;
358 
359 	if (rxtx->size) {
360 		rc = FFA_DENIED;
361 		goto out;
362 	}
363 
364 	/*
365 	 * If the buffer comes from an SP the address is virtual and already
366 	 * mapped.
367 	 */
368 	if (is_nw_buf(rxtx)) {
369 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
370 			enum teecore_memtypes mt = MEM_AREA_NEX_NSEC_SHM;
371 			bool tx_alloced = false;
372 
373 			/*
374 			 * With virtualization we establish this mapping in
375 			 * the nexus mapping which then is replicated to
376 			 * each partition.
377 			 *
378 			 * This means that this mapping must be done before
379 			 * any partition is created and then must not be
380 			 * changed.
381 			 */
382 
383 			/*
384 			 * core_mmu_add_mapping() may reuse previous
385 			 * mappings. First check if there are any mappings to
386 			 * reuse so we know how to clean up in case of
387 			 * failure.
388 			 */
389 			tx = phys_to_virt(tx_pa, mt, sz);
390 			rx = phys_to_virt(rx_pa, mt, sz);
391 			if (!tx) {
392 				tx = core_mmu_add_mapping(mt, tx_pa, sz);
393 				if (!tx) {
394 					rc = FFA_NO_MEMORY;
395 					goto out;
396 				}
397 				tx_alloced = true;
398 			}
399 			if (!rx)
400 				rx = core_mmu_add_mapping(mt, rx_pa, sz);
401 
402 			if (!rx) {
403 				if (tx_alloced && tx)
404 					core_mmu_remove_mapping(mt, tx, sz);
405 				rc = FFA_NO_MEMORY;
406 				goto out;
407 			}
408 		} else {
409 			rc = map_buf(tx_pa, sz, &tx);
410 			if (rc)
411 				goto out;
412 			rc = map_buf(rx_pa, sz, &rx);
413 			if (rc) {
414 				unmap_buf(tx, sz);
415 				goto out;
416 			}
417 		}
418 		rxtx->tx = tx;
419 		rxtx->rx = rx;
420 	} else {
421 		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
422 			rc = FFA_INVALID_PARAMETERS;
423 			goto out;
424 		}
425 
426 		if (!virt_to_phys((void *)tx_pa) ||
427 		    !virt_to_phys((void *)rx_pa)) {
428 			rc = FFA_INVALID_PARAMETERS;
429 			goto out;
430 		}
431 
432 		rxtx->tx = (void *)tx_pa;
433 		rxtx->rx = (void *)rx_pa;
434 	}
435 
436 	rxtx->size = sz;
437 	rxtx->tx_is_mine = true;
438 	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
439 	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
440 out:
441 	cpu_spin_unlock(&rxtx->spinlock);
442 	set_simple_ret_val(args, rc);
443 }
444 
445 void spmc_handle_rxtx_unmap(struct thread_smc_1_2_regs *args,
446 			    struct ffa_rxtx *rxtx)
447 {
448 	int rc = FFA_INVALID_PARAMETERS;
449 
450 	cpu_spin_lock(&rxtx->spinlock);
451 
452 	if (!rxtx->size)
453 		goto out;
454 
455 	/*
456 	 * We don't unmap the SP memory as the SP might still use it.
457 	 * We avoid making changes to nexus mappings at this stage since
458 	 * there currently isn't a way to replicate those changes to all
459 	 * partitions.
460 	 */
461 	if (is_nw_buf(rxtx) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
462 		unmap_buf(rxtx->rx, rxtx->size);
463 		unmap_buf(rxtx->tx, rxtx->size);
464 	}
465 	rxtx->size = 0;
466 	rxtx->rx = NULL;
467 	rxtx->tx = NULL;
468 	rc = 0;
469 out:
470 	cpu_spin_unlock(&rxtx->spinlock);
471 	set_simple_ret_val(args, rc);
472 }
473 
474 void spmc_handle_rx_release(struct thread_smc_1_2_regs *args,
475 			    struct ffa_rxtx *rxtx)
476 {
477 	int rc = 0;
478 
479 	cpu_spin_lock(&rxtx->spinlock);
480 	/* The sender's RX is our TX */
481 	if (!rxtx->size || rxtx->tx_is_mine) {
482 		rc = FFA_DENIED;
483 	} else {
484 		rc = 0;
485 		rxtx->tx_is_mine = true;
486 	}
487 	cpu_spin_unlock(&rxtx->spinlock);
488 
489 	set_simple_ret_val(args, rc);
490 }
491 
492 static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
493 {
494 	return !w0 && !w1 && !w2 && !w3;
495 }
496 
497 static bool is_my_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
498 {
499 	/*
500 	 * This depends on which UUID we have been assigned.
501 	 * TODO add a generic mechanism to obtain our UUID.
502 	 *
503 	 * The test below is for the hard coded UUID
504 	 * 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
505 	 */
506 	return w0 == my_uuid_words[0] && w1 == my_uuid_words[1] &&
507 	       w2 == my_uuid_words[2] && w3 == my_uuid_words[3];
508 }
509 
510 TEE_Result spmc_fill_partition_entry(uint32_t ffa_vers, void *buf, size_t blen,
511 				     size_t idx, uint16_t endpoint_id,
512 				     uint16_t execution_context,
513 				     uint32_t part_props,
514 				     const uint32_t uuid_words[4])
515 {
516 	struct ffa_partition_info_x *fpi = NULL;
517 	size_t fpi_size = sizeof(*fpi);
518 
519 	if (ffa_vers >= FFA_VERSION_1_1)
520 		fpi_size += FFA_UUID_SIZE;
521 
522 	if ((idx + 1) * fpi_size > blen)
523 		return TEE_ERROR_OUT_OF_MEMORY;
524 
525 	fpi = (void *)((vaddr_t)buf + idx * fpi_size);
526 	fpi->id = endpoint_id;
527 	/* Number of execution contexts implemented by this partition */
528 	fpi->execution_context = execution_context;
529 
530 	fpi->partition_properties = part_props;
531 
532 	/* In FF-A 1.0 only bits [2:0] are defined, let's mask others */
533 	if (ffa_vers < FFA_VERSION_1_1)
534 		fpi->partition_properties &= FFA_PART_PROP_DIRECT_REQ_RECV |
535 					     FFA_PART_PROP_DIRECT_REQ_SEND |
536 					     FFA_PART_PROP_INDIRECT_MSGS;
537 
538 	if (ffa_vers >= FFA_VERSION_1_1) {
539 		if (uuid_words)
540 			memcpy(fpi->uuid, uuid_words, FFA_UUID_SIZE);
541 		else
542 			memset(fpi->uuid, 0, FFA_UUID_SIZE);
543 	}
544 
545 	return TEE_SUCCESS;
546 }
547 
548 static int handle_partition_info_get_all(size_t *elem_count,
549 					 struct ffa_rxtx *rxtx, bool count_only)
550 {
551 	if (!count_only) {
552 		/* Add OP-TEE SP */
553 		if (spmc_fill_partition_entry(rxtx->ffa_vers, rxtx->tx,
554 					      rxtx->size, 0, optee_endpoint_id,
555 					      CFG_TEE_CORE_NB_CORE,
556 					      my_part_props, my_uuid_words))
557 			return FFA_NO_MEMORY;
558 	}
559 	*elem_count = 1;
560 
561 	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
562 		if (sp_partition_info_get(rxtx->ffa_vers, rxtx->tx, rxtx->size,
563 					  NULL, elem_count, count_only))
564 			return FFA_NO_MEMORY;
565 	}
566 
567 	return FFA_OK;
568 }
569 
570 void spmc_handle_partition_info_get(struct thread_smc_1_2_regs *args,
571 				    struct ffa_rxtx *rxtx)
572 {
573 	TEE_Result res = TEE_SUCCESS;
574 	uint32_t ret_fid = FFA_ERROR;
575 	uint32_t fpi_size = 0;
576 	uint32_t rc = 0;
577 	bool count_only = args->a5 & FFA_PARTITION_INFO_GET_COUNT_FLAG;
578 
579 	if (!count_only) {
580 		cpu_spin_lock(&rxtx->spinlock);
581 
582 		if (!rxtx->size || !rxtx->tx_is_mine) {
583 			rc = FFA_BUSY;
584 			goto out;
585 		}
586 	}
587 
588 	if (is_nil_uuid(args->a1, args->a2, args->a3, args->a4)) {
589 		size_t elem_count = 0;
590 
591 		ret_fid = handle_partition_info_get_all(&elem_count, rxtx,
592 							count_only);
593 
594 		if (ret_fid) {
595 			rc = ret_fid;
596 			ret_fid = FFA_ERROR;
597 		} else {
598 			ret_fid = FFA_SUCCESS_32;
599 			rc = elem_count;
600 		}
601 
602 		goto out;
603 	}
604 
605 	if (is_my_uuid(args->a1, args->a2, args->a3, args->a4)) {
606 		if (!count_only) {
607 			res = spmc_fill_partition_entry(rxtx->ffa_vers,
608 							rxtx->tx, rxtx->size, 0,
609 							optee_endpoint_id,
610 							CFG_TEE_CORE_NB_CORE,
611 							my_part_props,
612 							my_uuid_words);
613 			if (res) {
614 				ret_fid = FFA_ERROR;
615 				rc = FFA_INVALID_PARAMETERS;
616 				goto out;
617 			}
618 		}
619 		rc = 1;
620 	} else if (IS_ENABLED(CFG_SECURE_PARTITION)) {
621 		uint32_t uuid_array[4] = { 0 };
622 		TEE_UUID uuid = { };
623 		size_t count = 0;
624 
625 		uuid_array[0] = args->a1;
626 		uuid_array[1] = args->a2;
627 		uuid_array[2] = args->a3;
628 		uuid_array[3] = args->a4;
629 		tee_uuid_from_octets(&uuid, (uint8_t *)uuid_array);
630 
631 		res = sp_partition_info_get(rxtx->ffa_vers, rxtx->tx,
632 					    rxtx->size, &uuid, &count,
633 					    count_only);
634 		if (res != TEE_SUCCESS) {
635 			ret_fid = FFA_ERROR;
636 			rc = FFA_INVALID_PARAMETERS;
637 			goto out;
638 		}
639 		rc = count;
640 	} else {
641 		ret_fid = FFA_ERROR;
642 		rc = FFA_INVALID_PARAMETERS;
643 		goto out;
644 	}
645 
646 	ret_fid = FFA_SUCCESS_32;
647 
648 out:
649 	if (ret_fid == FFA_SUCCESS_32 && !count_only &&
650 	    rxtx->ffa_vers >= FFA_VERSION_1_1)
651 		fpi_size = sizeof(struct ffa_partition_info_x) + FFA_UUID_SIZE;
652 
653 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, fpi_size,
654 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
655 	if (!count_only) {
656 		rxtx->tx_is_mine = false;
657 		cpu_spin_unlock(&rxtx->spinlock);
658 	}
659 }
660 
661 static void spmc_handle_run(struct thread_smc_1_2_regs *args)
662 {
663 	uint16_t endpoint = FFA_TARGET_INFO_GET_SP_ID(args->a1);
664 	uint16_t thread_id = FFA_TARGET_INFO_GET_VCPU_ID(args->a1);
665 	uint32_t rc = FFA_OK;
666 
667 	if (endpoint != optee_endpoint_id) {
668 		/*
669 		 * The endpoint should be an SP, try to resume the SP from
670 		 * preempted into busy state.
671 		 */
672 		rc = spmc_sp_resume_from_preempted(endpoint);
673 		if (rc)
674 			goto out;
675 	}
676 
677 	thread_resume_from_rpc(thread_id, 0, 0, 0, 0);
678 
679 	/* thread_resume_from_rpc() returns only if the thread_id is invalid */
680 	rc = FFA_INVALID_PARAMETERS;
681 
682 out:
683 	set_simple_ret_val(args, rc);
684 }
685 #endif /*CFG_CORE_SEL1_SPMC*/
686 
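/*
 * Look up the notification state for a VM. With CFG_NS_VIRTUALIZATION
 * each guest partition carries its own struct notif_vm_bitmap in the
 * guest specific data slot registered as notif_vm_bitmap_id; without
 * virtualization there is only default_notif_vm_bitmap and only vm_id 0
 * is accepted. Returns NULL when the VM is unknown.
 */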
687 static struct notif_vm_bitmap *get_notif_vm_bitmap(struct guest_partition *prtn,
688 						   uint16_t vm_id)
689 {
690 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
691 		if (!prtn)
692 			return NULL;
693 		assert(vm_id == virt_get_guest_id(prtn));
694 		return virt_get_guest_spec_data(prtn, notif_vm_bitmap_id);
695 	}
696 	if (vm_id)
697 		return NULL;
698 	return &default_notif_vm_bitmap;
699 }
700 
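/*
 * Backend for the OPTEE_FFA_ENABLE_ASYNC_NOTIF blocking service: record
 * which notification value normal world wants for "do bottom half"
 * requests and deliver NOTIF_EVENT_STARTED for the guest. The TEE_Result
 * is passed back in the direct response.
 */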
701 static uint32_t spmc_enable_async_notif(uint32_t bottom_half_value,
702 					uint16_t vm_id)
703 {
704 	struct guest_partition *prtn = NULL;
705 	struct notif_vm_bitmap *nvb = NULL;
706 	uint32_t old_itr_status = 0;
707 	uint32_t res = 0;
708 
709 	if (!spmc_notif_is_ready) {
710 		/*
711 		 * This should never happen, not if normal world respects the
712 		 * exchanged capabilities.
713 		 */
714 		EMSG("Asynchronous notifications are not ready");
715 		return TEE_ERROR_NOT_IMPLEMENTED;
716 	}
717 
718 	if (bottom_half_value >= OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE) {
719 		EMSG("Invalid bottom half value %"PRIu32, bottom_half_value);
720 		return TEE_ERROR_BAD_PARAMETERS;
721 	}
722 
723 	prtn = virt_get_guest(vm_id);
724 	nvb = get_notif_vm_bitmap(prtn, vm_id);
725 	if (!nvb) {
726 		res = TEE_ERROR_BAD_PARAMETERS;
727 		goto out;
728 	}
729 
730 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
731 	nvb->do_bottom_half_value = bottom_half_value;
732 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
733 
734 	notif_deliver_atomic_event(NOTIF_EVENT_STARTED, vm_id);
735 	res = TEE_SUCCESS;
736 out:
737 	virt_put_guest(prtn);
738 	return res;
739 }
740 
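/*
 * Yielding OP-TEE calls arrive as direct requests with
 * OPTEE_FFA_YIELDING_CALL_BIT set in w3. A new call allocates and starts
 * a thread via thread_alloc_and_run(); OPTEE_FFA_YIELDING_CALL_RESUME
 * resumes a suspended thread with the RPC return values from w4..w6 and
 * the thread ID from w7. Neither path returns here on success, so
 * reaching the end of this function means the call failed and an error
 * is reported in the direct response.
 */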
741 static void handle_yielding_call(struct thread_smc_1_2_regs *args,
742 				 uint32_t direct_resp_fid)
743 {
744 	TEE_Result res = 0;
745 
746 	thread_check_canaries();
747 
748 #ifdef ARM64
749 	/* Saving this for an eventual RPC */
750 	thread_get_core_local()->direct_resp_fid = direct_resp_fid;
751 #endif
752 
753 	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
754 		/* Note connection to struct thread_rpc_arg::ret */
755 		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
756 				       0);
757 		res = TEE_ERROR_BAD_PARAMETERS;
758 	} else {
759 		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
760 				     args->a6, args->a7);
761 		res = TEE_ERROR_BUSY;
762 	}
763 	spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1),
764 		      0, res, 0, 0);
765 }
766 
767 static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
768 {
769 	uint64_t cookie = reg_pair_to_64(a5, a4);
770 	uint32_t res = 0;
771 
772 	res = mobj_ffa_unregister_by_cookie(cookie);
773 	switch (res) {
774 	case TEE_SUCCESS:
775 	case TEE_ERROR_ITEM_NOT_FOUND:
776 		return 0;
777 	case TEE_ERROR_BUSY:
778 		EMSG("res %#"PRIx32, res);
779 		return FFA_BUSY;
780 	default:
781 		EMSG("res %#"PRIx32, res);
782 		return FFA_INVALID_PARAMETERS;
783 	}
784 }
785 
786 static void handle_blocking_call(struct thread_smc_1_2_regs *args,
787 				 uint32_t direct_resp_fid)
788 {
789 	uint32_t sec_caps = 0;
790 
791 	switch (args->a3) {
792 	case OPTEE_FFA_GET_API_VERSION:
793 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
794 			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
795 			      0);
796 		break;
797 	case OPTEE_FFA_GET_OS_VERSION:
798 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
799 			      CFG_OPTEE_REVISION_MAJOR,
800 			      CFG_OPTEE_REVISION_MINOR,
801 			      TEE_IMPL_GIT_SHA1 >> 32);
802 		break;
803 	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
804 		sec_caps = OPTEE_FFA_SEC_CAP_ARG_OFFSET;
805 		if (spmc_notif_is_ready)
806 			sec_caps |= OPTEE_FFA_SEC_CAP_ASYNC_NOTIF;
807 		if (IS_ENABLED(CFG_RPMB_ANNOUNCE_PROBE_CAP))
808 			sec_caps |= OPTEE_FFA_SEC_CAP_RPMB_PROBE;
809 		spmc_set_args(args, direct_resp_fid,
810 			      swap_src_dst(args->a1), 0, 0,
811 			      THREAD_RPC_MAX_NUM_PARAMS, sec_caps);
812 		break;
813 	case OPTEE_FFA_UNREGISTER_SHM:
814 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
815 			      handle_unregister_shm(args->a4, args->a5), 0, 0);
816 		break;
817 	case OPTEE_FFA_ENABLE_ASYNC_NOTIF:
818 		spmc_set_args(args, direct_resp_fid,
819 			      swap_src_dst(args->a1), 0,
820 			      spmc_enable_async_notif(args->a4,
821 						      FFA_SRC(args->a1)),
822 			      0, 0);
823 		break;
824 	default:
825 		EMSG("Unhandled blocking service ID %#"PRIx32,
826 		     (uint32_t)args->a3);
827 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
828 			      TEE_ERROR_BAD_PARAMETERS, 0, 0);
829 	}
830 }
831 
832 static void handle_framework_direct_request(struct thread_smc_1_2_regs *args,
833 					    struct ffa_rxtx *rxtx,
834 					    uint32_t direct_resp_fid)
835 {
836 	uint32_t w0 = FFA_ERROR;
837 	uint32_t w1 = FFA_PARAM_MBZ;
838 	uint32_t w2 = FFA_NOT_SUPPORTED;
839 	uint32_t w3 = FFA_PARAM_MBZ;
840 
841 	switch (args->a2 & FFA_MSG_TYPE_MASK) {
842 	case FFA_MSG_SEND_VM_CREATED:
843 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
844 			uint16_t guest_id = args->a5;
845 			TEE_Result res = virt_guest_created(guest_id);
846 
847 			w0 = direct_resp_fid;
848 			w1 = swap_src_dst(args->a1);
849 			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_CREATED;
850 			if (res == TEE_SUCCESS)
851 				w3 = FFA_OK;
852 			else if (res == TEE_ERROR_OUT_OF_MEMORY)
853 				w3 = FFA_DENIED;
854 			else
855 				w3 = FFA_INVALID_PARAMETERS;
856 		}
857 		break;
858 	case FFA_MSG_SEND_VM_DESTROYED:
859 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
860 			uint16_t guest_id = args->a5;
861 			TEE_Result res = virt_guest_destroyed(guest_id);
862 
863 			w0 = direct_resp_fid;
864 			w1 = swap_src_dst(args->a1);
865 			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_DESTROYED;
866 			if (res == TEE_SUCCESS)
867 				w3 = FFA_OK;
868 			else
869 				w3 = FFA_INVALID_PARAMETERS;
870 		}
871 		break;
872 	case FFA_MSG_VERSION_REQ:
873 		w0 = direct_resp_fid;
874 		w1 = swap_src_dst(args->a1);
875 		w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_VERSION_RESP;
876 		w3 = spmc_exchange_version(args->a3, rxtx);
877 		break;
878 	default:
879 		break;
880 	}
881 	spmc_set_args(args, w0, w1, w2, w3, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
882 }
883 
884 static void handle_direct_request(struct thread_smc_1_2_regs *args,
885 				  struct ffa_rxtx *rxtx)
886 {
887 	uint32_t direct_resp_fid = 0;
888 
889 	if (IS_ENABLED(CFG_SECURE_PARTITION) &&
890 	    FFA_DST(args->a1) != spmc_id &&
891 	    FFA_DST(args->a1) != optee_endpoint_id) {
892 		spmc_sp_start_thread(args);
893 		return;
894 	}
895 
896 	if (OPTEE_SMC_IS_64(args->a0))
897 		direct_resp_fid = FFA_MSG_SEND_DIRECT_RESP_64;
898 	else
899 		direct_resp_fid = FFA_MSG_SEND_DIRECT_RESP_32;
900 
901 	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
902 		handle_framework_direct_request(args, rxtx, direct_resp_fid);
903 		return;
904 	}
905 
906 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
907 	    virt_set_guest(get_sender_id(args->a1))) {
908 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
909 			      TEE_ERROR_ITEM_NOT_FOUND, 0, 0);
910 		return;
911 	}
912 
913 	if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
914 		handle_yielding_call(args, direct_resp_fid);
915 	else
916 		handle_blocking_call(args, direct_resp_fid);
917 
918 	/*
919 	 * Note that handle_yielding_call() typically only returns if a
920 	 * thread cannot be allocated or found. virt_unset_guest() is also
921 	 * called from thread_state_suspend() and thread_state_free().
922 	 */
923 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
924 		virt_unset_guest();
925 }
926 
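/*
 * Parse the version dependent FF-A memory transaction descriptor at @buf
 * into the version independent struct ffa_mem_transaction_x. All fields
 * are read with READ_ONCE() since the buffer may be shared with normal
 * world. Returns 0 on success or an FFA_* error code.
 */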
927 int spmc_read_mem_transaction(uint32_t ffa_vers, void *buf, size_t blen,
928 			      struct ffa_mem_transaction_x *trans)
929 {
930 	uint16_t mem_reg_attr = 0;
931 	uint32_t flags = 0;
932 	uint32_t count = 0;
933 	uint32_t offs = 0;
934 	uint32_t size = 0;
935 	size_t n = 0;
936 
937 	if (!IS_ALIGNED_WITH_TYPE(buf, uint64_t))
938 		return FFA_INVALID_PARAMETERS;
939 
940 	if (ffa_vers >= FFA_VERSION_1_1) {
941 		struct ffa_mem_transaction_1_1 *descr = NULL;
942 
943 		if (blen < sizeof(*descr))
944 			return FFA_INVALID_PARAMETERS;
945 
946 		descr = buf;
947 		trans->sender_id = READ_ONCE(descr->sender_id);
948 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
949 		flags = READ_ONCE(descr->flags);
950 		trans->global_handle = READ_ONCE(descr->global_handle);
951 		trans->tag = READ_ONCE(descr->tag);
952 
953 		count = READ_ONCE(descr->mem_access_count);
954 		size = READ_ONCE(descr->mem_access_size);
955 		offs = READ_ONCE(descr->mem_access_offs);
956 	} else {
957 		struct ffa_mem_transaction_1_0 *descr = NULL;
958 
959 		if (blen < sizeof(*descr))
960 			return FFA_INVALID_PARAMETERS;
961 
962 		descr = buf;
963 		trans->sender_id = READ_ONCE(descr->sender_id);
964 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
965 		flags = READ_ONCE(descr->flags);
966 		trans->global_handle = READ_ONCE(descr->global_handle);
967 		trans->tag = READ_ONCE(descr->tag);
968 
969 		count = READ_ONCE(descr->mem_access_count);
970 		size = sizeof(struct ffa_mem_access);
971 		offs = offsetof(struct ffa_mem_transaction_1_0,
972 				mem_access_array);
973 	}
974 
975 	if (mem_reg_attr > UINT8_MAX || flags > UINT8_MAX ||
976 	    size > UINT8_MAX || count > UINT8_MAX || offs > UINT16_MAX)
977 		return FFA_INVALID_PARAMETERS;
978 
979 	/* Check that the endpoint memory access descriptor array fits */
980 	if (MUL_OVERFLOW(size, count, &n) || ADD_OVERFLOW(offs, n, &n) ||
981 	    n > blen)
982 		return FFA_INVALID_PARAMETERS;
983 
984 	trans->mem_reg_attr = mem_reg_attr;
985 	trans->flags = flags;
986 	trans->mem_access_size = size;
987 	trans->mem_access_count = count;
988 	trans->mem_access_offs = offs;
989 	return 0;
990 }
991 
992 #if defined(CFG_CORE_SEL1_SPMC)
993 static int get_acc_perms(vaddr_t mem_acc_base, unsigned int mem_access_size,
994 			 unsigned int mem_access_count, uint8_t *acc_perms,
995 			 unsigned int *region_offs)
996 {
997 	struct ffa_mem_access_perm *descr = NULL;
998 	struct ffa_mem_access *mem_acc = NULL;
999 	unsigned int n = 0;
1000 
1001 	for (n = 0; n < mem_access_count; n++) {
1002 		mem_acc = (void *)(mem_acc_base + mem_access_size * n);
1003 		descr = &mem_acc->access_perm;
1004 		if (READ_ONCE(descr->endpoint_id) == optee_endpoint_id) {
1005 			*acc_perms = READ_ONCE(descr->perm);
1006 			*region_offs = READ_ONCE(mem_acc->region_offs);
1007 			return 0;
1008 		}
1009 	}
1010 
1011 	return FFA_INVALID_PARAMETERS;
1012 }
1013 
1014 static int mem_share_init(struct ffa_mem_transaction_x *mem_trans, void *buf,
1015 			  size_t blen, unsigned int *page_count,
1016 			  unsigned int *region_count, size_t *addr_range_offs)
1017 {
1018 	const uint16_t exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
1019 	const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
1020 	struct ffa_mem_region *region_descr = NULL;
1021 	unsigned int region_descr_offs = 0;
1022 	uint8_t mem_acc_perm = 0;
1023 	size_t n = 0;
1024 
1025 	if (mem_trans->mem_reg_attr != exp_mem_reg_attr)
1026 		return FFA_INVALID_PARAMETERS;
1027 
1028 	/* Check that the access permissions matches what's expected */
1029 	if (get_acc_perms((vaddr_t)buf + mem_trans->mem_access_offs,
1030 			  mem_trans->mem_access_size,
1031 			  mem_trans->mem_access_count,
1032 			  &mem_acc_perm, &region_descr_offs) ||
1033 	    mem_acc_perm != exp_mem_acc_perm)
1034 		return FFA_INVALID_PARAMETERS;
1035 
1036 	/* Check that the Composite memory region descriptor fits */
1037 	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
1038 	    n > blen)
1039 		return FFA_INVALID_PARAMETERS;
1040 
1041 	if (!IS_ALIGNED_WITH_TYPE((vaddr_t)buf + region_descr_offs,
1042 				  struct ffa_mem_region))
1043 		return FFA_INVALID_PARAMETERS;
1044 
1045 	region_descr = (struct ffa_mem_region *)((vaddr_t)buf +
1046 						 region_descr_offs);
1047 	*page_count = READ_ONCE(region_descr->total_page_count);
1048 	*region_count = READ_ONCE(region_descr->address_range_count);
1049 	*addr_range_offs = n;
1050 	return 0;
1051 }
1052 
1053 static int add_mem_share_helper(struct mem_share_state *s, void *buf,
1054 				size_t flen)
1055 {
1056 	unsigned int region_count = flen / sizeof(struct ffa_address_range);
1057 	struct ffa_address_range *arange = NULL;
1058 	unsigned int n = 0;
1059 
1060 	if (region_count > s->region_count)
1061 		region_count = s->region_count;
1062 
1063 	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range))
1064 		return FFA_INVALID_PARAMETERS;
1065 	arange = buf;
1066 
1067 	for (n = 0; n < region_count; n++) {
1068 		unsigned int page_count = READ_ONCE(arange[n].page_count);
1069 		uint64_t addr = READ_ONCE(arange[n].address);
1070 
1071 		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
1072 					  addr, page_count))
1073 			return FFA_INVALID_PARAMETERS;
1074 	}
1075 
1076 	s->region_count -= region_count;
1077 	if (s->region_count)
1078 		return region_count * sizeof(*arange);
1079 
1080 	if (s->current_page_idx != s->page_count)
1081 		return FFA_INVALID_PARAMETERS;
1082 
1083 	return 0;
1084 }
1085 
1086 static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
1087 {
1088 	int rc = 0;
1089 
1090 	rc = add_mem_share_helper(&s->share, buf, flen);
1091 	if (rc >= 0) {
1092 		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
1093 			/* We're not at the end of the descriptor yet */
1094 			if (s->share.region_count)
1095 				return s->frag_offset;
1096 
1097 			/* We're done */
1098 			rc = 0;
1099 		} else {
1100 			rc = FFA_INVALID_PARAMETERS;
1101 		}
1102 	}
1103 
1104 	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
1105 	if (rc < 0)
1106 		mobj_ffa_sel1_spmc_delete(s->share.mf);
1107 	else
1108 		mobj_ffa_push_to_inactive(s->share.mf);
1109 	free(s);
1110 
1111 	return rc;
1112 }
1113 
1114 static bool is_sp_share(struct ffa_mem_transaction_x *mem_trans,
1115 			void *buf)
1116 {
1117 	struct ffa_mem_access_perm *perm = NULL;
1118 	struct ffa_mem_access *mem_acc = NULL;
1119 
1120 	if (!IS_ENABLED(CFG_SECURE_PARTITION))
1121 		return false;
1122 
1123 	if (mem_trans->mem_access_count < 1)
1124 		return false;
1125 
1126 	mem_acc = (void *)((vaddr_t)buf + mem_trans->mem_access_offs);
1127 	perm = &mem_acc->access_perm;
1128 
1129 	/*
1130 	 * perm->endpoint_id is read here only to check if the endpoint is
1131 	 * OP-TEE. It is read again later on, but with additional checks
1132 	 * to make sure that the data is correct.
1133 	 */
1134 	return READ_ONCE(perm->endpoint_id) != optee_endpoint_id;
1135 }
1136 
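/*
 * Register a memory share described by a possibly fragmented transaction
 * descriptor. When flen == blen the whole descriptor is present and the
 * new mobj is pushed to the inactive list right away; otherwise a struct
 * mem_frag_state is queued and the remaining address ranges are expected
 * via FFA_MEM_FRAG_TX. A positive return value is the number of bytes
 * consumed so far, 0 means done and a negative value is an FFA_* error
 * code.
 */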
1137 static int add_mem_share(struct ffa_mem_transaction_x *mem_trans,
1138 			 tee_mm_entry_t *mm, void *buf, size_t blen,
1139 			 size_t flen, uint64_t *global_handle)
1140 {
1141 	int rc = 0;
1142 	struct mem_share_state share = { };
1143 	size_t addr_range_offs = 0;
1144 	uint64_t cookie = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
1145 	size_t n = 0;
1146 
1147 	rc = mem_share_init(mem_trans, buf, flen, &share.page_count,
1148 			    &share.region_count, &addr_range_offs);
1149 	if (rc)
1150 		return rc;
1151 
1152 	if (!share.page_count || !share.region_count)
1153 		return FFA_INVALID_PARAMETERS;
1154 
1155 	if (MUL_OVERFLOW(share.region_count,
1156 			 sizeof(struct ffa_address_range), &n) ||
1157 	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
1158 		return FFA_INVALID_PARAMETERS;
1159 
1160 	if (mem_trans->global_handle)
1161 		cookie = mem_trans->global_handle;
1162 	share.mf = mobj_ffa_sel1_spmc_new(cookie, share.page_count);
1163 	if (!share.mf)
1164 		return FFA_NO_MEMORY;
1165 
1166 	if (flen != blen) {
1167 		struct mem_frag_state *s = calloc(1, sizeof(*s));
1168 
1169 		if (!s) {
1170 			rc = FFA_NO_MEMORY;
1171 			goto err;
1172 		}
1173 		s->share = share;
1174 		s->mm = mm;
1175 		s->frag_offset = addr_range_offs;
1176 
1177 		SLIST_INSERT_HEAD(&frag_state_head, s, link);
1178 		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
1179 					flen - addr_range_offs);
1180 
1181 		if (rc >= 0)
1182 			*global_handle = mobj_ffa_get_cookie(share.mf);
1183 
1184 		return rc;
1185 	}
1186 
1187 	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
1188 				  flen - addr_range_offs);
1189 	if (rc) {
1190 		/*
1191 		 * A positive number of consumed bytes may be returned instead
1192 		 * of 0 when the descriptor isn't complete; that's an error here.
1193 		 */
1194 		rc = FFA_INVALID_PARAMETERS;
1195 		goto err;
1196 	}
1197 
1198 	*global_handle = mobj_ffa_push_to_inactive(share.mf);
1199 
1200 	return 0;
1201 err:
1202 	mobj_ffa_sel1_spmc_delete(share.mf);
1203 	return rc;
1204 }
1205 
1206 static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
1207 				 unsigned int page_count,
1208 				 uint64_t *global_handle, struct ffa_rxtx *rxtx)
1209 {
1210 	struct ffa_mem_transaction_x mem_trans = { };
1211 	int rc = 0;
1212 	size_t len = 0;
1213 	void *buf = NULL;
1214 	tee_mm_entry_t *mm = NULL;
1215 	vaddr_t offs = pbuf & SMALL_PAGE_MASK;
1216 
1217 	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
1218 		return FFA_INVALID_PARAMETERS;
1219 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
1220 		return FFA_INVALID_PARAMETERS;
1221 
1222 	/*
1223 	 * Check that the length reported in flen is covered by len even
1224 	 * if the offset is taken into account.
1225 	 */
1226 	if (len < flen || len - offs < flen)
1227 		return FFA_INVALID_PARAMETERS;
1228 
1229 	mm = tee_mm_alloc(&core_virt_shm_pool, len);
1230 	if (!mm)
1231 		return FFA_NO_MEMORY;
1232 
1233 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
1234 					  page_count, MEM_AREA_NSEC_SHM)) {
1235 		rc = FFA_INVALID_PARAMETERS;
1236 		goto out;
1237 	}
1238 	buf = (void *)(tee_mm_get_smem(mm) + offs);
1239 
1240 	cpu_spin_lock(&rxtx->spinlock);
1241 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, buf, flen, &mem_trans);
1242 	if (rc)
1243 		goto unlock;
1244 
1245 	if (is_sp_share(&mem_trans, buf)) {
1246 		rc = spmc_sp_add_share(&mem_trans, buf, blen, flen,
1247 				       global_handle, NULL);
1248 		goto unlock;
1249 	}
1250 
1251 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1252 	    virt_set_guest(mem_trans.sender_id)) {
1253 		rc = FFA_DENIED;
1254 		goto unlock;
1255 	}
1256 
1257 	rc = add_mem_share(&mem_trans, mm, buf, blen, flen, global_handle);
1258 
1259 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1260 		virt_unset_guest();
1261 
1262 unlock:
1263 	cpu_spin_unlock(&rxtx->spinlock);
1264 	if (rc > 0)
1265 		return rc;
1266 
1267 	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1268 out:
1269 	tee_mm_free(mm);
1270 	return rc;
1271 }
1272 
1273 static int handle_mem_share_rxbuf(size_t blen, size_t flen,
1274 				  uint64_t *global_handle,
1275 				  struct ffa_rxtx *rxtx)
1276 {
1277 	struct ffa_mem_transaction_x mem_trans = { };
1278 	int rc = FFA_DENIED;
1279 
1280 	cpu_spin_lock(&rxtx->spinlock);
1281 
1282 	if (!rxtx->rx || flen > rxtx->size)
1283 		goto out;
1284 
1285 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, flen,
1286 				       &mem_trans);
1287 	if (rc)
1288 		goto out;
1289 	if (is_sp_share(&mem_trans, rxtx->rx)) {
1290 		rc = spmc_sp_add_share(&mem_trans, rxtx, blen, flen,
1291 				       global_handle, NULL);
1292 		goto out;
1293 	}
1294 
1295 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1296 	    virt_set_guest(mem_trans.sender_id))
1297 		goto out;
1298 
1299 	rc = add_mem_share(&mem_trans, NULL, rxtx->rx, blen, flen,
1300 			   global_handle);
1301 
1302 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1303 		virt_unset_guest();
1304 
1305 out:
1306 	cpu_spin_unlock(&rxtx->spinlock);
1307 
1308 	return rc;
1309 }
1310 
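/*
 * FFA_MEM_SHARE register usage as handled below: w1 is the total length
 * of the transaction descriptor, w2 the length of the first fragment, w3
 * the address of a caller allocated buffer holding the descriptor (0
 * when the RX buffer is used instead) and w4 the page count of that
 * buffer. On completion the 64-bit global handle is returned in w2/w3
 * with FFA_SUCCESS_32, or in w1/w2 with FFA_MEM_FRAG_RX and the consumed
 * length in w3 when more fragments are expected.
 */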
1311 static void handle_mem_share(struct thread_smc_1_2_regs *args,
1312 			     struct ffa_rxtx *rxtx)
1313 {
1314 	uint32_t tot_len = args->a1;
1315 	uint32_t frag_len = args->a2;
1316 	uint64_t addr = args->a3;
1317 	uint32_t page_count = args->a4;
1318 	uint32_t ret_w1 = 0;
1319 	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
1320 	uint32_t ret_w3 = 0;
1321 	uint32_t ret_fid = FFA_ERROR;
1322 	uint64_t global_handle = 0;
1323 	int rc = 0;
1324 
1325 	/* Check that the MBZs are indeed 0 */
1326 	if (args->a5 || args->a6 || args->a7)
1327 		goto out;
1328 
1329 	/* Check that fragment length doesn't exceed total length */
1330 	if (frag_len > tot_len)
1331 		goto out;
1332 
1333 	/* Check for 32-bit calling convention */
1334 	if (args->a0 == FFA_MEM_SHARE_32)
1335 		addr &= UINT32_MAX;
1336 
1337 	if (!addr) {
1338 		/*
1339 		 * The memory transaction descriptor is passed via our rx
1340 		 * buffer.
1341 		 */
1342 		if (page_count)
1343 			goto out;
1344 		rc = handle_mem_share_rxbuf(tot_len, frag_len, &global_handle,
1345 					    rxtx);
1346 	} else {
1347 		rc = handle_mem_share_tmem(addr, tot_len, frag_len, page_count,
1348 					   &global_handle, rxtx);
1349 	}
1350 	if (rc < 0) {
1351 		ret_w2 = rc;
1352 	} else if (rc > 0) {
1353 		ret_fid = FFA_MEM_FRAG_RX;
1354 		ret_w3 = rc;
1355 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1356 	} else {
1357 		ret_fid = FFA_SUCCESS_32;
1358 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1359 	}
1360 out:
1361 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1362 }
1363 
1364 static struct mem_frag_state *get_frag_state(uint64_t global_handle)
1365 {
1366 	struct mem_frag_state *s = NULL;
1367 
1368 	SLIST_FOREACH(s, &frag_state_head, link)
1369 		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
1370 			return s;
1371 
1372 	return NULL;
1373 }
1374 
1375 static void handle_mem_frag_tx(struct thread_smc_1_2_regs *args,
1376 			       struct ffa_rxtx *rxtx)
1377 {
1378 	uint64_t global_handle = reg_pair_to_64(args->a2, args->a1);
1379 	size_t flen = args->a3;
1380 	uint32_t endpoint_id = args->a4;
1381 	struct mem_frag_state *s = NULL;
1382 	tee_mm_entry_t *mm = NULL;
1383 	unsigned int page_count = 0;
1384 	void *buf = NULL;
1385 	uint32_t ret_w1 = 0;
1386 	uint32_t ret_w2 = 0;
1387 	uint32_t ret_w3 = 0;
1388 	uint32_t ret_fid = 0;
1389 	int rc = 0;
1390 
1391 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1392 		uint16_t guest_id = endpoint_id >> 16;
1393 
1394 		if (!guest_id || virt_set_guest(guest_id)) {
1395 			rc = FFA_INVALID_PARAMETERS;
1396 			goto out_set_rc;
1397 		}
1398 	}
1399 
1400 	/*
1401 	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
1402 	 * requests.
1403 	 */
1404 
1405 	cpu_spin_lock(&rxtx->spinlock);
1406 
1407 	s = get_frag_state(global_handle);
1408 	if (!s) {
1409 		rc = FFA_INVALID_PARAMETERS;
1410 		goto out;
1411 	}
1412 
1413 	mm = s->mm;
1414 	if (mm) {
1415 		if (flen > tee_mm_get_bytes(mm)) {
1416 			rc = FFA_INVALID_PARAMETERS;
1417 			goto out;
1418 		}
1419 		page_count = s->share.page_count;
1420 		buf = (void *)tee_mm_get_smem(mm);
1421 	} else {
1422 		if (flen > rxtx->size) {
1423 			rc = FFA_INVALID_PARAMETERS;
1424 			goto out;
1425 		}
1426 		buf = rxtx->rx;
1427 	}
1428 
1429 	rc = add_mem_share_frag(s, buf, flen);
1430 out:
1431 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1432 		virt_unset_guest();
1433 
1434 	cpu_spin_unlock(&rxtx->spinlock);
1435 
1436 	if (rc <= 0 && mm) {
1437 		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1438 		tee_mm_free(mm);
1439 	}
1440 
1441 out_set_rc:
1442 	if (rc < 0) {
1443 		ret_fid = FFA_ERROR;
1444 		ret_w2 = rc;
1445 	} else if (rc > 0) {
1446 		ret_fid = FFA_MEM_FRAG_RX;
1447 		ret_w3 = rc;
1448 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1449 	} else {
1450 		ret_fid = FFA_SUCCESS_32;
1451 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1452 	}
1453 
1454 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1455 }
1456 
1457 static void handle_mem_reclaim(struct thread_smc_1_2_regs *args)
1458 {
1459 	int rc = FFA_INVALID_PARAMETERS;
1460 	uint64_t cookie = 0;
1461 
1462 	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
1463 		goto out;
1464 
1465 	cookie = reg_pair_to_64(args->a2, args->a1);
1466 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1467 		uint16_t guest_id = 0;
1468 
1469 		if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
1470 			guest_id = virt_find_guest_by_cookie(cookie);
1471 		} else {
1472 			guest_id = (cookie >> FFA_MEMORY_HANDLE_PRTN_SHIFT) &
1473 				   FFA_MEMORY_HANDLE_PRTN_MASK;
1474 		}
1475 		if (!guest_id)
1476 			goto out;
1477 		if (virt_set_guest(guest_id)) {
1478 			if (!virt_reclaim_cookie_from_destroyed_guest(guest_id,
1479 								      cookie))
1480 				rc = FFA_OK;
1481 			goto out;
1482 		}
1483 	}
1484 
1485 	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
1486 	case TEE_SUCCESS:
1487 		rc = FFA_OK;
1488 		break;
1489 	case TEE_ERROR_ITEM_NOT_FOUND:
1490 		DMSG("cookie %#"PRIx64" not found", cookie);
1491 		rc = FFA_INVALID_PARAMETERS;
1492 		break;
1493 	default:
1494 		DMSG("cookie %#"PRIx64" busy", cookie);
1495 		rc = FFA_DENIED;
1496 		break;
1497 	}
1498 
1499 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1500 		virt_unset_guest();
1501 
1502 out:
1503 	set_simple_ret_val(args, rc);
1504 }
1505 
1506 static void handle_notification_bitmap_create(struct thread_smc_1_2_regs *args)
1507 {
1508 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1509 	uint32_t ret_fid = FFA_ERROR;
1510 	uint32_t old_itr_status = 0;
1511 
1512 	if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
1513 	    !args->a5 && !args->a6 && !args->a7) {
1514 		struct guest_partition *prtn = NULL;
1515 		struct notif_vm_bitmap *nvb = NULL;
1516 		uint16_t vm_id = args->a1;
1517 
1518 		prtn = virt_get_guest(vm_id);
1519 		nvb = get_notif_vm_bitmap(prtn, vm_id);
1520 		if (!nvb) {
1521 			ret_val = FFA_INVALID_PARAMETERS;
1522 			goto out_virt_put;
1523 		}
1524 
1525 		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1526 
1527 		if (nvb->initialized) {
1528 			ret_val = FFA_DENIED;
1529 			goto out_unlock;
1530 		}
1531 
1532 		nvb->initialized = true;
1533 		nvb->do_bottom_half_value = -1;
1534 		ret_val = FFA_OK;
1535 		ret_fid = FFA_SUCCESS_32;
1536 out_unlock:
1537 		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1538 out_virt_put:
1539 		virt_put_guest(prtn);
1540 	}
1541 
1542 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1543 }
1544 
1545 static void handle_notification_bitmap_destroy(struct thread_smc_1_2_regs *args)
1546 {
1547 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1548 	uint32_t ret_fid = FFA_ERROR;
1549 	uint32_t old_itr_status = 0;
1550 
1551 	if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
1552 	    !args->a5 && !args->a6 && !args->a7) {
1553 		struct guest_partition *prtn = NULL;
1554 		struct notif_vm_bitmap *nvb = NULL;
1555 		uint16_t vm_id = args->a1;
1556 
1557 		prtn = virt_get_guest(vm_id);
1558 		nvb = get_notif_vm_bitmap(prtn, vm_id);
1559 		if (!nvb) {
1560 			ret_val = FFA_INVALID_PARAMETERS;
1561 			goto out_virt_put;
1562 		}
1563 
1564 		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1565 
1566 		if (nvb->pending || nvb->bound) {
1567 			ret_val = FFA_DENIED;
1568 			goto out_unlock;
1569 		}
1570 
1571 		memset(nvb, 0, sizeof(*nvb));
1572 		ret_val = FFA_OK;
1573 		ret_fid = FFA_SUCCESS_32;
1574 out_unlock:
1575 		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1576 out_virt_put:
1577 		virt_put_guest(prtn);
1578 	}
1579 
1580 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1581 }
1582 
1583 static void handle_notification_bind(struct thread_smc_1_2_regs *args)
1584 {
1585 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1586 	struct guest_partition *prtn = NULL;
1587 	struct notif_vm_bitmap *nvb = NULL;
1588 	uint32_t ret_fid = FFA_ERROR;
1589 	uint32_t old_itr_status = 0;
1590 	uint64_t bitmap = 0;
1591 	uint16_t vm_id = 0;
1592 
1593 	if (args->a5 || args->a6 || args->a7)
1594 		goto out;
1595 	if (args->a2) {
1596 		/* We only deal with global notifications */
1597 		ret_val = FFA_DENIED;
1598 		goto out;
1599 	}
1600 
1601 	/* The destination of the eventual notification */
1602 	vm_id = FFA_DST(args->a1);
1603 	bitmap = reg_pair_to_64(args->a4, args->a3);
1604 
1605 	prtn = virt_get_guest(vm_id);
1606 	nvb = get_notif_vm_bitmap(prtn, vm_id);
1607 	if (!nvb) {
1608 		ret_val = FFA_INVALID_PARAMETERS;
1609 		goto out_virt_put;
1610 	}
1611 
1612 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1613 
1614 	if ((bitmap & nvb->bound)) {
1615 		ret_val = FFA_DENIED;
1616 	} else {
1617 		nvb->bound |= bitmap;
1618 		ret_val = FFA_OK;
1619 		ret_fid = FFA_SUCCESS_32;
1620 	}
1621 
1622 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1623 out_virt_put:
1624 	virt_put_guest(prtn);
1625 out:
1626 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1627 }
1628 
1629 static void handle_notification_unbind(struct thread_smc_1_2_regs *args)
1630 {
1631 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1632 	struct guest_partition *prtn = NULL;
1633 	struct notif_vm_bitmap *nvb = NULL;
1634 	uint32_t ret_fid = FFA_ERROR;
1635 	uint32_t old_itr_status = 0;
1636 	uint64_t bitmap = 0;
1637 	uint16_t vm_id = 0;
1638 
1639 	if (args->a2 || args->a5 || args->a6 || args->a7)
1640 		goto out;
1641 
1642 	/* The destination of the eventual notification */
1643 	vm_id = FFA_DST(args->a1);
1644 	bitmap = reg_pair_to_64(args->a4, args->a3);
1645 
1646 	prtn = virt_get_guest(vm_id);
1647 	nvb = get_notif_vm_bitmap(prtn, vm_id);
1648 	if (!nvb) {
1649 		ret_val = FFA_INVALID_PARAMETERS;
1650 		goto out_virt_put;
1651 	}
1652 
1653 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1654 
1655 	if (bitmap & nvb->pending) {
1656 		ret_val = FFA_DENIED;
1657 	} else {
1658 		nvb->bound &= ~bitmap;
1659 		ret_val = FFA_OK;
1660 		ret_fid = FFA_SUCCESS_32;
1661 	}
1662 
1663 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1664 out_virt_put:
1665 	virt_put_guest(prtn);
1666 out:
1667 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1668 }
1669 
1670 static void handle_notification_get(struct thread_smc_1_2_regs *args)
1671 {
1672 	uint32_t w2 = FFA_INVALID_PARAMETERS;
1673 	struct guest_partition *prtn = NULL;
1674 	struct notif_vm_bitmap *nvb = NULL;
1675 	uint32_t ret_fid = FFA_ERROR;
1676 	uint32_t old_itr_status = 0;
1677 	uint16_t vm_id = 0;
1678 	uint32_t w3 = 0;
1679 
1680 	if (args->a5 || args->a6 || args->a7)
1681 		goto out;
1682 	if (!(args->a2 & 0x1)) {
1683 		ret_fid = FFA_SUCCESS_32;
1684 		w2 = 0;
1685 		goto out;
1686 	}
1687 	vm_id = FFA_DST(args->a1);
1688 
1689 	prtn = virt_get_guest(vm_id);
1690 	nvb = get_notif_vm_bitmap(prtn, vm_id);
1691 	if (!nvb)
1692 		goto out_virt_put;
1693 
1694 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1695 
1696 	reg_pair_from_64(nvb->pending, &w3, &w2);
1697 	nvb->pending = 0;
1698 	ret_fid = FFA_SUCCESS_32;
1699 
1700 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1701 out_virt_put:
1702 	virt_put_guest(prtn);
1703 out:
1704 	spmc_set_args(args, ret_fid, 0, w2, w3, 0, 0);
1705 }
1706 
1707 struct notif_info_get_state {
1708 	struct thread_smc_1_2_regs *args;
1709 	unsigned int ids_per_reg;
1710 	unsigned int ids_count;
1711 	unsigned int id_pos;
1712 	unsigned int count;
1713 	unsigned int max_list_count;
1714 	unsigned int list_count;
1715 };
1716 
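/*
 * Helpers for FFA_NOTIFICATION_INFO_GET: 16-bit endpoint (VM) IDs are
 * packed into registers w3..w7 (a[3]..a[7]), two IDs per register in the
 * 32-bit convention and four in the 64-bit one, while add_id_count()
 * records the size of the just completed ID list in the count field
 * reported via w2. For instance, with the 64-bit convention the first
 * four IDs land in w3 at bit offsets 0, 16, 32 and 48.
 */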
1717 static bool add_id_in_regs(struct notif_info_get_state *state,
1718 			   uint16_t id)
1719 {
1720 	unsigned int reg_idx = state->id_pos / state->ids_per_reg + 3;
1721 	unsigned int reg_shift = (state->id_pos % state->ids_per_reg) * 16;
1722 
1723 	if (reg_idx > 7)
1724 		return false;
1725 
1726 	state->args->a[reg_idx] &= ~SHIFT_U64(0xffff, reg_shift);
1727 	state->args->a[reg_idx] |= (unsigned long)id << reg_shift;
1728 
1729 	state->id_pos++;
1730 	state->count++;
1731 	return true;
1732 }
1733 
1734 static bool add_id_count(struct notif_info_get_state *state)
1735 {
1736 	assert(state->list_count < state->max_list_count &&
1737 	       state->count >= 1 && state->count <= 4);
1738 
1739 	state->ids_count |= (state->count - 1) << (state->list_count * 2 + 12);
1740 	state->list_count++;
1741 	state->count = 0;
1742 
1743 	return state->list_count < state->max_list_count;
1744 }
1745 
1746 static bool add_nvb_to_state(struct notif_info_get_state *state,
1747 			     uint16_t guest_id, struct notif_vm_bitmap *nvb)
1748 {
1749 	if (!nvb->pending)
1750 		return true;
1751 	/*
1752 	 * Add only the guest_id, meaning a global notification for this
1753 	 * guest.
1754 	 *
1755 	 * If notifications were pending for one or more specific vCPUs we'd
1756 	 * add those before calling add_id_count(), but that's not supported.
1757 	 */
1758 	return add_id_in_regs(state, guest_id) && add_id_count(state);
1759 }
1760 
1761 static void handle_notification_info_get(struct thread_smc_1_2_regs *args)
1762 {
1763 	struct notif_info_get_state state = { .args = args };
1764 	uint32_t ffa_res = FFA_INVALID_PARAMETERS;
1765 	struct guest_partition *prtn = NULL;
1766 	struct notif_vm_bitmap *nvb = NULL;
1767 	uint32_t more_pending_flag = 0;
1768 	uint32_t itr_state = 0;
1769 	uint16_t guest_id = 0;
1770 
1771 	if (args->a1 || args->a2 || args->a3 || args->a4 || args->a5 ||
1772 	    args->a6 || args->a7)
1773 		goto err;
1774 
1775 	if (OPTEE_SMC_IS_64(args->a0)) {
1776 		spmc_set_args(args, FFA_SUCCESS_64, 0, 0, 0, 0, 0);
1777 		state.ids_per_reg = 4;
1778 		state.max_list_count = 31;
1779 	} else {
1780 		spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0);
1781 		state.ids_per_reg = 2;
1782 		state.max_list_count = 15;
1783 	}
1784 
1785 	while (true) {
1786 		/*
1787 		 * With NS-Virtualization we need to go through all
1788 		 * partitions to collect the notification bitmaps; without it
1789 		 * we just check the only notification bitmap we have.
1790 		 */
1791 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1792 			prtn = virt_next_guest(prtn);
1793 			if (!prtn)
1794 				break;
1795 			guest_id = virt_get_guest_id(prtn);
1796 		}
1797 		nvb = get_notif_vm_bitmap(prtn, guest_id);
1798 
1799 		itr_state = cpu_spin_lock_xsave(&spmc_notif_lock);
1800 		if (!add_nvb_to_state(&state, guest_id, nvb))
1801 			more_pending_flag = BIT(0);
1802 		cpu_spin_unlock_xrestore(&spmc_notif_lock, itr_state);
1803 
1804 		if (!IS_ENABLED(CFG_NS_VIRTUALIZATION) || more_pending_flag)
1805 			break;
1806 	}
1807 	virt_put_guest(prtn);
1808 
1809 	if (!state.id_pos) {
1810 		ffa_res = FFA_NO_DATA;
1811 		goto err;
1812 	}
1813 	args->a2 = (state.list_count << FFA_NOTIF_INFO_GET_ID_COUNT_SHIFT) |
1814 		   (state.ids_count << FFA_NOTIF_INFO_GET_ID_LIST_SHIFT) |
1815 		   more_pending_flag;
1816 	return;
1817 err:
1818 	spmc_set_args(args, FFA_ERROR, 0, ffa_res, 0, 0, 0);
1819 }
1820 
1821 void thread_spmc_set_async_notif_intid(int intid)
1822 {
1823 	assert(interrupt_can_raise_sgi(interrupt_get_main_chip()));
1824 	notif_intid = intid;
1825 	spmc_notif_is_ready = true;
1826 	DMSG("Asynchronous notifications are ready");
1827 }
1828 
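/*
 * With the SPMC at S-EL1, an asynchronous notification is delivered by
 * setting the guest's "do bottom half" bit in its pending bitmap and
 * raising the configured SGI on the current CPU; normal world then
 * fetches the bit with FFA_NOTIFICATION_GET. The variant in the #else
 * branch below instead signals the notification with FFA_NOTIFICATION_SET
 * when the SPMC lives outside OP-TEE.
 */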
1829 void notif_send_async(uint32_t value, uint16_t guest_id)
1830 {
1831 	struct guest_partition *prtn = NULL;
1832 	struct notif_vm_bitmap *nvb = NULL;
1833 	uint32_t old_itr_status = 0;
1834 
1835 	prtn = virt_get_guest(guest_id);
1836 	nvb = get_notif_vm_bitmap(prtn, guest_id);
1837 
1838 	if (nvb) {
1839 		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1840 		assert(value == NOTIF_VALUE_DO_BOTTOM_HALF &&
1841 		       spmc_notif_is_ready && nvb->do_bottom_half_value >= 0 &&
1842 		       notif_intid >= 0);
1843 		nvb->pending |= BIT64(nvb->do_bottom_half_value);
1844 		interrupt_raise_sgi(interrupt_get_main_chip(), notif_intid,
1845 				    ITR_CPU_MASK_TO_THIS_CPU);
1846 		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1847 	}
1848 
1849 	virt_put_guest(prtn);
1850 }
1851 #else
1852 void notif_send_async(uint32_t value, uint16_t guest_id)
1853 {
1854 	struct guest_partition *prtn = NULL;
1855 	struct notif_vm_bitmap *nvb = NULL;
1856 	/* global notification, delay notification interrupt */
1857 	uint32_t flags = BIT32(1);
1858 	int res = 0;
1859 
1860 	prtn = virt_get_guest(guest_id);
1861 	nvb = get_notif_vm_bitmap(prtn, guest_id);
1862 
1863 	if (nvb) {
1864 		assert(value == NOTIF_VALUE_DO_BOTTOM_HALF &&
1865 		       spmc_notif_is_ready && nvb->do_bottom_half_value >= 0);
1866 		res = ffa_set_notification(guest_id, optee_endpoint_id, flags,
1867 					   BIT64(nvb->do_bottom_half_value));
1868 		if (res) {
1869 			EMSG("notification set failed with error %d", res);
1870 			panic();
1871 		}
1872 	}
1873 
1874 	virt_put_guest(prtn);
1875 }
1876 #endif
1877 
1878 /* Only called from assembly */
1879 void thread_spmc_msg_recv(struct thread_smc_1_2_regs *args);
1880 void thread_spmc_msg_recv(struct thread_smc_1_2_regs *args)
1881 {
1882 	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
1883 	switch (args->a0) {
1884 #if defined(CFG_CORE_SEL1_SPMC)
1885 	case FFA_FEATURES:
1886 		handle_features(args);
1887 		break;
1888 	case FFA_SPM_ID_GET:
1889 		spmc_handle_spm_id_get(args);
1890 		break;
1891 #ifdef ARM64
1892 	case FFA_RXTX_MAP_64:
1893 #endif
1894 	case FFA_RXTX_MAP_32:
1895 		spmc_handle_rxtx_map(args, &my_rxtx);
1896 		break;
1897 	case FFA_RXTX_UNMAP:
1898 		spmc_handle_rxtx_unmap(args, &my_rxtx);
1899 		break;
1900 	case FFA_RX_RELEASE:
1901 		spmc_handle_rx_release(args, &my_rxtx);
1902 		break;
1903 	case FFA_PARTITION_INFO_GET:
1904 		spmc_handle_partition_info_get(args, &my_rxtx);
1905 		break;
1906 	case FFA_RUN:
1907 		spmc_handle_run(args);
1908 		break;
1909 #endif /*CFG_CORE_SEL1_SPMC*/
1910 	case FFA_INTERRUPT:
1911 		if (IS_ENABLED(CFG_CORE_SEL1_SPMC))
1912 			spmc_set_args(args, FFA_NORMAL_WORLD_RESUME, 0, 0, 0,
1913 				      0, 0);
1914 		else
1915 			spmc_set_args(args, FFA_MSG_WAIT, 0, 0, 0, 0, 0);
1916 		break;
1917 #ifdef ARM64
1918 	case FFA_MSG_SEND_DIRECT_REQ_64:
1919 #endif
1920 	case FFA_MSG_SEND_DIRECT_REQ_32:
1921 		handle_direct_request(args, &my_rxtx);
1922 		break;
1923 #if defined(CFG_CORE_SEL1_SPMC)
1924 #ifdef ARM64
1925 	case FFA_MEM_SHARE_64:
1926 #endif
1927 	case FFA_MEM_SHARE_32:
1928 		handle_mem_share(args, &my_rxtx);
1929 		break;
1930 	case FFA_MEM_RECLAIM:
1931 		if (!IS_ENABLED(CFG_SECURE_PARTITION) ||
1932 		    !ffa_mem_reclaim(args, NULL))
1933 			handle_mem_reclaim(args);
1934 		break;
1935 	case FFA_MEM_FRAG_TX:
1936 		handle_mem_frag_tx(args, &my_rxtx);
1937 		break;
1938 	case FFA_NOTIFICATION_BITMAP_CREATE:
1939 		handle_notification_bitmap_create(args);
1940 		break;
1941 	case FFA_NOTIFICATION_BITMAP_DESTROY:
1942 		handle_notification_bitmap_destroy(args);
1943 		break;
1944 	case FFA_NOTIFICATION_BIND:
1945 		handle_notification_bind(args);
1946 		break;
1947 	case FFA_NOTIFICATION_UNBIND:
1948 		handle_notification_unbind(args);
1949 		break;
1950 	case FFA_NOTIFICATION_GET:
1951 		handle_notification_get(args);
1952 		break;
1953 #ifdef ARM64
1954 	case FFA_NOTIFICATION_INFO_GET_64:
1955 #endif
1956 	case FFA_NOTIFICATION_INFO_GET_32:
1957 		handle_notification_info_get(args);
1958 		break;
1959 #endif /*CFG_CORE_SEL1_SPMC*/
1960 	case FFA_ERROR:
1961 		EMSG("Cannot handle FFA_ERROR(%d)", (int)args->a2);
1962 		if (!IS_ENABLED(CFG_CORE_SEL1_SPMC)) {
1963 			/*
1964 			 * The SPMC will return an FFA_ERROR back, so it is
1965 			 * better to panic() now than to flood the log.
1966 			 */
1967 			panic("FFA_ERROR from SPMC is fatal");
1968 		}
1969 		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
1970 			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1971 		break;
1972 	default:
1973 		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
1974 		set_simple_ret_val(args, FFA_NOT_SUPPORTED);
1975 	}
1976 }
1977 
1978 static TEE_Result yielding_call_with_arg(uint64_t cookie, uint32_t offset)
1979 {
1980 	size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1981 	struct thread_ctx *thr = threads + thread_get_id();
1982 	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
1983 	struct optee_msg_arg *arg = NULL;
1984 	struct mobj *mobj = NULL;
1985 	uint32_t num_params = 0;
1986 	size_t sz = 0;
1987 
1988 	mobj = mobj_ffa_get_by_cookie(cookie, 0);
1989 	if (!mobj) {
1990 		EMSG("Can't find cookie %#"PRIx64, cookie);
1991 		return TEE_ERROR_BAD_PARAMETERS;
1992 	}
1993 
1994 	res = mobj_inc_map(mobj);
1995 	if (res)
1996 		goto out_put_mobj;
1997 
1998 	res = TEE_ERROR_BAD_PARAMETERS;
1999 	arg = mobj_get_va(mobj, offset, sizeof(*arg));
2000 	if (!arg)
2001 		goto out_dec_map;
2002 
2003 	num_params = READ_ONCE(arg->num_params);
2004 	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
2005 		goto out_dec_map;
2006 
2007 	sz = OPTEE_MSG_GET_ARG_SIZE(num_params);
2008 
2009 	thr->rpc_arg = mobj_get_va(mobj, offset + sz, sz_rpc);
2010 	if (!thr->rpc_arg)
2011 		goto out_dec_map;
2012 
2013 	virt_on_stdcall();
2014 	res = tee_entry_std(arg, num_params);
2015 
2016 	thread_rpc_shm_cache_clear(&thr->shm_cache);
2017 	thr->rpc_arg = NULL;
2018 
2019 out_dec_map:
2020 	mobj_dec_map(mobj);
2021 out_put_mobj:
2022 	mobj_put(mobj);
2023 	return res;
2024 }
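
/*
 * Sketch of the shared-memory layout consumed by yielding_call_with_arg()
 * above, derived from the offsets used in that function (not a quote from
 * the FF-A or OP-TEE message specifications):
 *
 *   cookie ---------------------------> FF-A shared mobj
 *   offset ---------------------------> struct optee_msg_arg holding the
 *                                        call arguments (num_params entries)
 *   offset + OPTEE_MSG_GET_ARG_SIZE(num_params)
 *          ---------------------------> second struct optee_msg_arg used as
 *                                        the per-thread RPC argument area,
 *                                        sized for THREAD_RPC_MAX_NUM_PARAMS
 *
 * Both areas live in a normal world buffer, so every field read from them
 * is untrusted, hence the READ_ONCE() and range checks above.
 */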
2025 
2026 /*
2027  * Helper routine for the assembly function thread_std_smc_entry()
2028  *
2029  * Note: this function is weak just to make link_dummies_paged.c happy.
2030  */
2031 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
2032 				       uint32_t a2, uint32_t a3,
2033 				       uint32_t a4, uint32_t a5 __unused)
2034 {
2035 	/*
2036 	 * Arguments are supplied from handle_yielding_call() as:
2037 	 * a0 <- w1
2038 	 * a1 <- w3
2039 	 * a2 <- w4
2040 	 * a3 <- w5
2041 	 * a4 <- w6
2042 	 * a5 <- w7
2043 	 */
2044 	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
2045 	if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG)
2046 		return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4);
2047 	return FFA_DENIED;
2048 }
2049 
2050 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
2051 {
2052 	uint64_t offs = tpm->u.memref.offs;
2053 
2054 	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
2055 		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
2056 
2057 	param->u.fmem.offs_low = offs;
2058 	param->u.fmem.offs_high = offs >> 32;
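	/*
	 * The fmem offs_high field is narrower than 32 bits, so the
	 * assignment above may truncate; reject offsets that do not
	 * round-trip.
	 */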
2059 	if (param->u.fmem.offs_high != offs >> 32)
2060 		return false;
2061 
2062 	param->u.fmem.size = tpm->u.memref.size;
2063 	if (tpm->u.memref.mobj) {
2064 		uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj);
2065 
2066 		/* If a mobj is passed it better be one with a valid cookie. */
2067 		/* If a mobj is passed it must be one with a valid cookie. */
2068 			return false;
2069 		param->u.fmem.global_id = cookie;
2070 	} else {
2071 		param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
2072 	}
2073 
2074 	return true;
2075 }
2076 
2077 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
2078 			    struct thread_param *params,
2079 			    struct optee_msg_arg **arg_ret)
2080 {
2081 	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
2082 	struct thread_ctx *thr = threads + thread_get_id();
2083 	struct optee_msg_arg *arg = thr->rpc_arg;
2084 
2085 	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
2086 		return TEE_ERROR_BAD_PARAMETERS;
2087 
2088 	if (!arg) {
2089 		EMSG("rpc_arg not set");
2090 		return TEE_ERROR_GENERIC;
2091 	}
2092 
2093 	memset(arg, 0, sz);
2094 	arg->cmd = cmd;
2095 	arg->num_params = num_params;
2096 	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
2097 
2098 	for (size_t n = 0; n < num_params; n++) {
2099 		switch (params[n].attr) {
2100 		case THREAD_PARAM_ATTR_NONE:
2101 			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
2102 			break;
2103 		case THREAD_PARAM_ATTR_VALUE_IN:
2104 		case THREAD_PARAM_ATTR_VALUE_OUT:
2105 		case THREAD_PARAM_ATTR_VALUE_INOUT:
2106 			arg->params[n].attr = params[n].attr -
2107 					      THREAD_PARAM_ATTR_VALUE_IN +
2108 					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
2109 			arg->params[n].u.value.a = params[n].u.value.a;
2110 			arg->params[n].u.value.b = params[n].u.value.b;
2111 			arg->params[n].u.value.c = params[n].u.value.c;
2112 			break;
2113 		case THREAD_PARAM_ATTR_MEMREF_IN:
2114 		case THREAD_PARAM_ATTR_MEMREF_OUT:
2115 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
2116 			if (!set_fmem(arg->params + n, params + n))
2117 				return TEE_ERROR_BAD_PARAMETERS;
2118 			break;
2119 		default:
2120 			return TEE_ERROR_BAD_PARAMETERS;
2121 		}
2122 	}
2123 
2124 	if (arg_ret)
2125 		*arg_ret = arg;
2126 
2127 	return TEE_SUCCESS;
2128 }
2129 
2130 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
2131 				struct thread_param *params)
2132 {
2133 	for (size_t n = 0; n < num_params; n++) {
2134 		switch (params[n].attr) {
2135 		case THREAD_PARAM_ATTR_VALUE_OUT:
2136 		case THREAD_PARAM_ATTR_VALUE_INOUT:
2137 			params[n].u.value.a = arg->params[n].u.value.a;
2138 			params[n].u.value.b = arg->params[n].u.value.b;
2139 			params[n].u.value.c = arg->params[n].u.value.c;
2140 			break;
2141 		case THREAD_PARAM_ATTR_MEMREF_OUT:
2142 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
2143 			params[n].u.memref.size = arg->params[n].u.fmem.size;
2144 			break;
2145 		default:
2146 			break;
2147 		}
2148 	}
2149 
2150 	return arg->ret;
2151 }
2152 
2153 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
2154 			struct thread_param *params)
2155 {
2156 	struct thread_rpc_arg rpc_arg = { .call = {
2157 			.w1 = thread_get_tsd()->rpc_target_info,
2158 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
2159 		},
2160 	};
2161 	struct optee_msg_arg *arg = NULL;
2162 	uint32_t ret = 0;
2163 
2164 	ret = get_rpc_arg(cmd, num_params, params, &arg);
2165 	if (ret)
2166 		return ret;
2167 
2168 	thread_rpc(&rpc_arg);
2169 
2170 	return get_rpc_arg_res(arg, num_params, params);
2171 }
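
/*
 * Usage sketch (illustration only, not code used in this file): a typical
 * caller fills in thread_param entries and lets thread_rpc_cmd() marshal
 * them into the RPC buffer registered by yielding_call_with_arg(). The
 * OPTEE_RPC_CMD_GET_TIME command and its seconds/nanoseconds output are
 * assumed here purely as an example of a value-out RPC.
 *
 *	struct thread_param p = THREAD_PARAM_VALUE(OUT, 0, 0, 0);
 *	uint32_t res = thread_rpc_cmd(OPTEE_RPC_CMD_GET_TIME, 1, &p);
 *
 *	if (!res) {
 *		seconds = p.u.value.a;
 *		nanoseconds = p.u.value.b;
 *	}
 */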
2172 
2173 static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
2174 {
2175 	struct thread_rpc_arg rpc_arg = { .call = {
2176 			.w1 = thread_get_tsd()->rpc_target_info,
2177 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
2178 		},
2179 	};
2180 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
2181 	uint32_t res2 = 0;
2182 	uint32_t res = 0;
2183 
2184 	DMSG("freeing cookie %#"PRIx64, cookie);
2185 
2186 	res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);
2187 
2188 	mobj_put(mobj);
2189 	res2 = mobj_ffa_unregister_by_cookie(cookie);
2190 	if (res2)
2191 		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
2192 		     cookie, res2);
2193 	if (!res)
2194 		thread_rpc(&rpc_arg);
2195 }
2196 
2197 static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
2198 {
2199 	struct thread_rpc_arg rpc_arg = { .call = {
2200 			.w1 = thread_get_tsd()->rpc_target_info,
2201 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
2202 		},
2203 	};
2204 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
2205 	struct optee_msg_arg *arg = NULL;
2206 	unsigned int internal_offset = 0;
2207 	struct mobj *mobj = NULL;
2208 	uint64_t cookie = 0;
2209 
2210 	if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
2211 		return NULL;
2212 
2213 	thread_rpc(&rpc_arg);
2214 
2215 	if (arg->num_params != 1 ||
2216 	    arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
2217 		return NULL;
2218 
2219 	internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs);
2220 	cookie = READ_ONCE(arg->params->u.fmem.global_id);
2221 	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
2222 	if (!mobj) {
2223 		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
2224 		     cookie, internal_offset);
2225 		return NULL;
2226 	}
2227 
2228 	assert(mobj_is_nonsec(mobj));
2229 
2230 	if (mobj->size < size) {
2231 		DMSG("Mobj %#"PRIx64": size too small", cookie);
2232 		mobj_put(mobj);
2233 		return NULL;
2234 	}
2235 
2236 	if (mobj_inc_map(mobj)) {
2237 		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
2238 		mobj_put(mobj);
2239 		return NULL;
2240 	}
2241 
2242 	return mobj;
2243 }
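
/*
 * Usage sketch for the wrappers below (illustration only): memory obtained
 * through a thread_rpc_alloc_*() helper is released with the matching
 * thread_rpc_free_*() helper, so that OPTEE_RPC_CMD_SHM_FREE and
 * mobj_ffa_unregister_by_cookie() are issued for the same cookie.
 *
 *	struct mobj *m = thread_rpc_alloc_payload(4096);
 *
 *	if (m) {
 *		void *va = mobj_get_va(m, 0, 4096);
 *
 *		... use va ...
 *
 *		thread_rpc_free_payload(m);
 *	}
 */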
2244 
2245 struct mobj *thread_rpc_alloc_payload(size_t size)
2246 {
2247 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
2248 }
2249 
2250 struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
2251 {
2252 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
2253 }
2254 
2255 void thread_rpc_free_kernel_payload(struct mobj *mobj)
2256 {
2257 	if (mobj)
2258 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL,
2259 				mobj_get_cookie(mobj), mobj);
2260 }
2261 
2262 void thread_rpc_free_payload(struct mobj *mobj)
2263 {
2264 	if (mobj)
2265 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
2266 				mobj);
2267 }
2268 
2269 struct mobj *thread_rpc_alloc_global_payload(size_t size)
2270 {
2271 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
2272 }
2273 
2274 void thread_rpc_free_global_payload(struct mobj *mobj)
2275 {
2276 	if (mobj)
2277 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL,
2278 				mobj_get_cookie(mobj), mobj);
2279 }
2280 
2281 void thread_spmc_register_secondary_ep(vaddr_t ep)
2282 {
2283 	unsigned long ret = 0;
2284 
2285 	/* Let the SPM know the entry point for secondary CPUs */
2286 	ret = thread_smc(FFA_SECONDARY_EP_REGISTER_64, ep, 0, 0);
2287 
2288 	if (ret != FFA_SUCCESS_32 && ret != FFA_SUCCESS_64)
2289 		EMSG("FFA_SECONDARY_EP_REGISTER_64 ret %#lx", ret);
2290 }
2291 
2292 static uint16_t ffa_id_get(void)
2293 {
2294 	/*
2295 	 * Ask the SPM component running at a higher EL to return our FF-A ID.
2296 	 * This can either be the SPMC ID (if the SPMC is enabled in OP-TEE) or
2297 	 * the partition ID (if not).
2298 	 */
2299 	struct thread_smc_args args = {
2300 		.a0 = FFA_ID_GET,
2301 	};
2302 
2303 	thread_smccc(&args);
2304 	if (!is_ffa_success(args.a0)) {
2305 		if (args.a0 == FFA_ERROR)
2306 			EMSG("Get id failed with error %ld", args.a2);
2307 		else
2308 			EMSG("Get id failed");
2309 		panic();
2310 	}
2311 
2312 	return args.a2;
2313 }
2314 
2315 static uint16_t ffa_spm_id_get(void)
2316 {
2317 	/*
2318 	 * Ask the SPM component running at a higher EL to return its ID.
2319 	 * If OP-TEE implements the S-EL1 SPMC, this will get the SPMD ID.
2320 	 * If not, the ID of the SPMC will be returned.
2321 	 */
2322 	struct thread_smc_args args = {
2323 		.a0 = FFA_SPM_ID_GET,
2324 	};
2325 
2326 	thread_smccc(&args);
2327 	if (!is_ffa_success(args.a0)) {
2328 		if (args.a0 == FFA_ERROR)
2329 			EMSG("Get spm id failed with error %ld", args.a2);
2330 		else
2331 			EMSG("Get spm id failed");
2332 		panic();
2333 	}
2334 
2335 	return args.a2;
2336 }
2337 
2338 #if defined(CFG_CORE_SEL1_SPMC)
2339 static TEE_Result spmc_init(void)
2340 {
2341 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
2342 	    virt_add_guest_spec_data(&notif_vm_bitmap_id,
2343 				     sizeof(struct notif_vm_bitmap), NULL))
2344 		panic("virt_add_guest_spec_data");
2345 	spmd_id = ffa_spm_id_get();
2346 	DMSG("SPMD ID %#"PRIx16, spmd_id);
2347 
2348 	spmc_id = ffa_id_get();
2349 	DMSG("SPMC ID %#"PRIx16, spmc_id);
2350 
2351 	optee_endpoint_id = FFA_SWD_ID_MIN;
2352 	while (optee_endpoint_id == spmd_id || optee_endpoint_id == spmc_id)
2353 		optee_endpoint_id++;
2354 
2355 	DMSG("OP-TEE endpoint ID %#"PRIx16, optee_endpoint_id);
2356 
2357 	/*
2358 	 * If the SPMD thinks we are version 1.0 it will report version 1.0
2359 	 * to normal world regardless of which version we query the SPM with.
2360 	 * However, if the SPMD thinks we are version 1.1 it will forward
2361 	 * version queries from normal world and let us negotiate the version
2362 	 * ourselves. So by setting version 1.0 here we stay compatible.
2363 	 *
2364 	 * Note that disagreement on the negotiated version means that we'll
2365 	 * have communication problems with normal world.
2366 	 */
2367 	my_rxtx.ffa_vers = FFA_VERSION_1_0;
2368 
2369 	return TEE_SUCCESS;
2370 }
2371 #else /* !defined(CFG_CORE_SEL1_SPMC) */
2372 static void spmc_rxtx_map(struct ffa_rxtx *rxtx)
2373 {
2374 	struct thread_smc_args args = {
2375 #ifdef ARM64
2376 		.a0 = FFA_RXTX_MAP_64,
2377 #else
2378 		.a0 = FFA_RXTX_MAP_32,
2379 #endif
2380 		.a1 = virt_to_phys(rxtx->tx),
2381 		.a2 = virt_to_phys(rxtx->rx),
2382 		.a3 = 1,
2383 	};
2384 
2385 	thread_smccc(&args);
2386 	if (!is_ffa_success(args.a0)) {
2387 		if (args.a0 == FFA_ERROR)
2388 			EMSG("rxtx map failed with error %ld", args.a2);
2389 		else
2390 			EMSG("rxtx map failed");
2391 		panic();
2392 	}
2393 }
2394 
2395 static uint32_t get_ffa_version(uint32_t my_version)
2396 {
2397 	struct thread_smc_args args = {
2398 		.a0 = FFA_VERSION,
2399 		.a1 = my_version,
2400 	};
2401 
2402 	thread_smccc(&args);
2403 	if (args.a0 & BIT(31)) {
2404 		EMSG("FF-A version failed with error %ld", args.a0);
2405 		panic();
2406 	}
2407 
2408 	return args.a0;
2409 }
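
/*
 * Worked example of the version word handled above: FF-A encodes the major
 * version in bits [30:16] and the minor version in bits [15:0], with bit 31
 * set signalling an error (as checked above). MAKE_FFA_VERSION(1, 1) is
 * thus 0x10001, and the shift/mask decoding in spmc_init() below recovers
 * major = 1, minor = 1.
 */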
2410 
2411 static void *spmc_retrieve_req(uint64_t cookie,
2412 			       struct ffa_mem_transaction_x *trans)
2413 {
2414 	struct ffa_mem_access *acc_descr_array = NULL;
2415 	struct ffa_mem_access_perm *perm_descr = NULL;
2416 	struct thread_smc_args args = {
2417 		.a0 = FFA_MEM_RETRIEVE_REQ_32,
2418 		.a3 = 0,	/* Address: using the TX buffer, so MBZ */
2419 		.a4 = 0,	/* Page count: using the TX buffer, so MBZ */
2420 	};
2421 	size_t size = 0;
2422 	int rc = 0;
2423 
2424 	if (my_rxtx.ffa_vers == FFA_VERSION_1_0) {
2425 		struct ffa_mem_transaction_1_0 *trans_descr = my_rxtx.tx;
2426 
2427 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
2428 		memset(trans_descr, 0, size);
2429 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
2430 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
2431 		trans_descr->global_handle = cookie;
2432 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
2433 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
2434 		trans_descr->mem_access_count = 1;
2435 		acc_descr_array = trans_descr->mem_access_array;
2436 	} else {
2437 		struct ffa_mem_transaction_1_1 *trans_descr = my_rxtx.tx;
2438 
2439 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
2440 		memset(trans_descr, 0, size);
2441 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
2442 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
2443 		trans_descr->global_handle = cookie;
2444 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
2445 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
2446 		trans_descr->mem_access_count = 1;
2447 		trans_descr->mem_access_offs = sizeof(*trans_descr);
2448 		trans_descr->mem_access_size = sizeof(struct ffa_mem_access);
2449 		acc_descr_array = (void *)((vaddr_t)my_rxtx.tx +
2450 					   sizeof(*trans_descr));
2451 	}
2452 	acc_descr_array->region_offs = 0;
2453 	acc_descr_array->reserved = 0;
2454 	perm_descr = &acc_descr_array->access_perm;
2455 	perm_descr->endpoint_id = optee_endpoint_id;
2456 	perm_descr->perm = FFA_MEM_ACC_RW;
2457 	perm_descr->flags = 0;
2458 
2459 	args.a1 = size; /* Total Length */
2460 	args.a2 = size; /* Frag Length == Total length */
2461 	thread_smccc(&args);
2462 	if (args.a0 != FFA_MEM_RETRIEVE_RESP) {
2463 		if (args.a0 == FFA_ERROR)
2464 			EMSG("Failed to fetch cookie %#"PRIx64" error code %d",
2465 			     cookie, (int)args.a2);
2466 		else
2467 			EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64,
2468 			     cookie, args.a0);
2469 		return NULL;
2470 	}
2471 	rc = spmc_read_mem_transaction(my_rxtx.ffa_vers, my_rxtx.rx,
2472 				       my_rxtx.size, trans);
2473 	if (rc) {
2474 		EMSG("Memory transaction failure for cookie %#"PRIx64" rc %d",
2475 		     cookie, rc);
2476 		return NULL;
2477 	}
2478 
2479 	return my_rxtx.rx;
2480 }
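
/*
 * Sketch of the retrieve request built above for the v1.1 layout (the v1.0
 * variant embeds the access array directly in the transaction descriptor
 * instead of locating it via mem_access_offs):
 *
 *   TX buffer:
 *     struct ffa_mem_transaction_1_1  sender, FFA_NORMAL_MEM_REG_ATTR,
 *                                     handle = cookie, share flags,
 *                                     mem_access_count = 1,
 *                                     mem_access_offs = sizeof(descriptor)
 *     struct ffa_mem_access           region_offs = 0, RW permission for
 *                                     optee_endpoint_id
 *
 * FFA_MEM_RETRIEVE_REQ_32 is issued with total length == fragment length ==
 * the size of these two structures, and the response is read back from the
 * RX buffer through spmc_read_mem_transaction().
 */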
2481 
2482 void thread_spmc_relinquish(uint64_t cookie)
2483 {
2484 	struct ffa_mem_relinquish *relinquish_desc = my_rxtx.tx;
2485 	struct thread_smc_args args = {
2486 		.a0 = FFA_MEM_RELINQUISH,
2487 	};
2488 
2489 	memset(relinquish_desc, 0, sizeof(*relinquish_desc));
2490 	relinquish_desc->handle = cookie;
2491 	relinquish_desc->flags = 0;
2492 	relinquish_desc->endpoint_count = 1;
2493 	relinquish_desc->endpoint_id_array[0] = optee_endpoint_id;
2494 	thread_smccc(&args);
2495 	if (!is_ffa_success(args.a0))
2496 		EMSG("Failed to relinquish cookie %#"PRIx64, cookie);
2497 }
2498 
2499 static int set_pages(struct ffa_address_range *regions,
2500 		     unsigned int num_regions, unsigned int num_pages,
2501 		     struct mobj_ffa *mf)
2502 {
2503 	unsigned int n = 0;
2504 	unsigned int idx = 0;
2505 
2506 	for (n = 0; n < num_regions; n++) {
2507 		unsigned int page_count = READ_ONCE(regions[n].page_count);
2508 		uint64_t addr = READ_ONCE(regions[n].address);
2509 
2510 		if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count))
2511 			return FFA_INVALID_PARAMETERS;
2512 	}
2513 
2514 	if (idx != num_pages)
2515 		return FFA_INVALID_PARAMETERS;
2516 
2517 	return 0;
2518 }
2519 
2520 struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie)
2521 {
2522 	struct mobj_ffa *ret = NULL;
2523 	struct ffa_mem_transaction_x retrieve_desc = { };
2524 	struct ffa_mem_access *descr_array = NULL;
2525 	struct ffa_mem_region *descr = NULL;
2526 	struct mobj_ffa *mf = NULL;
2527 	unsigned int num_pages = 0;
2528 	unsigned int offs = 0;
2529 	void *buf = NULL;
2530 	struct thread_smc_args ffa_rx_release_args = {
2531 		.a0 = FFA_RX_RELEASE
2532 	};
2533 
2534 	/*
2535 	 * OP-TEE only supports a single mem_region while the
2536 	 * specification allows for more than one.
2537 	 */
2538 	buf = spmc_retrieve_req(cookie, &retrieve_desc);
2539 	if (!buf) {
2540 		EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64,
2541 		     cookie);
2542 		return NULL;
2543 	}
2544 
2545 	descr_array = (void *)((vaddr_t)buf + retrieve_desc.mem_access_offs);
2546 	offs = READ_ONCE(descr_array->region_offs);
2547 	descr = (struct ffa_mem_region *)((vaddr_t)buf + offs);
2548 
2549 	num_pages = READ_ONCE(descr->total_page_count);
2550 	mf = mobj_ffa_spmc_new(cookie, num_pages);
2551 	if (!mf)
2552 		goto out;
2553 
2554 	if (set_pages(descr->address_range_array,
2555 		      READ_ONCE(descr->address_range_count), num_pages, mf)) {
2556 		mobj_ffa_spmc_delete(mf);
2557 		goto out;
2558 	}
2559 
2560 	ret = mf;
2561 
2562 out:
2563 	/* Release RX buffer after the mem retrieve request. */
2564 	thread_smccc(&ffa_rx_release_args);
2565 
2566 	return ret;
2567 }
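
/*
 * Sketch of how the retrieve response is walked above (offsets relative to
 * the start of the RX buffer returned by spmc_retrieve_req()):
 *
 *   transaction descriptor  -> parsed by spmc_read_mem_transaction()
 *     + mem_access_offs     -> struct ffa_mem_access
 *         .region_offs      -> struct ffa_mem_region
 *             .total_page_count    -> size of the new mobj_ffa
 *             .address_range_array -> fed page by page to set_pages()
 */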
2568 
2569 static TEE_Result spmc_init(void)
2570 {
2571 	unsigned int major = 0;
2572 	unsigned int minor __maybe_unused = 0;
2573 	uint32_t my_vers = 0;
2574 	uint32_t vers = 0;
2575 
2576 	my_vers = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR);
2577 	vers = get_ffa_version(my_vers);
2578 	major = (vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK;
2579 	minor = (vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK;
2580 	DMSG("SPMC reported version %u.%u", major, minor);
2581 	if (major != FFA_VERSION_MAJOR) {
2582 		EMSG("Incompatible major version %u, expected %u",
2583 		     major, FFA_VERSION_MAJOR);
2584 		panic();
2585 	}
2586 	if (vers < my_vers)
2587 		my_vers = vers;
2588 	DMSG("Using version %u.%u",
2589 	     (my_vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK,
2590 	     (my_vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK);
2591 	my_rxtx.ffa_vers = my_vers;
2592 
2593 	spmc_rxtx_map(&my_rxtx);
2594 
2595 	spmc_id = ffa_spm_id_get();
2596 	DMSG("SPMC ID %#"PRIx16, spmc_id);
2597 
2598 	optee_endpoint_id = ffa_id_get();
2599 	DMSG("OP-TEE endpoint ID %#"PRIx16, optee_endpoint_id);
2600 
2601 	if (!ffa_features(FFA_NOTIFICATION_SET)) {
2602 		spmc_notif_is_ready = true;
2603 		DMSG("Asynchronous notifications are ready");
2604 	}
2605 
2606 	return TEE_SUCCESS;
2607 }
2608 #endif /* !defined(CFG_CORE_SEL1_SPMC) */
2609 
2610 nex_service_init(spmc_init);
2611