xref: /optee_os/core/arch/arm/kernel/thread_spmc.c (revision c3deb3d6f3b13d0e17fc9efe5880aec039e47594)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2020-2025, Linaro Limited.
4  * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
5  */
6 
7 #include <assert.h>
8 #include <ffa.h>
9 #include <initcall.h>
10 #include <io.h>
11 #include <kernel/dt.h>
12 #include <kernel/interrupt.h>
13 #include <kernel/notif.h>
14 #include <kernel/panic.h>
15 #include <kernel/secure_partition.h>
16 #include <kernel/spinlock.h>
17 #include <kernel/spmc_sp_handler.h>
18 #include <kernel/tee_misc.h>
19 #include <kernel/thread.h>
20 #include <kernel/thread_private.h>
21 #include <kernel/thread_spmc.h>
22 #include <kernel/virtualization.h>
23 #include <libfdt.h>
24 #include <mm/core_mmu.h>
25 #include <mm/mobj.h>
26 #include <optee_ffa.h>
27 #include <optee_msg.h>
28 #include <optee_rpc_cmd.h>
29 #include <sm/optee_smc.h>
30 #include <string.h>
31 #include <sys/queue.h>
32 #include <tee/entry_std.h>
33 #include <tee/uuid.h>
34 #include <util.h>
35 
36 #if defined(CFG_CORE_SEL1_SPMC)
37 struct mem_share_state {
38 	struct mobj_ffa *mf;
39 	unsigned int page_count;
40 	unsigned int region_count;
41 	unsigned int current_page_idx;
42 };
43 
44 struct mem_frag_state {
45 	struct mem_share_state share;
46 	tee_mm_entry_t *mm;
47 	unsigned int frag_offset;
48 	SLIST_ENTRY(mem_frag_state) link;
49 };
50 #endif
51 
52 struct notif_vm_bitmap {
53 	bool initialized;
54 	int do_bottom_half_value;
55 	uint64_t pending;
56 	uint64_t bound;
57 };
58 
59 static unsigned int spmc_notif_lock __nex_data = SPINLOCK_UNLOCK;
60 static bool spmc_notif_is_ready __nex_bss;
61 static int notif_intid __nex_data __maybe_unused = -1;
62 
63 /* Id used to look up the guest specific struct notif_vm_bitmap */
64 static unsigned int notif_vm_bitmap_id __nex_bss;
65 /* Notification state when ns-virtualization isn't enabled */
66 static struct notif_vm_bitmap default_notif_vm_bitmap;
67 
68 /* Initialized in spmc_init() below */
69 uint16_t optee_endpoint_id __nex_bss;
70 uint16_t spmc_id __nex_bss;
71 #ifdef CFG_CORE_SEL1_SPMC
72 uint16_t spmd_id __nex_bss;
73 static const uint32_t my_part_props = FFA_PART_PROP_DIRECT_REQ_RECV |
74 				      FFA_PART_PROP_DIRECT_REQ_SEND |
75 #ifdef CFG_NS_VIRTUALIZATION
76 				      FFA_PART_PROP_NOTIF_CREATED |
77 				      FFA_PART_PROP_NOTIF_DESTROYED |
78 #endif
79 #ifdef ARM64
80 				      FFA_PART_PROP_AARCH64_STATE |
81 #endif
82 				      FFA_PART_PROP_IS_PE_ID;
83 
84 static uint32_t my_uuid_words[] = {
85 	/*
86 	 * - if the SPMC is in S-EL2 this UUID describes OP-TEE as an S-EL1
87 	 *   SP, or
88 	 * - if the SPMC is in S-EL1 then this UUID is for OP-TEE as a
89 	 *   logical partition, residing in the same exception level as the
90 	 *   SPMC
91 	 * UUID 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
92 	 */
93 	0xe0786148, 0xe311f8e7, 0x02005ebc, 0x1bc5d5a5,
94 };
95 
96 /*
97  * If struct ffa_rxtx::size is 0 RX/TX buffers are not mapped or initialized.
98  *
99  * struct ffa_rxtx::spinlock protects the variables below from concurrent
100  * access. This includes the contents of struct ffa_rxtx::rx and
101  * @frag_state_head.
102  *
103  * struct ffa_rxtx::tx_is_mine is true when we may write to struct
104  * ffa_rxtx::tx and false when it is owned by normal world.
105  *
106  * Note that we can't prevent normal world from updating the contents of
107  * these buffers, so we must always read them carefully, even while we hold
108  * the lock.
109  */
110 
111 static struct ffa_rxtx my_rxtx __nex_bss;
112 
113 static bool is_nw_buf(struct ffa_rxtx *rxtx)
114 {
115 	return rxtx == &my_rxtx;
116 }
117 
118 static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
119 	SLIST_HEAD_INITIALIZER(&frag_state_head);
120 
121 #else
122 static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE) __nex_bss;
123 static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE) __nex_bss;
124 static struct ffa_rxtx my_rxtx __nex_data = {
125 	.rx = __rx_buf,
126 	.tx = __tx_buf,
127 	.size = sizeof(__rx_buf),
128 };
129 #endif
130 
131 static uint32_t swap_src_dst(uint32_t src_dst)
132 {
133 	return (src_dst >> 16) | (src_dst << 16);
134 }
135 
136 static uint16_t get_sender_id(uint32_t src_dst)
137 {
138 	return src_dst >> 16;
139 }
140 
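/*
 * Illustrative note (the endpoint IDs are hypothetical): w1 of a direct
 * message carries the sender ID in bits [31:16] and the receiver ID in
 * bits [15:0]. If a normal world driver with ID 0x0001 targets OP-TEE
 * with ID 0x8001, w1 is 0x00018001; swap_src_dst() then yields 0x80010001
 * for the response and get_sender_id() extracts 0x0001.
 */
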
141 void spmc_set_args(struct thread_smc_1_2_regs *args, uint32_t fid,
142 		   uint32_t src_dst, uint32_t w2, uint32_t w3, uint32_t w4,
143 		   uint32_t w5)
144 {
145 	*args = (struct thread_smc_1_2_regs){
146 		.a0 = fid,
147 		.a1 = src_dst,
148 		.a2 = w2,
149 		.a3 = w3,
150 		.a4 = w4,
151 		.a5 = w5,
152 	};
153 }
154 
155 static void set_simple_ret_val(struct thread_smc_1_2_regs *args, int ffa_ret)
156 {
157 	if (ffa_ret)
158 		spmc_set_args(args, FFA_ERROR, 0, ffa_ret, 0, 0, 0);
159 	else
160 		spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0);
161 }
162 
163 uint32_t spmc_exchange_version(uint32_t vers, struct ffa_rxtx *rxtx)
164 {
165 	uint32_t major_vers = FFA_GET_MAJOR_VERSION(vers);
166 	uint32_t minor_vers = FFA_GET_MINOR_VERSION(vers);
167 	uint32_t my_vers = FFA_VERSION_1_2;
168 	uint32_t my_major_vers = 0;
169 	uint32_t my_minor_vers = 0;
170 
171 	my_major_vers = FFA_GET_MAJOR_VERSION(my_vers);
172 	my_minor_vers = FFA_GET_MINOR_VERSION(my_vers);
173 
174 	/*
175 	 * No locking; if the caller makes concurrent calls to this it's
176 	 * only making a mess for itself. We must be able to renegotiate
177 	 * the FF-A version in order to support differing versions between
178 	 * the loader and the driver.
179 	 *
180 	 * Callers should use the version requested if we return a matching
181 	 * major version and a matching or larger minor version. The caller
182 	 * should downgrade to our minor version if our minor version is
183 	 * smaller. Regardless, always return our version as recommended by
184 	 * the specification.
185 	 */
186 	if (major_vers == my_major_vers) {
187 		if (minor_vers > my_minor_vers)
188 			rxtx->ffa_vers = my_vers;
189 		else
190 			rxtx->ffa_vers = vers;
191 	}
192 
193 	return my_vers;
194 }
195 
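/*
 * Worked example, assuming the usual FFA_VERSION encoding with the major
 * version in bits [30:16] and the minor version in bits [15:0]: a caller
 * negotiating version 1.0 against our 1.2 has a matching major version and
 * a smaller minor version, so rxtx->ffa_vers is set to the caller's 1.0
 * while we still return 1.2. Per the rules above the caller keeps using
 * 1.0, which is also what we will use for that RX/TX pair.
 */
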
196 static bool is_ffa_success(uint32_t fid)
197 {
198 #ifdef ARM64
199 	if (fid == FFA_SUCCESS_64)
200 		return true;
201 #endif
202 	return fid == FFA_SUCCESS_32;
203 }
204 
205 static int32_t get_ffa_ret_code(const struct thread_smc_args *args)
206 {
207 	if (is_ffa_success(args->a0))
208 		return FFA_OK;
209 	if (args->a0 == FFA_ERROR && args->a2)
210 		return args->a2;
211 	return FFA_NOT_SUPPORTED;
212 }
213 
214 static int ffa_simple_call(uint32_t fid, unsigned long a1, unsigned long a2,
215 			   unsigned long a3, unsigned long a4)
216 {
217 	struct thread_smc_args args = {
218 		.a0 = fid,
219 		.a1 = a1,
220 		.a2 = a2,
221 		.a3 = a3,
222 		.a4 = a4,
223 	};
224 
225 	thread_smccc(&args);
226 
227 	return get_ffa_ret_code(&args);
228 }
229 
230 static int __maybe_unused ffa_features(uint32_t id)
231 {
232 	return ffa_simple_call(FFA_FEATURES, id, 0, 0, 0);
233 }
234 
235 static int __maybe_unused ffa_set_notification(uint16_t dst, uint16_t src,
236 					       uint32_t flags, uint64_t bitmap)
237 {
238 	return ffa_simple_call(FFA_NOTIFICATION_SET,
239 			       SHIFT_U32(src, 16) | dst, flags,
240 			       low32_from_64(bitmap), high32_from_64(bitmap));
241 }
242 
243 #if defined(CFG_CORE_SEL1_SPMC)
244 static void handle_features(struct thread_smc_1_2_regs *args)
245 {
246 	uint32_t ret_fid = FFA_ERROR;
247 	uint32_t ret_w2 = FFA_NOT_SUPPORTED;
248 
249 	switch (args->a1) {
250 	case FFA_FEATURE_SCHEDULE_RECV_INTR:
251 		if (spmc_notif_is_ready) {
252 			ret_fid = FFA_SUCCESS_32;
253 			ret_w2 = notif_intid;
254 		}
255 		break;
256 
257 #ifdef ARM64
258 	case FFA_RXTX_MAP_64:
259 #endif
260 	case FFA_RXTX_MAP_32:
261 		ret_fid = FFA_SUCCESS_32;
262 		ret_w2 = 0; /* 4kB minimum buffer size and alignment boundary */
263 		break;
264 #ifdef ARM64
265 	case FFA_MEM_SHARE_64:
266 #endif
267 	case FFA_MEM_SHARE_32:
268 		ret_fid = FFA_SUCCESS_32;
269 		/*
270 		 * Partition manager supports transmission of a memory
271 		 * transaction descriptor in a buffer dynamically allocated
272 		 * by the endpoint.
273 		 */
274 		ret_w2 = BIT(0);
275 		break;
276 
277 	case FFA_ERROR:
278 	case FFA_VERSION:
279 	case FFA_SUCCESS_32:
280 #ifdef ARM64
281 	case FFA_SUCCESS_64:
282 #endif
283 	case FFA_FEATURES:
284 	case FFA_SPM_ID_GET:
285 	case FFA_MEM_FRAG_TX:
286 	case FFA_MEM_RECLAIM:
287 	case FFA_MSG_SEND_DIRECT_REQ_64:
288 	case FFA_MSG_SEND_DIRECT_REQ_32:
289 	case FFA_INTERRUPT:
290 	case FFA_PARTITION_INFO_GET:
291 	case FFA_RXTX_UNMAP:
292 	case FFA_RX_RELEASE:
293 	case FFA_FEATURE_MANAGED_EXIT_INTR:
294 	case FFA_NOTIFICATION_BITMAP_CREATE:
295 	case FFA_NOTIFICATION_BITMAP_DESTROY:
296 	case FFA_NOTIFICATION_BIND:
297 	case FFA_NOTIFICATION_UNBIND:
298 	case FFA_NOTIFICATION_SET:
299 	case FFA_NOTIFICATION_GET:
300 	case FFA_NOTIFICATION_INFO_GET_32:
301 #ifdef ARM64
302 	case FFA_NOTIFICATION_INFO_GET_64:
303 #endif
304 		ret_fid = FFA_SUCCESS_32;
305 		ret_w2 = FFA_PARAM_MBZ;
306 		break;
307 	default:
308 		break;
309 	}
310 
311 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
312 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
313 }
314 
315 static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
316 {
317 	tee_mm_entry_t *mm = NULL;
318 
319 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
320 		return FFA_INVALID_PARAMETERS;
321 
322 	mm = tee_mm_alloc(&core_virt_shm_pool, sz);
323 	if (!mm)
324 		return FFA_NO_MEMORY;
325 
326 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
327 					  sz / SMALL_PAGE_SIZE,
328 					  MEM_AREA_NSEC_SHM)) {
329 		tee_mm_free(mm);
330 		return FFA_INVALID_PARAMETERS;
331 	}
332 
333 	*va_ret = (void *)tee_mm_get_smem(mm);
334 	return 0;
335 }
336 
337 void spmc_handle_spm_id_get(struct thread_smc_1_2_regs *args)
338 {
339 	spmc_set_args(args, FFA_SUCCESS_32, FFA_PARAM_MBZ, spmc_id,
340 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
341 }
342 
343 static void unmap_buf(void *va, size_t sz)
344 {
345 	tee_mm_entry_t *mm = tee_mm_find(&core_virt_shm_pool, (vaddr_t)va);
346 
347 	assert(mm);
348 	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
349 	tee_mm_free(mm);
350 }
351 
352 void spmc_handle_rxtx_map(struct thread_smc_1_2_regs *args,
353 			  struct ffa_rxtx *rxtx)
354 {
355 	int rc = 0;
356 	unsigned int sz = 0;
357 	paddr_t rx_pa = 0;
358 	paddr_t tx_pa = 0;
359 	void *rx = NULL;
360 	void *tx = NULL;
361 
362 	cpu_spin_lock(&rxtx->spinlock);
363 
364 	if (args->a3 & GENMASK_64(63, 6)) {
365 		rc = FFA_INVALID_PARAMETERS;
366 		goto out;
367 	}
368 
369 	sz = args->a3 * SMALL_PAGE_SIZE;
370 	if (!sz) {
371 		rc = FFA_INVALID_PARAMETERS;
372 		goto out;
373 	}
374 	/* TX/RX are swapped compared to the caller */
375 	tx_pa = args->a2;
376 	rx_pa = args->a1;
377 
378 	if (rxtx->size) {
379 		rc = FFA_DENIED;
380 		goto out;
381 	}
382 
383 	/*
384 	 * If the buffer comes from a SP the address is virtual and already
385 	 * mapped.
386 	 */
387 	if (is_nw_buf(rxtx)) {
388 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
389 			enum teecore_memtypes mt = MEM_AREA_NEX_NSEC_SHM;
390 			bool tx_alloced = false;
391 
392 			/*
393 			 * With virtualization we establish this mapping in
394 			 * the nexus mapping which then is replicated to
395 			 * each partition.
396 			 *
397 			 * This means that this mapping must be done before
398 			 * any partition is created and then must not be
399 			 * changed.
400 			 */
401 
402 			/*
403 			 * core_mmu_add_mapping() may reuse previous
404 			 * mappings. First check if there's any mappings to
405 			 * reuse so we know how to clean up in case of
406 			 * failure.
407 			 */
408 			tx = phys_to_virt(tx_pa, mt, sz);
409 			rx = phys_to_virt(rx_pa, mt, sz);
410 			if (!tx) {
411 				tx = core_mmu_add_mapping(mt, tx_pa, sz);
412 				if (!tx) {
413 					rc = FFA_NO_MEMORY;
414 					goto out;
415 				}
416 				tx_alloced = true;
417 			}
418 			if (!rx)
419 				rx = core_mmu_add_mapping(mt, rx_pa, sz);
420 
421 			if (!rx) {
422 				if (tx_alloced && tx)
423 					core_mmu_remove_mapping(mt, tx, sz);
424 				rc = FFA_NO_MEMORY;
425 				goto out;
426 			}
427 		} else {
428 			rc = map_buf(tx_pa, sz, &tx);
429 			if (rc)
430 				goto out;
431 			rc = map_buf(rx_pa, sz, &rx);
432 			if (rc) {
433 				unmap_buf(tx, sz);
434 				goto out;
435 			}
436 		}
437 		rxtx->tx = tx;
438 		rxtx->rx = rx;
439 	} else {
440 		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
441 			rc = FFA_INVALID_PARAMETERS;
442 			goto out;
443 		}
444 
445 		if (!virt_to_phys((void *)tx_pa) ||
446 		    !virt_to_phys((void *)rx_pa)) {
447 			rc = FFA_INVALID_PARAMETERS;
448 			goto out;
449 		}
450 
451 		rxtx->tx = (void *)tx_pa;
452 		rxtx->rx = (void *)rx_pa;
453 	}
454 
455 	rxtx->size = sz;
456 	rxtx->tx_is_mine = true;
457 	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
458 	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
459 out:
460 	cpu_spin_unlock(&rxtx->spinlock);
461 	set_simple_ret_val(args, rc);
462 }
463 
464 void spmc_handle_rxtx_unmap(struct thread_smc_1_2_regs *args,
465 			    struct ffa_rxtx *rxtx)
466 {
467 	int rc = FFA_INVALID_PARAMETERS;
468 
469 	cpu_spin_lock(&rxtx->spinlock);
470 
471 	if (!rxtx->size)
472 		goto out;
473 
474 	/*
475 	 * We don't unmap the SP memory as the SP might still use it.
476 	 * We avoid to make changes to nexus mappings at this stage since
477 	 * there currently isn't a way to replicate those changes to all
478 	 * partitions.
479 	 */
480 	if (is_nw_buf(rxtx) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
481 		unmap_buf(rxtx->rx, rxtx->size);
482 		unmap_buf(rxtx->tx, rxtx->size);
483 	}
484 	rxtx->size = 0;
485 	rxtx->rx = NULL;
486 	rxtx->tx = NULL;
487 	rc = 0;
488 out:
489 	cpu_spin_unlock(&rxtx->spinlock);
490 	set_simple_ret_val(args, rc);
491 }
492 
493 void spmc_handle_rx_release(struct thread_smc_1_2_regs *args,
494 			    struct ffa_rxtx *rxtx)
495 {
496 	int rc = 0;
497 
498 	cpu_spin_lock(&rxtx->spinlock);
499 	/* The sender's RX is our TX */
500 	if (!rxtx->size || rxtx->tx_is_mine) {
501 		rc = FFA_DENIED;
502 	} else {
503 		rc = 0;
504 		rxtx->tx_is_mine = true;
505 	}
506 	cpu_spin_unlock(&rxtx->spinlock);
507 
508 	set_simple_ret_val(args, rc);
509 }
510 
511 static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
512 {
513 	return !w0 && !w1 && !w2 && !w3;
514 }
515 
516 static bool is_my_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
517 {
518 	/*
519 	 * This depends on which UUID we have been assigned.
520 	 * TODO add a generic mechanism to obtain our UUID.
521 	 *
522 	 * The test below is for the hard coded UUID
523 	 * 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
524 	 */
525 	return w0 == my_uuid_words[0] && w1 == my_uuid_words[1] &&
526 	       w2 == my_uuid_words[2] && w3 == my_uuid_words[3];
527 }
528 
529 TEE_Result spmc_fill_partition_entry(uint32_t ffa_vers, void *buf, size_t blen,
530 				     size_t idx, uint16_t endpoint_id,
531 				     uint16_t execution_context,
532 				     uint32_t part_props,
533 				     const uint32_t uuid_words[4])
534 {
535 	struct ffa_partition_info_x *fpi = NULL;
536 	size_t fpi_size = sizeof(*fpi);
537 
538 	if (ffa_vers >= FFA_VERSION_1_1)
539 		fpi_size += FFA_UUID_SIZE;
540 
541 	if ((idx + 1) * fpi_size > blen)
542 		return TEE_ERROR_OUT_OF_MEMORY;
543 
544 	fpi = (void *)((vaddr_t)buf + idx * fpi_size);
545 	fpi->id = endpoint_id;
546 	/* Number of execution contexts implemented by this partition */
547 	fpi->execution_context = execution_context;
548 
549 	fpi->partition_properties = part_props;
550 
551 	/* In FF-A 1.0 only bits [2:0] are defined, let's mask others */
552 	if (ffa_vers < FFA_VERSION_1_1)
553 		fpi->partition_properties &= FFA_PART_PROP_DIRECT_REQ_RECV |
554 					     FFA_PART_PROP_DIRECT_REQ_SEND |
555 					     FFA_PART_PROP_INDIRECT_MSGS;
556 
557 	if (ffa_vers >= FFA_VERSION_1_1) {
558 		if (uuid_words)
559 			memcpy(fpi->uuid, uuid_words, FFA_UUID_SIZE);
560 		else
561 			memset(fpi->uuid, 0, FFA_UUID_SIZE);
562 	}
563 
564 	return TEE_SUCCESS;
565 }
566 
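/*
 * Sizing note, derived from the checks above: an FF-A 1.1 (or later)
 * caller consumes sizeof(struct ffa_partition_info_x) + FFA_UUID_SIZE
 * bytes per entry while an FF-A 1.0 caller consumes only
 * sizeof(struct ffa_partition_info_x), and an entry that would not fit
 * entirely within blen is rejected with TEE_ERROR_OUT_OF_MEMORY.
 */
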
567 static int handle_partition_info_get_all(size_t *elem_count,
568 					 struct ffa_rxtx *rxtx, bool count_only)
569 {
570 	if (!count_only) {
571 		/* Add OP-TEE SP */
572 		if (spmc_fill_partition_entry(rxtx->ffa_vers, rxtx->tx,
573 					      rxtx->size, 0, optee_endpoint_id,
574 					      CFG_TEE_CORE_NB_CORE,
575 					      my_part_props, my_uuid_words))
576 			return FFA_NO_MEMORY;
577 	}
578 	*elem_count = 1;
579 
580 	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
581 		if (sp_partition_info_get(rxtx->ffa_vers, rxtx->tx, rxtx->size,
582 					  NULL, elem_count, count_only))
583 			return FFA_NO_MEMORY;
584 	}
585 
586 	return FFA_OK;
587 }
588 
589 void spmc_handle_partition_info_get(struct thread_smc_1_2_regs *args,
590 				    struct ffa_rxtx *rxtx)
591 {
592 	TEE_Result res = TEE_SUCCESS;
593 	uint32_t ret_fid = FFA_ERROR;
594 	uint32_t fpi_size = 0;
595 	uint32_t rc = 0;
596 	bool count_only = args->a5 & FFA_PARTITION_INFO_GET_COUNT_FLAG;
597 
598 	if (!count_only) {
599 		cpu_spin_lock(&rxtx->spinlock);
600 
601 		if (!rxtx->size || !rxtx->tx_is_mine) {
602 			rc = FFA_BUSY;
603 			goto out;
604 		}
605 	}
606 
607 	if (is_nil_uuid(args->a1, args->a2, args->a3, args->a4)) {
608 		size_t elem_count = 0;
609 
610 		ret_fid = handle_partition_info_get_all(&elem_count, rxtx,
611 							count_only);
612 
613 		if (ret_fid) {
614 			rc = ret_fid;
615 			ret_fid = FFA_ERROR;
616 		} else {
617 			ret_fid = FFA_SUCCESS_32;
618 			rc = elem_count;
619 		}
620 
621 		goto out;
622 	}
623 
624 	if (is_my_uuid(args->a1, args->a2, args->a3, args->a4)) {
625 		if (!count_only) {
626 			res = spmc_fill_partition_entry(rxtx->ffa_vers,
627 							rxtx->tx, rxtx->size, 0,
628 							optee_endpoint_id,
629 							CFG_TEE_CORE_NB_CORE,
630 							my_part_props,
631 							my_uuid_words);
632 			if (res) {
633 				ret_fid = FFA_ERROR;
634 				rc = FFA_INVALID_PARAMETERS;
635 				goto out;
636 			}
637 		}
638 		rc = 1;
639 	} else if (IS_ENABLED(CFG_SECURE_PARTITION)) {
640 		uint32_t uuid_array[4] = { 0 };
641 		TEE_UUID uuid = { };
642 		size_t count = 0;
643 
644 		uuid_array[0] = args->a1;
645 		uuid_array[1] = args->a2;
646 		uuid_array[2] = args->a3;
647 		uuid_array[3] = args->a4;
648 		tee_uuid_from_octets(&uuid, (uint8_t *)uuid_array);
649 
650 		res = sp_partition_info_get(rxtx->ffa_vers, rxtx->tx,
651 					    rxtx->size, &uuid, &count,
652 					    count_only);
653 		if (res != TEE_SUCCESS) {
654 			ret_fid = FFA_ERROR;
655 			rc = FFA_INVALID_PARAMETERS;
656 			goto out;
657 		}
658 		rc = count;
659 	} else {
660 		ret_fid = FFA_ERROR;
661 		rc = FFA_INVALID_PARAMETERS;
662 		goto out;
663 	}
664 
665 	ret_fid = FFA_SUCCESS_32;
666 
667 out:
668 	if (ret_fid == FFA_SUCCESS_32 && !count_only &&
669 	    rxtx->ffa_vers >= FFA_VERSION_1_1)
670 		fpi_size = sizeof(struct ffa_partition_info_x) + FFA_UUID_SIZE;
671 
672 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, fpi_size,
673 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
674 	if (!count_only) {
675 		rxtx->tx_is_mine = false;
676 		cpu_spin_unlock(&rxtx->spinlock);
677 	}
678 }
679 
680 static void spmc_handle_run(struct thread_smc_1_2_regs *args)
681 {
682 	uint16_t endpoint = FFA_TARGET_INFO_GET_SP_ID(args->a1);
683 	uint16_t thread_id = FFA_TARGET_INFO_GET_VCPU_ID(args->a1);
684 	uint32_t rc = FFA_OK;
685 
686 	if (endpoint != optee_endpoint_id) {
687 		/*
688 		 * The endpoint should be an SP, try to resume the SP from
689 		 * preempted into busy state.
690 		 */
691 		rc = spmc_sp_resume_from_preempted(endpoint);
692 		if (rc)
693 			goto out;
694 	}
695 
696 	thread_resume_from_rpc(thread_id, 0, 0, 0, 0);
697 
698 	/* thread_resume_from_rpc() returns only if the thread_id is invalid */
699 	rc = FFA_INVALID_PARAMETERS;
700 
701 out:
702 	set_simple_ret_val(args, rc);
703 }
704 #endif /*CFG_CORE_SEL1_SPMC*/
705 
706 static struct notif_vm_bitmap *get_notif_vm_bitmap(struct guest_partition *prtn,
707 						   uint16_t vm_id)
708 {
709 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
710 		if (!prtn)
711 			return NULL;
712 		assert(vm_id == virt_get_guest_id(prtn));
713 		return virt_get_guest_spec_data(prtn, notif_vm_bitmap_id);
714 	}
715 	if (vm_id)
716 		return NULL;
717 	return &default_notif_vm_bitmap;
718 }
719 
720 static uint32_t spmc_enable_async_notif(uint32_t bottom_half_value,
721 					uint16_t vm_id)
722 {
723 	struct guest_partition *prtn = NULL;
724 	struct notif_vm_bitmap *nvb = NULL;
725 	uint32_t old_itr_status = 0;
726 	uint32_t res = 0;
727 
728 	if (!spmc_notif_is_ready) {
729 		/*
730 		 * This should never happen if normal world respects the
731 		 * exchanged capabilities.
732 		 */
733 		EMSG("Asynchronous notifications are not ready");
734 		return TEE_ERROR_NOT_IMPLEMENTED;
735 	}
736 
737 	if (bottom_half_value >= OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE) {
738 		EMSG("Invalid bottom half value %"PRIu32, bottom_half_value);
739 		return TEE_ERROR_BAD_PARAMETERS;
740 	}
741 
742 	prtn = virt_get_guest(vm_id);
743 	nvb = get_notif_vm_bitmap(prtn, vm_id);
744 	if (!nvb) {
745 		res = TEE_ERROR_BAD_PARAMETERS;
746 		goto out;
747 	}
748 
749 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
750 	nvb->do_bottom_half_value = bottom_half_value;
751 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
752 
753 	notif_deliver_atomic_event(NOTIF_EVENT_STARTED, vm_id);
754 	res = TEE_SUCCESS;
755 out:
756 	virt_put_guest(prtn);
757 	return res;
758 }
759 
760 static void handle_yielding_call(struct thread_smc_1_2_regs *args,
761 				 uint32_t direct_resp_fid)
762 {
763 	TEE_Result res = 0;
764 
765 	thread_check_canaries();
766 
767 #ifdef ARM64
768 	/* Save this for a possible RPC later on */
769 	thread_get_core_local()->direct_resp_fid = direct_resp_fid;
770 #endif
771 
772 	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
773 		/* Note connection to struct thread_rpc_arg::ret */
774 		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
775 				       0);
776 		res = TEE_ERROR_BAD_PARAMETERS;
777 	} else {
778 		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
779 				     args->a6, args->a7);
780 		res = TEE_ERROR_BUSY;
781 	}
782 	spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1),
783 		      0, res, 0, 0);
784 }
785 
786 static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
787 {
788 	uint64_t cookie = reg_pair_to_64(a5, a4);
789 	uint32_t res = 0;
790 
791 	res = mobj_ffa_unregister_by_cookie(cookie);
792 	switch (res) {
793 	case TEE_SUCCESS:
794 	case TEE_ERROR_ITEM_NOT_FOUND:
795 		return 0;
796 	case TEE_ERROR_BUSY:
797 		EMSG("res %#"PRIx32, res);
798 		return FFA_BUSY;
799 	default:
800 		EMSG("res %#"PRIx32, res);
801 		return FFA_INVALID_PARAMETERS;
802 	}
803 }
804 
805 static void handle_blocking_call(struct thread_smc_1_2_regs *args,
806 				 uint32_t direct_resp_fid)
807 {
808 	uint32_t sec_caps = 0;
809 
810 	switch (args->a3) {
811 	case OPTEE_FFA_GET_API_VERSION:
812 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
813 			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
814 			      0);
815 		break;
816 	case OPTEE_FFA_GET_OS_VERSION:
817 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
818 			      CFG_OPTEE_REVISION_MAJOR,
819 			      CFG_OPTEE_REVISION_MINOR,
820 			      TEE_IMPL_GIT_SHA1 >> 32);
821 		break;
822 	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
823 		sec_caps = OPTEE_FFA_SEC_CAP_ARG_OFFSET;
824 		if (spmc_notif_is_ready)
825 			sec_caps |= OPTEE_FFA_SEC_CAP_ASYNC_NOTIF;
826 		if (IS_ENABLED(CFG_RPMB_ANNOUNCE_PROBE_CAP))
827 			sec_caps |= OPTEE_FFA_SEC_CAP_RPMB_PROBE;
828 		spmc_set_args(args, direct_resp_fid,
829 			      swap_src_dst(args->a1), 0, 0,
830 			      THREAD_RPC_MAX_NUM_PARAMS, sec_caps);
831 		break;
832 	case OPTEE_FFA_UNREGISTER_SHM:
833 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
834 			      handle_unregister_shm(args->a4, args->a5), 0, 0);
835 		break;
836 	case OPTEE_FFA_ENABLE_ASYNC_NOTIF:
837 		spmc_set_args(args, direct_resp_fid,
838 			      swap_src_dst(args->a1), 0,
839 			      spmc_enable_async_notif(args->a4,
840 						      FFA_SRC(args->a1)),
841 			      0, 0);
842 		break;
843 	default:
844 		EMSG("Unhandled blocking service ID %#"PRIx32,
845 		     (uint32_t)args->a3);
846 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
847 			      TEE_ERROR_BAD_PARAMETERS, 0, 0);
848 	}
849 }
850 
851 static void handle_framework_direct_request(struct thread_smc_1_2_regs *args,
852 					    struct ffa_rxtx *rxtx,
853 					    uint32_t direct_resp_fid)
854 {
855 	uint32_t w0 = FFA_ERROR;
856 	uint32_t w1 = FFA_PARAM_MBZ;
857 	uint32_t w2 = FFA_NOT_SUPPORTED;
858 	uint32_t w3 = FFA_PARAM_MBZ;
859 
860 	switch (args->a2 & FFA_MSG_TYPE_MASK) {
861 	case FFA_MSG_SEND_VM_CREATED:
862 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
863 			uint16_t guest_id = args->a5;
864 			TEE_Result res = virt_guest_created(guest_id);
865 
866 			w0 = direct_resp_fid;
867 			w1 = swap_src_dst(args->a1);
868 			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_CREATED;
869 			if (res == TEE_SUCCESS)
870 				w3 = FFA_OK;
871 			else if (res == TEE_ERROR_OUT_OF_MEMORY)
872 				w3 = FFA_DENIED;
873 			else
874 				w3 = FFA_INVALID_PARAMETERS;
875 		}
876 		break;
877 	case FFA_MSG_SEND_VM_DESTROYED:
878 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
879 			uint16_t guest_id = args->a5;
880 			TEE_Result res = virt_guest_destroyed(guest_id);
881 
882 			w0 = direct_resp_fid;
883 			w1 = swap_src_dst(args->a1);
884 			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_DESTROYED;
885 			if (res == TEE_SUCCESS)
886 				w3 = FFA_OK;
887 			else
888 				w3 = FFA_INVALID_PARAMETERS;
889 		}
890 		break;
891 	case FFA_MSG_VERSION_REQ:
892 		w0 = direct_resp_fid;
893 		w1 = swap_src_dst(args->a1);
894 		w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_VERSION_RESP;
895 		w3 = spmc_exchange_version(args->a3, rxtx);
896 		break;
897 	default:
898 		break;
899 	}
900 	spmc_set_args(args, w0, w1, w2, w3, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
901 }
902 
903 static void handle_direct_request(struct thread_smc_1_2_regs *args,
904 				  struct ffa_rxtx *rxtx)
905 {
906 	uint32_t direct_resp_fid = 0;
907 
908 	if (IS_ENABLED(CFG_SECURE_PARTITION) &&
909 	    FFA_DST(args->a1) != spmc_id &&
910 	    FFA_DST(args->a1) != optee_endpoint_id) {
911 		spmc_sp_start_thread(args);
912 		return;
913 	}
914 
915 	if (OPTEE_SMC_IS_64(args->a0))
916 		direct_resp_fid = FFA_MSG_SEND_DIRECT_RESP_64;
917 	else
918 		direct_resp_fid = FFA_MSG_SEND_DIRECT_RESP_32;
919 
920 	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
921 		handle_framework_direct_request(args, rxtx, direct_resp_fid);
922 		return;
923 	}
924 
925 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
926 	    virt_set_guest(get_sender_id(args->a1))) {
927 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
928 			      TEE_ERROR_ITEM_NOT_FOUND, 0, 0);
929 		return;
930 	}
931 
932 	if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
933 		handle_yielding_call(args, direct_resp_fid);
934 	else
935 		handle_blocking_call(args, direct_resp_fid);
936 
937 	/*
938 	 * Note that handle_yielding_call() typically only returns if a
939 	 * thread cannot be allocated or found. virt_unset_guest() is also
940 	 * called from thread_state_suspend() and thread_state_free().
941 	 */
942 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
943 		virt_unset_guest();
944 }
945 
946 int spmc_read_mem_transaction(uint32_t ffa_vers, void *buf, size_t blen,
947 			      struct ffa_mem_transaction_x *trans)
948 {
949 	uint16_t mem_reg_attr = 0;
950 	uint32_t flags = 0;
951 	uint32_t count = 0;
952 	uint32_t offs = 0;
953 	uint32_t size = 0;
954 	size_t n = 0;
955 
956 	if (!IS_ALIGNED_WITH_TYPE(buf, uint64_t))
957 		return FFA_INVALID_PARAMETERS;
958 
959 	if (ffa_vers >= FFA_VERSION_1_1) {
960 		struct ffa_mem_transaction_1_1 *descr = NULL;
961 
962 		if (blen < sizeof(*descr))
963 			return FFA_INVALID_PARAMETERS;
964 
965 		descr = buf;
966 		trans->sender_id = READ_ONCE(descr->sender_id);
967 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
968 		flags = READ_ONCE(descr->flags);
969 		trans->global_handle = READ_ONCE(descr->global_handle);
970 		trans->tag = READ_ONCE(descr->tag);
971 
972 		count = READ_ONCE(descr->mem_access_count);
973 		size = READ_ONCE(descr->mem_access_size);
974 		offs = READ_ONCE(descr->mem_access_offs);
975 	} else {
976 		struct ffa_mem_transaction_1_0 *descr = NULL;
977 
978 		if (blen < sizeof(*descr))
979 			return FFA_INVALID_PARAMETERS;
980 
981 		descr = buf;
982 		trans->sender_id = READ_ONCE(descr->sender_id);
983 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
984 		flags = READ_ONCE(descr->flags);
985 		trans->global_handle = READ_ONCE(descr->global_handle);
986 		trans->tag = READ_ONCE(descr->tag);
987 
988 		count = READ_ONCE(descr->mem_access_count);
989 		size = sizeof(struct ffa_mem_access);
990 		offs = offsetof(struct ffa_mem_transaction_1_0,
991 				mem_access_array);
992 	}
993 
994 	if (mem_reg_attr > UINT8_MAX || flags > UINT8_MAX ||
995 	    size > UINT8_MAX || count > UINT8_MAX || offs > UINT16_MAX)
996 		return FFA_INVALID_PARAMETERS;
997 
998 	/* Check that the endpoint memory access descriptor array fits */
999 	if (MUL_OVERFLOW(size, count, &n) || ADD_OVERFLOW(offs, n, &n) ||
1000 	    n > blen)
1001 		return FFA_INVALID_PARAMETERS;
1002 
1003 	trans->mem_reg_attr = mem_reg_attr;
1004 	trans->flags = flags;
1005 	trans->mem_access_size = size;
1006 	trans->mem_access_count = count;
1007 	trans->mem_access_offs = offs;
1008 	return 0;
1009 }
1010 
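/*
 * Validation sketch (the numbers are hypothetical): for an FF-A 1.1
 * descriptor with mem_access_offs 48, mem_access_size 16 and
 * mem_access_count 2, the overflow-safe checks above require
 * 48 + 16 * 2 = 80 <= blen, that is, the whole endpoint memory access
 * descriptor array must lie inside the buffer fragment passed to us.
 */
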
1011 #if defined(CFG_CORE_SEL1_SPMC)
1012 static int get_acc_perms(vaddr_t mem_acc_base, unsigned int mem_access_size,
1013 			 unsigned int mem_access_count, uint8_t *acc_perms,
1014 			 unsigned int *region_offs)
1015 {
1016 	struct ffa_mem_access_perm *descr = NULL;
1017 	struct ffa_mem_access *mem_acc = NULL;
1018 	unsigned int n = 0;
1019 
1020 	for (n = 0; n < mem_access_count; n++) {
1021 		mem_acc = (void *)(mem_acc_base + mem_access_size * n);
1022 		descr = &mem_acc->access_perm;
1023 		if (READ_ONCE(descr->endpoint_id) == optee_endpoint_id) {
1024 			*acc_perms = READ_ONCE(descr->perm);
1025 			*region_offs = READ_ONCE(mem_acc[n].region_offs);
1026 			return 0;
1027 		}
1028 	}
1029 
1030 	return FFA_INVALID_PARAMETERS;
1031 }
1032 
1033 static int mem_share_init(struct ffa_mem_transaction_x *mem_trans, void *buf,
1034 			  size_t blen, unsigned int *page_count,
1035 			  unsigned int *region_count, size_t *addr_range_offs)
1036 {
1037 	const uint16_t exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
1038 	const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
1039 	struct ffa_mem_region *region_descr = NULL;
1040 	unsigned int region_descr_offs = 0;
1041 	uint8_t mem_acc_perm = 0;
1042 	size_t n = 0;
1043 
1044 	if (mem_trans->mem_reg_attr != exp_mem_reg_attr)
1045 		return FFA_INVALID_PARAMETERS;
1046 
1047 	/* Check that the access permissions match what's expected */
1048 	if (get_acc_perms((vaddr_t)buf + mem_trans->mem_access_offs,
1049 			  mem_trans->mem_access_size,
1050 			  mem_trans->mem_access_count,
1051 			  &mem_acc_perm, &region_descr_offs) ||
1052 	    mem_acc_perm != exp_mem_acc_perm)
1053 		return FFA_INVALID_PARAMETERS;
1054 
1055 	/* Check that the Composite memory region descriptor fits */
1056 	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
1057 	    n > blen)
1058 		return FFA_INVALID_PARAMETERS;
1059 
1060 	if (!IS_ALIGNED_WITH_TYPE((vaddr_t)buf + region_descr_offs,
1061 				  struct ffa_mem_region))
1062 		return FFA_INVALID_PARAMETERS;
1063 
1064 	region_descr = (struct ffa_mem_region *)((vaddr_t)buf +
1065 						 region_descr_offs);
1066 	*page_count = READ_ONCE(region_descr->total_page_count);
1067 	*region_count = READ_ONCE(region_descr->address_range_count);
1068 	*addr_range_offs = n;
1069 	return 0;
1070 }
1071 
1072 static int add_mem_share_helper(struct mem_share_state *s, void *buf,
1073 				size_t flen)
1074 {
1075 	unsigned int region_count = flen / sizeof(struct ffa_address_range);
1076 	struct ffa_address_range *arange = NULL;
1077 	unsigned int n = 0;
1078 
1079 	if (region_count > s->region_count)
1080 		region_count = s->region_count;
1081 
1082 	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range))
1083 		return FFA_INVALID_PARAMETERS;
1084 	arange = buf;
1085 
1086 	for (n = 0; n < region_count; n++) {
1087 		unsigned int page_count = READ_ONCE(arange[n].page_count);
1088 		uint64_t addr = READ_ONCE(arange[n].address);
1089 
1090 		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
1091 					  addr, page_count))
1092 			return FFA_INVALID_PARAMETERS;
1093 	}
1094 
1095 	s->region_count -= region_count;
1096 	if (s->region_count)
1097 		return region_count * sizeof(*arange);
1098 
1099 	if (s->current_page_idx != s->page_count)
1100 		return FFA_INVALID_PARAMETERS;
1101 
1102 	return 0;
1103 }
1104 
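/*
 * Return value convention used by add_mem_share_helper() and
 * add_mem_share_frag(): a negative value is an FFA error code, 0 means
 * the complete descriptor has been consumed, and a positive value is the
 * number of bytes consumed from this fragment when more address ranges
 * are expected in later fragments.
 */
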
1105 static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
1106 {
1107 	int rc = 0;
1108 
1109 	rc = add_mem_share_helper(&s->share, buf, flen);
1110 	if (rc >= 0) {
1111 		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
1112 			/* We're not at the end of the descriptor yet */
1113 			if (s->share.region_count)
1114 				return s->frag_offset;
1115 
1116 			/* We're done */
1117 			rc = 0;
1118 		} else {
1119 			rc = FFA_INVALID_PARAMETERS;
1120 		}
1121 	}
1122 
1123 	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
1124 	if (rc < 0)
1125 		mobj_ffa_sel1_spmc_delete(s->share.mf);
1126 	else
1127 		mobj_ffa_push_to_inactive(s->share.mf);
1128 	free(s);
1129 
1130 	return rc;
1131 }
1132 
1133 static bool is_sp_share(struct ffa_mem_transaction_x *mem_trans,
1134 			void *buf)
1135 {
1136 	struct ffa_mem_access_perm *perm = NULL;
1137 	struct ffa_mem_access *mem_acc = NULL;
1138 
1139 	if (!IS_ENABLED(CFG_SECURE_PARTITION))
1140 		return false;
1141 
1142 	if (mem_trans->mem_access_count < 1)
1143 		return false;
1144 
1145 	mem_acc = (void *)((vaddr_t)buf + mem_trans->mem_access_offs);
1146 	perm = &mem_acc->access_perm;
1147 
1148 	/*
1149 	 * perm->endpoint_id is read here only to check if the endpoint is
1150 	 * OP-TEE. We read it again later on, but there are some additional
1151 	 * checks there to make sure that the data is correct.
1152 	 */
1153 	return READ_ONCE(perm->endpoint_id) != optee_endpoint_id;
1154 }
1155 
1156 static int add_mem_share(struct ffa_mem_transaction_x *mem_trans,
1157 			 tee_mm_entry_t *mm, void *buf, size_t blen,
1158 			 size_t flen, uint64_t *global_handle)
1159 {
1160 	int rc = 0;
1161 	struct mem_share_state share = { };
1162 	size_t addr_range_offs = 0;
1163 	uint64_t cookie = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
1164 	size_t n = 0;
1165 
1166 	rc = mem_share_init(mem_trans, buf, flen, &share.page_count,
1167 			    &share.region_count, &addr_range_offs);
1168 	if (rc)
1169 		return rc;
1170 
1171 	if (!share.page_count || !share.region_count)
1172 		return FFA_INVALID_PARAMETERS;
1173 
1174 	if (MUL_OVERFLOW(share.region_count,
1175 			 sizeof(struct ffa_address_range), &n) ||
1176 	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
1177 		return FFA_INVALID_PARAMETERS;
1178 
1179 	if (mem_trans->global_handle)
1180 		cookie = mem_trans->global_handle;
1181 	share.mf = mobj_ffa_sel1_spmc_new(cookie, share.page_count);
1182 	if (!share.mf)
1183 		return FFA_NO_MEMORY;
1184 
1185 	if (flen != blen) {
1186 		struct mem_frag_state *s = calloc(1, sizeof(*s));
1187 
1188 		if (!s) {
1189 			rc = FFA_NO_MEMORY;
1190 			goto err;
1191 		}
1192 		s->share = share;
1193 		s->mm = mm;
1194 		s->frag_offset = addr_range_offs;
1195 
1196 		SLIST_INSERT_HEAD(&frag_state_head, s, link);
1197 		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
1198 					flen - addr_range_offs);
1199 
1200 		if (rc >= 0)
1201 			*global_handle = mobj_ffa_get_cookie(share.mf);
1202 
1203 		return rc;
1204 	}
1205 
1206 	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
1207 				  flen - addr_range_offs);
1208 	if (rc) {
1209 		/*
1210 		 * A positive number of consumed bytes may be returned instead of
1211 		 * 0 for done, which for a complete descriptor is also an error.
1212 		 */
1213 		rc = FFA_INVALID_PARAMETERS;
1214 		goto err;
1215 	}
1216 
1217 	*global_handle = mobj_ffa_push_to_inactive(share.mf);
1218 
1219 	return 0;
1220 err:
1221 	mobj_ffa_sel1_spmc_delete(share.mf);
1222 	return rc;
1223 }
1224 
1225 static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
1226 				 unsigned int page_count,
1227 				 uint64_t *global_handle, struct ffa_rxtx *rxtx)
1228 {
1229 	struct ffa_mem_transaction_x mem_trans = { };
1230 	int rc = 0;
1231 	size_t len = 0;
1232 	void *buf = NULL;
1233 	tee_mm_entry_t *mm = NULL;
1234 	vaddr_t offs = pbuf & SMALL_PAGE_MASK;
1235 
1236 	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
1237 		return FFA_INVALID_PARAMETERS;
1238 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
1239 		return FFA_INVALID_PARAMETERS;
1240 
1241 	/*
1242 	 * Check that the length reported in flen is covered by len even
1243 	 * if the offset is taken into account.
1244 	 */
1245 	if (len < flen || len - offs < flen)
1246 		return FFA_INVALID_PARAMETERS;
1247 
1248 	mm = tee_mm_alloc(&core_virt_shm_pool, len);
1249 	if (!mm)
1250 		return FFA_NO_MEMORY;
1251 
1252 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
1253 					  page_count, MEM_AREA_NSEC_SHM)) {
1254 		rc = FFA_INVALID_PARAMETERS;
1255 		goto out;
1256 	}
1257 	buf = (void *)(tee_mm_get_smem(mm) + offs);
1258 
1259 	cpu_spin_lock(&rxtx->spinlock);
1260 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, buf, flen, &mem_trans);
1261 	if (rc)
1262 		goto unlock;
1263 
1264 	if (is_sp_share(&mem_trans, buf)) {
1265 		rc = spmc_sp_add_share(&mem_trans, buf, blen, flen,
1266 				       global_handle, NULL);
1267 		goto unlock;
1268 	}
1269 
1270 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1271 	    virt_set_guest(mem_trans.sender_id)) {
1272 		rc = FFA_DENIED;
1273 		goto unlock;
1274 	}
1275 
1276 	rc = add_mem_share(&mem_trans, mm, buf, blen, flen, global_handle);
1277 
1278 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1279 		virt_unset_guest();
1280 
1281 unlock:
1282 	cpu_spin_unlock(&rxtx->spinlock);
1283 	if (rc > 0)
1284 		return rc;
1285 
1286 	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1287 out:
1288 	tee_mm_free(mm);
1289 	return rc;
1290 }
1291 
1292 static int handle_mem_share_rxbuf(size_t blen, size_t flen,
1293 				  uint64_t *global_handle,
1294 				  struct ffa_rxtx *rxtx)
1295 {
1296 	struct ffa_mem_transaction_x mem_trans = { };
1297 	int rc = FFA_DENIED;
1298 
1299 	cpu_spin_lock(&rxtx->spinlock);
1300 
1301 	if (!rxtx->rx || flen > rxtx->size)
1302 		goto out;
1303 
1304 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, flen,
1305 				       &mem_trans);
1306 	if (rc)
1307 		goto out;
1308 	if (is_sp_share(&mem_trans, rxtx->rx)) {
1309 		rc = spmc_sp_add_share(&mem_trans, rxtx, blen, flen,
1310 				       global_handle, NULL);
1311 		goto out;
1312 	}
1313 
1314 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1315 	    virt_set_guest(mem_trans.sender_id))
1316 		goto out;
1317 
1318 	rc = add_mem_share(&mem_trans, NULL, rxtx->rx, blen, flen,
1319 			   global_handle);
1320 
1321 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1322 		virt_unset_guest();
1323 
1324 out:
1325 	cpu_spin_unlock(&rxtx->spinlock);
1326 
1327 	return rc;
1328 }
1329 
1330 static void handle_mem_share(struct thread_smc_1_2_regs *args,
1331 			     struct ffa_rxtx *rxtx)
1332 {
1333 	uint32_t tot_len = args->a1;
1334 	uint32_t frag_len = args->a2;
1335 	uint64_t addr = args->a3;
1336 	uint32_t page_count = args->a4;
1337 	uint32_t ret_w1 = 0;
1338 	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
1339 	uint32_t ret_w3 = 0;
1340 	uint32_t ret_fid = FFA_ERROR;
1341 	uint64_t global_handle = 0;
1342 	int rc = 0;
1343 
1344 	/* Check that the MBZs are indeed 0 */
1345 	if (args->a5 || args->a6 || args->a7)
1346 		goto out;
1347 
1348 	/* Check that fragment length doesn't exceed total length */
1349 	if (frag_len > tot_len)
1350 		goto out;
1351 
1352 	/* Check for 32-bit calling convention */
1353 	if (args->a0 == FFA_MEM_SHARE_32)
1354 		addr &= UINT32_MAX;
1355 
1356 	if (!addr) {
1357 		/*
1358 		 * The memory transaction descriptor is passed via our rx
1359 		 * buffer.
1360 		 */
1361 		if (page_count)
1362 			goto out;
1363 		rc = handle_mem_share_rxbuf(tot_len, frag_len, &global_handle,
1364 					    rxtx);
1365 	} else {
1366 		rc = handle_mem_share_tmem(addr, tot_len, frag_len, page_count,
1367 					   &global_handle, rxtx);
1368 	}
1369 	if (rc < 0) {
1370 		ret_w2 = rc;
1371 	} else if (rc > 0) {
1372 		ret_fid = FFA_MEM_FRAG_RX;
1373 		ret_w3 = rc;
1374 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1375 	} else {
1376 		ret_fid = FFA_SUCCESS_32;
1377 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1378 	}
1379 out:
1380 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1381 }
1382 
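/*
 * Example of the replies built above (the handle value is hypothetical,
 * and reg_pair_from_64() is assumed to store the high word in its first
 * output): on success a global handle such as 0x1122334455667788 is
 * returned with 0x55667788 in w2 and 0x11223344 in w3, while a fragmented
 * share answers with FFA_MEM_FRAG_RX carrying the handle in w1/w2 and the
 * number of bytes consumed so far in w3.
 */
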
1383 static struct mem_frag_state *get_frag_state(uint64_t global_handle)
1384 {
1385 	struct mem_frag_state *s = NULL;
1386 
1387 	SLIST_FOREACH(s, &frag_state_head, link)
1388 		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
1389 			return s;
1390 
1391 	return NULL;
1392 }
1393 
1394 static void handle_mem_frag_tx(struct thread_smc_1_2_regs *args,
1395 			       struct ffa_rxtx *rxtx)
1396 {
1397 	uint64_t global_handle = reg_pair_to_64(args->a2, args->a1);
1398 	size_t flen = args->a3;
1399 	uint32_t endpoint_id = args->a4;
1400 	struct mem_frag_state *s = NULL;
1401 	tee_mm_entry_t *mm = NULL;
1402 	unsigned int page_count = 0;
1403 	void *buf = NULL;
1404 	uint32_t ret_w1 = 0;
1405 	uint32_t ret_w2 = 0;
1406 	uint32_t ret_w3 = 0;
1407 	uint32_t ret_fid = 0;
1408 	int rc = 0;
1409 
1410 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1411 		uint16_t guest_id = endpoint_id >> 16;
1412 
1413 		if (!guest_id || virt_set_guest(guest_id)) {
1414 			rc = FFA_INVALID_PARAMETERS;
1415 			goto out_set_rc;
1416 		}
1417 	}
1418 
1419 	/*
1420 	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
1421 	 * requests.
1422 	 */
1423 
1424 	cpu_spin_lock(&rxtx->spinlock);
1425 
1426 	s = get_frag_state(global_handle);
1427 	if (!s) {
1428 		rc = FFA_INVALID_PARAMETERS;
1429 		goto out;
1430 	}
1431 
1432 	mm = s->mm;
1433 	if (mm) {
1434 		if (flen > tee_mm_get_bytes(mm)) {
1435 			rc = FFA_INVALID_PARAMETERS;
1436 			goto out;
1437 		}
1438 		page_count = s->share.page_count;
1439 		buf = (void *)tee_mm_get_smem(mm);
1440 	} else {
1441 		if (flen > rxtx->size) {
1442 			rc = FFA_INVALID_PARAMETERS;
1443 			goto out;
1444 		}
1445 		buf = rxtx->rx;
1446 	}
1447 
1448 	rc = add_mem_share_frag(s, buf, flen);
1449 out:
1450 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1451 		virt_unset_guest();
1452 
1453 	cpu_spin_unlock(&rxtx->spinlock);
1454 
1455 	if (rc <= 0 && mm) {
1456 		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1457 		tee_mm_free(mm);
1458 	}
1459 
1460 out_set_rc:
1461 	if (rc < 0) {
1462 		ret_fid = FFA_ERROR;
1463 		ret_w2 = rc;
1464 	} else if (rc > 0) {
1465 		ret_fid = FFA_MEM_FRAG_RX;
1466 		ret_w3 = rc;
1467 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1468 	} else {
1469 		ret_fid = FFA_SUCCESS_32;
1470 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1471 	}
1472 
1473 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1474 }
1475 
1476 static void handle_mem_reclaim(struct thread_smc_1_2_regs *args)
1477 {
1478 	int rc = FFA_INVALID_PARAMETERS;
1479 	uint64_t cookie = 0;
1480 
1481 	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
1482 		goto out;
1483 
1484 	cookie = reg_pair_to_64(args->a2, args->a1);
1485 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1486 		uint16_t guest_id = 0;
1487 
1488 		if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
1489 			guest_id = virt_find_guest_by_cookie(cookie);
1490 		} else {
1491 			guest_id = (cookie >> FFA_MEMORY_HANDLE_PRTN_SHIFT) &
1492 				   FFA_MEMORY_HANDLE_PRTN_MASK;
1493 		}
1494 		if (!guest_id)
1495 			goto out;
1496 		if (virt_set_guest(guest_id)) {
1497 			if (!virt_reclaim_cookie_from_destroyed_guest(guest_id,
1498 								      cookie))
1499 				rc = FFA_OK;
1500 			goto out;
1501 		}
1502 	}
1503 
1504 	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
1505 	case TEE_SUCCESS:
1506 		rc = FFA_OK;
1507 		break;
1508 	case TEE_ERROR_ITEM_NOT_FOUND:
1509 		DMSG("cookie %#"PRIx64" not found", cookie);
1510 		rc = FFA_INVALID_PARAMETERS;
1511 		break;
1512 	default:
1513 		DMSG("cookie %#"PRIx64" busy", cookie);
1514 		rc = FFA_DENIED;
1515 		break;
1516 	}
1517 
1518 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1519 		virt_unset_guest();
1520 
1521 out:
1522 	set_simple_ret_val(args, rc);
1523 }
1524 
1525 static void handle_notification_bitmap_create(struct thread_smc_1_2_regs *args)
1526 {
1527 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1528 	uint32_t ret_fid = FFA_ERROR;
1529 	uint32_t old_itr_status = 0;
1530 
1531 	if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
1532 	    !args->a5 && !args->a6 && !args->a7) {
1533 		struct guest_partition *prtn = NULL;
1534 		struct notif_vm_bitmap *nvb = NULL;
1535 		uint16_t vm_id = args->a1;
1536 
1537 		prtn = virt_get_guest(vm_id);
1538 		nvb = get_notif_vm_bitmap(prtn, vm_id);
1539 		if (!nvb) {
1540 			ret_val = FFA_INVALID_PARAMETERS;
1541 			goto out_virt_put;
1542 		}
1543 
1544 		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1545 
1546 		if (nvb->initialized) {
1547 			ret_val = FFA_DENIED;
1548 			goto out_unlock;
1549 		}
1550 
1551 		nvb->initialized = true;
1552 		nvb->do_bottom_half_value = -1;
1553 		ret_val = FFA_OK;
1554 		ret_fid = FFA_SUCCESS_32;
1555 out_unlock:
1556 		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1557 out_virt_put:
1558 		virt_put_guest(prtn);
1559 	}
1560 
1561 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1562 }
1563 
1564 static void handle_notification_bitmap_destroy(struct thread_smc_1_2_regs *args)
1565 {
1566 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1567 	uint32_t ret_fid = FFA_ERROR;
1568 	uint32_t old_itr_status = 0;
1569 
1570 	if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
1571 	    !args->a5 && !args->a6 && !args->a7) {
1572 		struct guest_partition *prtn = NULL;
1573 		struct notif_vm_bitmap *nvb = NULL;
1574 		uint16_t vm_id = args->a1;
1575 
1576 		prtn = virt_get_guest(vm_id);
1577 		nvb = get_notif_vm_bitmap(prtn, vm_id);
1578 		if (!nvb) {
1579 			ret_val = FFA_INVALID_PARAMETERS;
1580 			goto out_virt_put;
1581 		}
1582 
1583 		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1584 
1585 		if (nvb->pending || nvb->bound) {
1586 			ret_val = FFA_DENIED;
1587 			goto out_unlock;
1588 		}
1589 
1590 		memset(nvb, 0, sizeof(*nvb));
1591 		ret_val = FFA_OK;
1592 		ret_fid = FFA_SUCCESS_32;
1593 out_unlock:
1594 		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1595 out_virt_put:
1596 		virt_put_guest(prtn);
1597 	}
1598 
1599 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1600 }
1601 
1602 static void handle_notification_bind(struct thread_smc_1_2_regs *args)
1603 {
1604 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1605 	struct guest_partition *prtn = NULL;
1606 	struct notif_vm_bitmap *nvb = NULL;
1607 	uint32_t ret_fid = FFA_ERROR;
1608 	uint32_t old_itr_status = 0;
1609 	uint64_t bitmap = 0;
1610 	uint16_t vm_id = 0;
1611 
1612 	if (args->a5 || args->a6 || args->a7)
1613 		goto out;
1614 	if (args->a2) {
1615 		/* We only deal with global notifications */
1616 		ret_val = FFA_DENIED;
1617 		goto out;
1618 	}
1619 
1620 	/* The destination of the eventual notification */
1621 	vm_id = FFA_DST(args->a1);
1622 	bitmap = reg_pair_to_64(args->a4, args->a3);
1623 
1624 	prtn = virt_get_guest(vm_id);
1625 	nvb = get_notif_vm_bitmap(prtn, vm_id);
1626 	if (!nvb) {
1627 		ret_val = FFA_INVALID_PARAMETERS;
1628 		goto out_virt_put;
1629 	}
1630 
1631 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1632 
1633 	if ((bitmap & nvb->bound)) {
1634 		ret_val = FFA_DENIED;
1635 	} else {
1636 		nvb->bound |= bitmap;
1637 		ret_val = FFA_OK;
1638 		ret_fid = FFA_SUCCESS_32;
1639 	}
1640 
1641 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1642 out_virt_put:
1643 	virt_put_guest(prtn);
1644 out:
1645 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1646 }
1647 
1648 static void handle_notification_unbind(struct thread_smc_1_2_regs *args)
1649 {
1650 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1651 	struct guest_partition *prtn = NULL;
1652 	struct notif_vm_bitmap *nvb = NULL;
1653 	uint32_t ret_fid = FFA_ERROR;
1654 	uint32_t old_itr_status = 0;
1655 	uint64_t bitmap = 0;
1656 	uint16_t vm_id = 0;
1657 
1658 	if (args->a2 || args->a5 || args->a6 || args->a7)
1659 		goto out;
1660 
1661 	/* The destination of the eventual notification */
1662 	vm_id = FFA_DST(args->a1);
1663 	bitmap = reg_pair_to_64(args->a4, args->a3);
1664 
1665 	prtn = virt_get_guest(vm_id);
1666 	nvb = get_notif_vm_bitmap(prtn, vm_id);
1667 	if (!nvb) {
1668 		ret_val = FFA_INVALID_PARAMETERS;
1669 		goto out_virt_put;
1670 	}
1671 
1672 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1673 
1674 	if (bitmap & nvb->pending) {
1675 		ret_val = FFA_DENIED;
1676 	} else {
1677 		nvb->bound &= ~bitmap;
1678 		ret_val = FFA_OK;
1679 		ret_fid = FFA_SUCCESS_32;
1680 	}
1681 
1682 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1683 out_virt_put:
1684 	virt_put_guest(prtn);
1685 out:
1686 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1687 }
1688 
1689 static void handle_notification_get(struct thread_smc_1_2_regs *args)
1690 {
1691 	uint32_t w2 = FFA_INVALID_PARAMETERS;
1692 	struct guest_partition *prtn = NULL;
1693 	struct notif_vm_bitmap *nvb = NULL;
1694 	uint32_t ret_fid = FFA_ERROR;
1695 	uint32_t old_itr_status = 0;
1696 	uint16_t vm_id = 0;
1697 	uint32_t w3 = 0;
1698 
1699 	if (args->a5 || args->a6 || args->a7)
1700 		goto out;
1701 	if (!(args->a2 & 0x1)) {
1702 		ret_fid = FFA_SUCCESS_32;
1703 		w2 = 0;
1704 		goto out;
1705 	}
1706 	vm_id = FFA_DST(args->a1);
1707 
1708 	prtn = virt_get_guest(vm_id);
1709 	nvb = get_notif_vm_bitmap(prtn, vm_id);
1710 	if (!nvb)
1711 		goto out_virt_put;
1712 
1713 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1714 
1715 	reg_pair_from_64(nvb->pending, &w3, &w2);
1716 	nvb->pending = 0;
1717 	ret_fid = FFA_SUCCESS_32;
1718 
1719 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1720 out_virt_put:
1721 	virt_put_guest(prtn);
1722 out:
1723 	spmc_set_args(args, ret_fid, 0, w2, w3, 0, 0);
1724 }
1725 
1726 struct notif_info_get_state {
1727 	struct thread_smc_1_2_regs *args;
1728 	unsigned int ids_per_reg;
1729 	unsigned int ids_count;
1730 	unsigned int id_pos;
1731 	unsigned int count;
1732 	unsigned int max_list_count;
1733 	unsigned int list_count;
1734 };
1735 
1736 static bool add_id_in_regs(struct notif_info_get_state *state,
1737 			   uint16_t id)
1738 {
1739 	unsigned int reg_idx = state->id_pos / state->ids_per_reg + 3;
1740 	unsigned int reg_shift = (state->id_pos % state->ids_per_reg) * 16;
1741 
1742 	if (reg_idx > 7)
1743 		return false;
1744 
1745 	state->args->a[reg_idx] &= ~SHIFT_U64(0xffff, reg_shift);
1746 	state->args->a[reg_idx] |= (unsigned long)id << reg_shift;
1747 
1748 	state->id_pos++;
1749 	state->count++;
1750 	return true;
1751 }
1752 
1753 static bool add_id_count(struct notif_info_get_state *state)
1754 {
1755 	assert(state->list_count < state->max_list_count &&
1756 	       state->count >= 1 && state->count <= 4);
1757 
1758 	state->ids_count |= (state->count - 1) << (state->list_count * 2 + 12);
1759 	state->list_count++;
1760 	state->count = 0;
1761 
1762 	return state->list_count < state->max_list_count;
1763 }
1764 
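/*
 * Packing sketch for FFA_NOTIFICATION_INFO_GET, following the code above:
 * with the 32-bit convention two 16-bit IDs fit per register starting at
 * w3, so the first guest ID lands in bits [15:0] of w3 and a second one
 * in bits [31:16] of w3. The per-list ID counts are accumulated in
 * ids_count and merged into w2 together with list_count when the reply is
 * finalized in handle_notification_info_get().
 */
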
1765 static bool add_nvb_to_state(struct notif_info_get_state *state,
1766 			     uint16_t guest_id, struct notif_vm_bitmap *nvb)
1767 {
1768 	if (!nvb->pending)
1769 		return true;
1770 	/*
1771 	 * Add only the guest_id, meaning a global notification for this
1772 	 * guest.
1773 	 *
1774 	 * If there were notifications for one or more specific vCPUs we'd
1775 	 * add those before calling add_id_count(), but that's not supported.
1776 	 */
1777 	return add_id_in_regs(state, guest_id) && add_id_count(state);
1778 }
1779 
1780 static void handle_notification_info_get(struct thread_smc_1_2_regs *args)
1781 {
1782 	struct notif_info_get_state state = { .args = args };
1783 	uint32_t ffa_res = FFA_INVALID_PARAMETERS;
1784 	struct guest_partition *prtn = NULL;
1785 	struct notif_vm_bitmap *nvb = NULL;
1786 	uint32_t more_pending_flag = 0;
1787 	uint32_t itr_state = 0;
1788 	uint16_t guest_id = 0;
1789 
1790 	if (args->a1 || args->a2 || args->a3 || args->a4 || args->a5 ||
1791 	    args->a6 || args->a7)
1792 		goto err;
1793 
1794 	if (OPTEE_SMC_IS_64(args->a0)) {
1795 		spmc_set_args(args, FFA_SUCCESS_64, 0, 0, 0, 0, 0);
1796 		state.ids_per_reg = 4;
1797 		state.max_list_count = 31;
1798 	} else {
1799 		spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0);
1800 		state.ids_per_reg = 2;
1801 		state.max_list_count = 15;
1802 	}
1803 
1804 	while (true) {
1805 		/*
1806 		 * With NS-Virtualization we need to go through all
1807 		 * partitions to collect the notification bitmaps; without it
1808 		 * we just check the only notification bitmap we have.
1809 		 */
1810 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1811 			prtn = virt_next_guest(prtn);
1812 			if (!prtn)
1813 				break;
1814 			guest_id = virt_get_guest_id(prtn);
1815 		}
1816 		nvb = get_notif_vm_bitmap(prtn, guest_id);
1817 
1818 		itr_state = cpu_spin_lock_xsave(&spmc_notif_lock);
1819 		if (!add_nvb_to_state(&state, guest_id, nvb))
1820 			more_pending_flag = BIT(0);
1821 		cpu_spin_unlock_xrestore(&spmc_notif_lock, itr_state);
1822 
1823 		if (!IS_ENABLED(CFG_NS_VIRTUALIZATION) || more_pending_flag)
1824 			break;
1825 	}
1826 	virt_put_guest(prtn);
1827 
1828 	if (!state.id_pos) {
1829 		ffa_res = FFA_NO_DATA;
1830 		goto err;
1831 	}
1832 	args->a2 = (state.list_count << FFA_NOTIF_INFO_GET_ID_COUNT_SHIFT) |
1833 		   (state.ids_count << FFA_NOTIF_INFO_GET_ID_LIST_SHIFT) |
1834 		   more_pending_flag;
1835 	return;
1836 err:
1837 	spmc_set_args(args, FFA_ERROR, 0, ffa_res, 0, 0, 0);
1838 }
1839 
1840 void thread_spmc_set_async_notif_intid(int intid)
1841 {
1842 	assert(interrupt_can_raise_sgi(interrupt_get_main_chip()));
1843 	notif_intid = intid;
1844 	spmc_notif_is_ready = true;
1845 	DMSG("Asynchronous notifications are ready");
1846 }
1847 
1848 void notif_send_async(uint32_t value, uint16_t guest_id)
1849 {
1850 	struct guest_partition *prtn = NULL;
1851 	struct notif_vm_bitmap *nvb = NULL;
1852 	uint32_t old_itr_status = 0;
1853 
1854 	prtn = virt_get_guest(guest_id);
1855 	nvb = get_notif_vm_bitmap(prtn, guest_id);
1856 
1857 	if (nvb) {
1858 		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1859 		assert(value == NOTIF_VALUE_DO_BOTTOM_HALF &&
1860 		       spmc_notif_is_ready && nvb->do_bottom_half_value >= 0 &&
1861 		       notif_intid >= 0);
1862 		nvb->pending |= BIT64(nvb->do_bottom_half_value);
1863 		interrupt_raise_sgi(interrupt_get_main_chip(), notif_intid,
1864 				    ITR_CPU_MASK_TO_THIS_CPU);
1865 		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1866 	}
1867 
1868 	virt_put_guest(prtn);
1869 }
1870 #else
1871 void notif_send_async(uint32_t value, uint16_t guest_id)
1872 {
1873 	struct guest_partition *prtn = NULL;
1874 	struct notif_vm_bitmap *nvb = NULL;
1875 	/* global notification, delay notification interrupt */
1876 	uint32_t flags = BIT32(1);
1877 	int res = 0;
1878 
1879 	prtn = virt_get_guest(guest_id);
1880 	nvb = get_notif_vm_bitmap(prtn, guest_id);
1881 
1882 	if (nvb) {
1883 		assert(value == NOTIF_VALUE_DO_BOTTOM_HALF &&
1884 		       spmc_notif_is_ready && nvb->do_bottom_half_value >= 0);
1885 		res = ffa_set_notification(guest_id, optee_endpoint_id, flags,
1886 					   BIT64(nvb->do_bottom_half_value));
1887 		if (res) {
1888 			EMSG("notification set failed with error %d", res);
1889 			panic();
1890 		}
1891 	}
1892 
1893 	virt_put_guest(prtn);
1894 }
1895 #endif
1896 
1897 /* Only called from assembly */
1898 void thread_spmc_msg_recv(struct thread_smc_1_2_regs *args);
1899 void thread_spmc_msg_recv(struct thread_smc_1_2_regs *args)
1900 {
1901 	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
1902 	switch (args->a0) {
1903 #if defined(CFG_CORE_SEL1_SPMC)
1904 	case FFA_FEATURES:
1905 		handle_features(args);
1906 		break;
1907 	case FFA_SPM_ID_GET:
1908 		spmc_handle_spm_id_get(args);
1909 		break;
1910 #ifdef ARM64
1911 	case FFA_RXTX_MAP_64:
1912 #endif
1913 	case FFA_RXTX_MAP_32:
1914 		spmc_handle_rxtx_map(args, &my_rxtx);
1915 		break;
1916 	case FFA_RXTX_UNMAP:
1917 		spmc_handle_rxtx_unmap(args, &my_rxtx);
1918 		break;
1919 	case FFA_RX_RELEASE:
1920 		spmc_handle_rx_release(args, &my_rxtx);
1921 		break;
1922 	case FFA_PARTITION_INFO_GET:
1923 		spmc_handle_partition_info_get(args, &my_rxtx);
1924 		break;
1925 	case FFA_RUN:
1926 		spmc_handle_run(args);
1927 		break;
1928 #endif /*CFG_CORE_SEL1_SPMC*/
1929 	case FFA_INTERRUPT:
1930 		if (IS_ENABLED(CFG_CORE_SEL1_SPMC))
1931 			spmc_set_args(args, FFA_NORMAL_WORLD_RESUME, 0, 0, 0,
1932 				      0, 0);
1933 		else
1934 			spmc_set_args(args, FFA_MSG_WAIT, 0, 0, 0, 0, 0);
1935 		break;
1936 #ifdef ARM64
1937 	case FFA_MSG_SEND_DIRECT_REQ_64:
1938 #endif
1939 	case FFA_MSG_SEND_DIRECT_REQ_32:
1940 		handle_direct_request(args, &my_rxtx);
1941 		break;
1942 #if defined(CFG_CORE_SEL1_SPMC)
1943 #ifdef ARM64
1944 	case FFA_MEM_SHARE_64:
1945 #endif
1946 	case FFA_MEM_SHARE_32:
1947 		handle_mem_share(args, &my_rxtx);
1948 		break;
1949 	case FFA_MEM_RECLAIM:
1950 		if (!IS_ENABLED(CFG_SECURE_PARTITION) ||
1951 		    !ffa_mem_reclaim(args, NULL))
1952 			handle_mem_reclaim(args);
1953 		break;
1954 	case FFA_MEM_FRAG_TX:
1955 		handle_mem_frag_tx(args, &my_rxtx);
1956 		break;
1957 	case FFA_NOTIFICATION_BITMAP_CREATE:
1958 		handle_notification_bitmap_create(args);
1959 		break;
1960 	case FFA_NOTIFICATION_BITMAP_DESTROY:
1961 		handle_notification_bitmap_destroy(args);
1962 		break;
1963 	case FFA_NOTIFICATION_BIND:
1964 		handle_notification_bind(args);
1965 		break;
1966 	case FFA_NOTIFICATION_UNBIND:
1967 		handle_notification_unbind(args);
1968 		break;
1969 	case FFA_NOTIFICATION_GET:
1970 		handle_notification_get(args);
1971 		break;
1972 #ifdef ARM64
1973 	case FFA_NOTIFICATION_INFO_GET_64:
1974 #endif
1975 	case FFA_NOTIFICATION_INFO_GET_32:
1976 		handle_notification_info_get(args);
1977 		break;
1978 #endif /*CFG_CORE_SEL1_SPMC*/
1979 	case FFA_ERROR:
1980 		EMSG("Cannot handle FFA_ERROR(%d)", (int)args->a2);
1981 		if (!IS_ENABLED(CFG_CORE_SEL1_SPMC)) {
1982 			/*
1983 			 * The SPMC will return an FFA_ERROR back, so it is
1984 			 * better to panic() now than to flood the log.
1985 			 */
1986 			panic("FFA_ERROR from SPMC is fatal");
1987 		}
1988 		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
1989 			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1990 		break;
1991 	default:
1992 		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
1993 		set_simple_ret_val(args, FFA_NOT_SUPPORTED);
1994 	}
1995 }
1996 
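/*
 * Process an OPTEE_FFA_YIELDING_CALL_WITH_ARG request: map the shared
 * memory object identified by @cookie, use the struct optee_msg_arg at
 * @offset as call arguments and the area following it as this thread's
 * RPC buffer, then pass the request to tee_entry_std().
 */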
1997 static TEE_Result yielding_call_with_arg(uint64_t cookie, uint32_t offset)
1998 {
1999 	size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
2000 	struct thread_ctx *thr = threads + thread_get_id();
2001 	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
2002 	struct optee_msg_arg *arg = NULL;
2003 	struct mobj *mobj = NULL;
2004 	uint32_t num_params = 0;
2005 	size_t sz = 0;
2006 
2007 	mobj = mobj_ffa_get_by_cookie(cookie, 0);
2008 	if (!mobj) {
2009 		EMSG("Can't find cookie %#"PRIx64, cookie);
2010 		return TEE_ERROR_BAD_PARAMETERS;
2011 	}
2012 
2013 	res = mobj_inc_map(mobj);
2014 	if (res)
2015 		goto out_put_mobj;
2016 
2017 	res = TEE_ERROR_BAD_PARAMETERS;
2018 	arg = mobj_get_va(mobj, offset, sizeof(*arg));
2019 	if (!arg)
2020 		goto out_dec_map;
2021 
2022 	num_params = READ_ONCE(arg->num_params);
2023 	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
2024 		goto out_dec_map;
2025 
2026 	sz = OPTEE_MSG_GET_ARG_SIZE(num_params);
2027 
2028 	thr->rpc_arg = mobj_get_va(mobj, offset + sz, sz_rpc);
2029 	if (!thr->rpc_arg)
2030 		goto out_dec_map;
2031 
2032 	virt_on_stdcall();
2033 	res = tee_entry_std(arg, num_params);
2034 
2035 	thread_rpc_shm_cache_clear(&thr->shm_cache);
2036 	thr->rpc_arg = NULL;
2037 
2038 out_dec_map:
2039 	mobj_dec_map(mobj);
2040 out_put_mobj:
2041 	mobj_put(mobj);
2042 	return res;
2043 }
2044 
2045 /*
2046  * Helper routine for the assembly function thread_std_smc_entry()
2047  *
2048  * Note: this function is weak just to make link_dummies_paged.c happy.
2049  */
2050 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
2051 				       uint32_t a2, uint32_t a3,
2052 				       uint32_t a4, uint32_t a5 __unused)
2053 {
2054 	/*
2055 	 * Arguments are supplied from handle_yielding_call() as:
2056 	 * a0 <- w1
2057 	 * a1 <- w3
2058 	 * a2 <- w4
2059 	 * a3 <- w5
2060 	 * a4 <- w6
2061 	 * a5 <- w7
2062 	 */
2063 	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
2064 	if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG)
2065 		return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4);
2066 	return FFA_DENIED;
2067 }
2068 
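/*
 * Convert a memref thread_param to an OPTEE_MSG FMEM parameter. Fails
 * if the offset doesn't fit in the descriptor or if the supplied mobj
 * lacks a valid FF-A cookie.
 */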
2069 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
2070 {
2071 	uint64_t offs = tpm->u.memref.offs;
2072 
2073 	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
2074 		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
2075 
2076 	param->u.fmem.offs_low = offs;
2077 	param->u.fmem.offs_high = offs >> 32;
2078 	if (param->u.fmem.offs_high != offs >> 32)
2079 		return false;
2080 
2081 	param->u.fmem.size = tpm->u.memref.size;
2082 	if (tpm->u.memref.mobj) {
2083 		uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj);
2084 
2085 		/* If a mobj is passed it must have a valid cookie. */
2086 		if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
2087 			return false;
2088 		param->u.fmem.global_id = cookie;
2089 	} else {
2090 		param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
2091 	}
2092 
2093 	return true;
2094 }
2095 
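/*
 * Prepare the per-thread RPC argument struct with @cmd and @params
 * translated to OPTEE_MSG format. On success *@arg_ret (if non-NULL)
 * points at the prepared struct.
 */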
2096 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
2097 			    struct thread_param *params,
2098 			    struct optee_msg_arg **arg_ret)
2099 {
2100 	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
2101 	struct thread_ctx *thr = threads + thread_get_id();
2102 	struct optee_msg_arg *arg = thr->rpc_arg;
2103 
2104 	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
2105 		return TEE_ERROR_BAD_PARAMETERS;
2106 
2107 	if (!arg) {
2108 		EMSG("rpc_arg not set");
2109 		return TEE_ERROR_GENERIC;
2110 	}
2111 
2112 	memset(arg, 0, sz);
2113 	arg->cmd = cmd;
2114 	arg->num_params = num_params;
2115 	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
2116 
2117 	for (size_t n = 0; n < num_params; n++) {
2118 		switch (params[n].attr) {
2119 		case THREAD_PARAM_ATTR_NONE:
2120 			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
2121 			break;
2122 		case THREAD_PARAM_ATTR_VALUE_IN:
2123 		case THREAD_PARAM_ATTR_VALUE_OUT:
2124 		case THREAD_PARAM_ATTR_VALUE_INOUT:
2125 			arg->params[n].attr = params[n].attr -
2126 					      THREAD_PARAM_ATTR_VALUE_IN +
2127 					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
2128 			arg->params[n].u.value.a = params[n].u.value.a;
2129 			arg->params[n].u.value.b = params[n].u.value.b;
2130 			arg->params[n].u.value.c = params[n].u.value.c;
2131 			break;
2132 		case THREAD_PARAM_ATTR_MEMREF_IN:
2133 		case THREAD_PARAM_ATTR_MEMREF_OUT:
2134 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
2135 			if (!set_fmem(arg->params + n, params + n))
2136 				return TEE_ERROR_BAD_PARAMETERS;
2137 			break;
2138 		default:
2139 			return TEE_ERROR_BAD_PARAMETERS;
2140 		}
2141 	}
2142 
2143 	if (arg_ret)
2144 		*arg_ret = arg;
2145 
2146 	return TEE_SUCCESS;
2147 }
2148 
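/*
 * Copy output values and sizes from the RPC argument struct back into
 * @params and return the status reported by normal world.
 */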
2149 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
2150 				struct thread_param *params)
2151 {
2152 	for (size_t n = 0; n < num_params; n++) {
2153 		switch (params[n].attr) {
2154 		case THREAD_PARAM_ATTR_VALUE_OUT:
2155 		case THREAD_PARAM_ATTR_VALUE_INOUT:
2156 			params[n].u.value.a = arg->params[n].u.value.a;
2157 			params[n].u.value.b = arg->params[n].u.value.b;
2158 			params[n].u.value.c = arg->params[n].u.value.c;
2159 			break;
2160 		case THREAD_PARAM_ATTR_MEMREF_OUT:
2161 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
2162 			params[n].u.memref.size = arg->params[n].u.fmem.size;
2163 			break;
2164 		default:
2165 			break;
2166 		}
2167 	}
2168 
2169 	return arg->ret;
2170 }
2171 
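/*
 * Issue an RPC to normal world: marshal @params into the per-thread RPC
 * buffer, suspend this thread with thread_rpc() and unmarshal the
 * results when it's resumed. Illustrative use, assuming the time RPC:
 *
 *	struct thread_param p = THREAD_PARAM_VALUE(OUT, 0, 0, 0);
 *	uint32_t res = thread_rpc_cmd(OPTEE_RPC_CMD_GET_TIME, 1, &p);
 */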
2172 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
2173 			struct thread_param *params)
2174 {
2175 	struct thread_rpc_arg rpc_arg = { .call = {
2176 			.w1 = thread_get_tsd()->rpc_target_info,
2177 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
2178 		},
2179 	};
2180 	struct optee_msg_arg *arg = NULL;
2181 	uint32_t ret = 0;
2182 
2183 	ret = get_rpc_arg(cmd, num_params, params, &arg);
2184 	if (ret)
2185 		return ret;
2186 
2187 	thread_rpc(&rpc_arg);
2188 
2189 	return get_rpc_arg_res(arg, num_params, params);
2190 }
2191 
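/*
 * Ask normal world to free the shared memory buffer of type @bt
 * (OPTEE_RPC_SHM_TYPE_*) identified by @cookie, after unregistering the
 * corresponding FF-A memory object on our side.
 */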
2192 static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
2193 {
2194 	struct thread_rpc_arg rpc_arg = { .call = {
2195 			.w1 = thread_get_tsd()->rpc_target_info,
2196 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
2197 		},
2198 	};
2199 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
2200 	uint32_t res2 = 0;
2201 	uint32_t res = 0;
2202 
2203 	DMSG("freeing cookie %#"PRIx64, cookie);
2204 
2205 	res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);
2206 
2207 	mobj_put(mobj);
2208 	res2 = mobj_ffa_unregister_by_cookie(cookie);
2209 	if (res2)
2210 		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
2211 		     cookie, res2);
2212 	if (!res)
2213 		thread_rpc(&rpc_arg);
2214 }
2215 
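/*
 * Ask normal world, via RPC, to allocate and share @size bytes of
 * memory of type @bt (OPTEE_RPC_SHM_TYPE_*). The shared buffer is then
 * looked up by its FF-A cookie, mapped and returned as a struct mobj,
 * or NULL on failure.
 */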
2216 static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
2217 {
2218 	struct thread_rpc_arg rpc_arg = { .call = {
2219 			.w1 = thread_get_tsd()->rpc_target_info,
2220 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
2221 		},
2222 	};
2223 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
2224 	struct optee_msg_arg *arg = NULL;
2225 	unsigned int internal_offset = 0;
2226 	struct mobj *mobj = NULL;
2227 	uint64_t cookie = 0;
2228 
2229 	if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
2230 		return NULL;
2231 
2232 	thread_rpc(&rpc_arg);
2233 
2234 	if (arg->num_params != 1 ||
2235 	    arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
2236 		return NULL;
2237 
2238 	internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs);
2239 	cookie = READ_ONCE(arg->params->u.fmem.global_id);
2240 	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
2241 	if (!mobj) {
2242 		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
2243 		     cookie, internal_offset);
2244 		return NULL;
2245 	}
2246 
2247 	assert(mobj_is_nonsec(mobj));
2248 
2249 	if (mobj->size < size) {
2250 		DMSG("Mobj %#"PRIx64": wrong size", cookie);
2251 		mobj_put(mobj);
2252 		return NULL;
2253 	}
2254 
2255 	if (mobj_inc_map(mobj)) {
2256 		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
2257 		mobj_put(mobj);
2258 		return NULL;
2259 	}
2260 
2261 	return mobj;
2262 }
2263 
2264 struct mobj *thread_rpc_alloc_payload(size_t size)
2265 {
2266 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
2267 }
2268 
2269 struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
2270 {
2271 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
2272 }
2273 
2274 void thread_rpc_free_kernel_payload(struct mobj *mobj)
2275 {
2276 	if (mobj)
2277 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL,
2278 				mobj_get_cookie(mobj), mobj);
2279 }
2280 
2281 void thread_rpc_free_payload(struct mobj *mobj)
2282 {
2283 	if (mobj)
2284 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
2285 				mobj);
2286 }
2287 
2288 struct mobj *thread_rpc_alloc_global_payload(size_t size)
2289 {
2290 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
2291 }
2292 
2293 void thread_rpc_free_global_payload(struct mobj *mobj)
2294 {
2295 	if (mobj)
2296 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL,
2297 				mobj_get_cookie(mobj), mobj);
2298 }
2299 
2300 void thread_spmc_register_secondary_ep(vaddr_t ep)
2301 {
2302 	unsigned long ret = 0;
2303 
2304 	/* Let the SPM know the entry point for secondary CPUs */
2305 	ret = thread_smc(FFA_SECONDARY_EP_REGISTER_64, ep, 0, 0);
2306 
2307 	if (ret != FFA_SUCCESS_32 && ret != FFA_SUCCESS_64)
2308 		EMSG("FFA_SECONDARY_EP_REGISTER_64 ret %#lx", ret);
2309 }
2310 
2311 static uint16_t ffa_id_get(void)
2312 {
2313 	/*
2314 	 * Ask the SPM component running at a higher EL to return our FF-A ID.
2315 	 * This can either be the SPMC ID (if the SPMC is enabled in OP-TEE) or
2316 	 * the partition ID (if not).
2317 	 */
2318 	struct thread_smc_args args = {
2319 		.a0 = FFA_ID_GET,
2320 	};
2321 
2322 	thread_smccc(&args);
2323 	if (!is_ffa_success(args.a0)) {
2324 		if (args.a0 == FFA_ERROR)
2325 			EMSG("Get id failed with error %ld", args.a2);
2326 		else
2327 			EMSG("Get id failed");
2328 		panic();
2329 	}
2330 
2331 	return args.a2;
2332 }
2333 
2334 static uint16_t ffa_spm_id_get(void)
2335 {
2336 	/*
2337 	 * Ask the SPM component running at a higher EL to return its ID.
2338 	 * If OP-TEE implements the S-EL1 SPMC, this will get the SPMD ID.
2339 	 * If not, the ID of the SPMC will be returned.
2340 	 */
2341 	struct thread_smc_args args = {
2342 		.a0 = FFA_SPM_ID_GET,
2343 	};
2344 
2345 	thread_smccc(&args);
2346 	if (!is_ffa_success(args.a0)) {
2347 		if (args.a0 == FFA_ERROR)
2348 			EMSG("Get spm id failed with error %ld", args.a2);
2349 		else
2350 			EMSG("Get spm id failed");
2351 		panic();
2352 	}
2353 
2354 	return args.a2;
2355 }
2356 
2357 #if defined(CFG_CORE_SEL1_SPMC)
2358 static TEE_Result spmc_init(void)
2359 {
2360 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
2361 	    virt_add_guest_spec_data(&notif_vm_bitmap_id,
2362 				     sizeof(struct notif_vm_bitmap), NULL))
2363 		panic("virt_add_guest_spec_data");
2364 	spmd_id = ffa_spm_id_get();
2365 	DMSG("SPMD ID %#"PRIx16, spmd_id);
2366 
2367 	spmc_id = ffa_id_get();
2368 	DMSG("SPMC ID %#"PRIx16, spmc_id);
2369 
2370 	optee_endpoint_id = FFA_SWD_ID_MIN;
2371 	while (optee_endpoint_id == spmd_id || optee_endpoint_id == spmc_id)
2372 		optee_endpoint_id++;
2373 
2374 	DMSG("OP-TEE endpoint ID %#"PRIx16, optee_endpoint_id);
2375 
2376 	/*
2377 	 * If the SPMD thinks we are version 1.0 it will report version 1.0
2378 	 * to normal world regardless of which version we query the SPM
2379 	 * with. However, if the SPMD thinks we are version 1.1 it will
2380 	 * forward queries from normal world to let us negotiate the
2381 	 * version. So by setting version 1.0 here we should be compatible.
2382 	 *
2383 	 * Note that disagreement on the negotiated version means that
2384 	 * we'll have communication problems with normal world.
2385 	 */
2386 	my_rxtx.ffa_vers = FFA_VERSION_1_0;
2387 
2388 	return TEE_SUCCESS;
2389 }
2390 #else /* !defined(CFG_CORE_SEL1_SPMC) */
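/*
 * Register our RX/TX buffer pair with the SPMC through FFA_RXTX_MAP.
 * Both buffers are a single 4k page (a3 == 1).
 */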
2391 static void spmc_rxtx_map(struct ffa_rxtx *rxtx)
2392 {
2393 	struct thread_smc_args args = {
2394 #ifdef ARM64
2395 		.a0 = FFA_RXTX_MAP_64,
2396 #else
2397 		.a0 = FFA_RXTX_MAP_32,
2398 #endif
2399 		.a1 = virt_to_phys(rxtx->tx),
2400 		.a2 = virt_to_phys(rxtx->rx),
2401 		.a3 = 1,
2402 	};
2403 
2404 	thread_smccc(&args);
2405 	if (!is_ffa_success(args.a0)) {
2406 		if (args.a0 == FFA_ERROR)
2407 			EMSG("rxtx map failed with error %ld", args.a2);
2408 		else
2409 			EMSG("rxtx map failed");
2410 		panic();
2411 	}
2412 }
2413 
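/*
 * Report @my_version to the SPMC with FFA_VERSION and return the
 * version the SPMC supports, panicking on error.
 */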
2414 static uint32_t get_ffa_version(uint32_t my_version)
2415 {
2416 	struct thread_smc_args args = {
2417 		.a0 = FFA_VERSION,
2418 		.a1 = my_version,
2419 	};
2420 
2421 	thread_smccc(&args);
2422 	if (args.a0 & BIT(31)) {
2423 		EMSG("FF-A version failed with error %ld", args.a0);
2424 		panic();
2425 	}
2426 
2427 	return args.a0;
2428 }
2429 
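/*
 * Build an FFA_MEM_RETRIEVE_REQ for the memory identified by @cookie in
 * the TX buffer, using the descriptor layout matching the negotiated
 * FF-A version. On success @trans holds the parsed transaction header
 * and the returned pointer refers to the response in the RX buffer.
 */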
2430 static void *spmc_retrieve_req(uint64_t cookie,
2431 			       struct ffa_mem_transaction_x *trans)
2432 {
2433 	struct ffa_mem_access *acc_descr_array = NULL;
2434 	struct ffa_mem_access_perm *perm_descr = NULL;
2435 	struct thread_smc_args args = {
2436 		.a0 = FFA_MEM_RETRIEVE_REQ_32,
2437 		.a3 =	0,	/* Address, Using TX -> MBZ */
2438 		.a4 =   0,	/* Using TX -> MBZ */
2439 	};
2440 	size_t size = 0;
2441 	int rc = 0;
2442 
2443 	if (my_rxtx.ffa_vers == FFA_VERSION_1_0) {
2444 		struct ffa_mem_transaction_1_0 *trans_descr = my_rxtx.tx;
2445 
2446 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
2447 		memset(trans_descr, 0, size);
2448 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
2449 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
2450 		trans_descr->global_handle = cookie;
2451 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
2452 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
2453 		trans_descr->mem_access_count = 1;
2454 		acc_descr_array = trans_descr->mem_access_array;
2455 	} else {
2456 		struct ffa_mem_transaction_1_1 *trans_descr = my_rxtx.tx;
2457 
2458 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
2459 		memset(trans_descr, 0, size);
2460 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
2461 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
2462 		trans_descr->global_handle = cookie;
2463 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
2464 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
2465 		trans_descr->mem_access_count = 1;
2466 		trans_descr->mem_access_offs = sizeof(*trans_descr);
2467 		trans_descr->mem_access_size = sizeof(struct ffa_mem_access);
2468 		acc_descr_array = (void *)((vaddr_t)my_rxtx.tx +
2469 					   sizeof(*trans_descr));
2470 	}
2471 	acc_descr_array->region_offs = 0;
2472 	acc_descr_array->reserved = 0;
2473 	perm_descr = &acc_descr_array->access_perm;
2474 	perm_descr->endpoint_id = optee_endpoint_id;
2475 	perm_descr->perm = FFA_MEM_ACC_RW;
2476 	perm_descr->flags = 0;
2477 
2478 	args.a1 = size; /* Total Length */
2479 	args.a2 = size; /* Frag Length == Total length */
2480 	thread_smccc(&args);
2481 	if (args.a0 != FFA_MEM_RETRIEVE_RESP) {
2482 		if (args.a0 == FFA_ERROR)
2483 			EMSG("Failed to fetch cookie %#"PRIx64" error code %d",
2484 			     cookie, (int)args.a2);
2485 		else
2486 			EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64,
2487 			     cookie, args.a0);
2488 		return NULL;
2489 	}
2490 	rc = spmc_read_mem_transaction(my_rxtx.ffa_vers, my_rxtx.rx,
2491 				       my_rxtx.size, trans);
2492 	if (rc) {
2493 		EMSG("Memory transaction failure for cookie %#"PRIx64" rc %d",
2494 		     cookie, rc);
2495 		return NULL;
2496 	}
2497 
2498 	return my_rxtx.rx;
2499 }
2500 
2501 void thread_spmc_relinquish(uint64_t cookie)
2502 {
2503 	struct ffa_mem_relinquish *relinquish_desc = my_rxtx.tx;
2504 	struct thread_smc_args args = {
2505 		.a0 = FFA_MEM_RELINQUISH,
2506 	};
2507 
2508 	memset(relinquish_desc, 0, sizeof(*relinquish_desc));
2509 	relinquish_desc->handle = cookie;
2510 	relinquish_desc->flags = 0;
2511 	relinquish_desc->endpoint_count = 1;
2512 	relinquish_desc->endpoint_id_array[0] = optee_endpoint_id;
2513 	thread_smccc(&args);
2514 	if (!is_ffa_success(args.a0))
2515 		EMSG("Failed to relinquish cookie %#"PRIx64, cookie);
2516 }
2517 
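/*
 * Add each constituent address range from the retrieved memory region
 * to @mf and verify that the ranges cover exactly @num_pages pages.
 */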
2518 static int set_pages(struct ffa_address_range *regions,
2519 		     unsigned int num_regions, unsigned int num_pages,
2520 		     struct mobj_ffa *mf)
2521 {
2522 	unsigned int n = 0;
2523 	unsigned int idx = 0;
2524 
2525 	for (n = 0; n < num_regions; n++) {
2526 		unsigned int page_count = READ_ONCE(regions[n].page_count);
2527 		uint64_t addr = READ_ONCE(regions[n].address);
2528 
2529 		if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count))
2530 			return FFA_INVALID_PARAMETERS;
2531 	}
2532 
2533 	if (idx != num_pages)
2534 		return FFA_INVALID_PARAMETERS;
2535 
2536 	return 0;
2537 }
2538 
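/*
 * Retrieve the memory region identified by @cookie from the SPMC and
 * create a struct mobj_ffa covering its pages. The RX buffer is
 * released before returning.
 */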
2539 struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie)
2540 {
2541 	struct mobj_ffa *ret = NULL;
2542 	struct ffa_mem_transaction_x retrieve_desc = { };
2543 	struct ffa_mem_access *descr_array = NULL;
2544 	struct ffa_mem_region *descr = NULL;
2545 	struct mobj_ffa *mf = NULL;
2546 	unsigned int num_pages = 0;
2547 	unsigned int offs = 0;
2548 	void *buf = NULL;
2549 	struct thread_smc_args ffa_rx_release_args = {
2550 		.a0 = FFA_RX_RELEASE
2551 	};
2552 
2553 	/*
2554 	 * OP-TEE only supports a single mem_region while the
2555 	 * specification allows for more than one.
2556 	 */
2557 	buf = spmc_retrieve_req(cookie, &retrieve_desc);
2558 	if (!buf) {
2559 		EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64,
2560 		     cookie);
2561 		return NULL;
2562 	}
2563 
2564 	descr_array = (void *)((vaddr_t)buf + retrieve_desc.mem_access_offs);
2565 	offs = READ_ONCE(descr_array->region_offs);
2566 	descr = (struct ffa_mem_region *)((vaddr_t)buf + offs);
2567 
2568 	num_pages = READ_ONCE(descr->total_page_count);
2569 	mf = mobj_ffa_spmc_new(cookie, num_pages);
2570 	if (!mf)
2571 		goto out;
2572 
2573 	if (set_pages(descr->address_range_array,
2574 		      READ_ONCE(descr->address_range_count), num_pages, mf)) {
2575 		mobj_ffa_spmc_delete(mf);
2576 		goto out;
2577 	}
2578 
2579 	ret = mf;
2580 
2581 out:
2582 	/* Release RX buffer after the mem retrieve request. */
2583 	thread_smccc(&ffa_rx_release_args);
2584 
2585 	return ret;
2586 }
2587 
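/*
 * Read the "ffa-version" property from the FF-A manifest device tree
 * passed at boot, panicking if the manifest is missing or malformed.
 */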
2588 static uint32_t get_ffa_version_from_manifest(void *fdt)
2589 {
2590 	int ret = 0;
2591 	uint32_t vers = 0;
2592 
2593 	ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
2594 	if (ret < 0) {
2595 		EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
2596 		panic();
2597 	}
2598 
2599 	ret = fdt_read_uint32(fdt, 0, "ffa-version", &vers);
2600 	if (ret < 0) {
2601 		EMSG("Can't read \"ffa-version\" from FF-A manifest at %p: error %d",
2602 		     fdt, ret);
2603 		panic();
2604 	}
2605 
2606 	return vers;
2607 }
2608 
2609 static TEE_Result spmc_init(void)
2610 {
2611 	uint32_t my_vers = 0;
2612 	uint32_t vers = 0;
2613 
2614 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
2615 	    virt_add_guest_spec_data(&notif_vm_bitmap_id,
2616 				     sizeof(struct notif_vm_bitmap), NULL))
2617 		panic("virt_add_guest_spec_data");
2618 
2619 	my_vers = get_ffa_version_from_manifest(get_manifest_dt());
2620 	if (my_vers < FFA_VERSION_1_0 || my_vers > FFA_VERSION_1_2) {
2621 		EMSG("Unsupported version %"PRIu32".%"PRIu32" from manifest",
2622 		     FFA_GET_MAJOR_VERSION(my_vers),
2623 		     FFA_GET_MINOR_VERSION(my_vers));
2624 		panic();
2625 	}
2626 	vers = get_ffa_version(my_vers);
2627 	DMSG("SPMC reported version %"PRIu32".%"PRIu32,
2628 	     FFA_GET_MAJOR_VERSION(vers), FFA_GET_MINOR_VERSION(vers));
2629 	if (FFA_GET_MAJOR_VERSION(vers) != FFA_GET_MAJOR_VERSION(my_vers)) {
2630 		EMSG("Incompatible major version %"PRIu32", expected %"PRIu32"",
2631 		     FFA_GET_MAJOR_VERSION(vers),
2632 		     FFA_GET_MAJOR_VERSION(my_vers));
2633 		panic();
2634 	}
2635 	if (vers < my_vers)
2636 		my_vers = vers;
2637 	DMSG("Using version %"PRIu32".%"PRIu32"",
2638 	     FFA_GET_MAJOR_VERSION(my_vers), FFA_GET_MINOR_VERSION(my_vers));
2639 	my_rxtx.ffa_vers = my_vers;
2640 
2641 	spmc_rxtx_map(&my_rxtx);
2642 
2643 	spmc_id = ffa_spm_id_get();
2644 	DMSG("SPMC ID %#"PRIx16, spmc_id);
2645 
2646 	optee_endpoint_id = ffa_id_get();
2647 	DMSG("OP-TEE endpoint ID %#"PRIx16, optee_endpoint_id);
2648 
2649 	if (!ffa_features(FFA_NOTIFICATION_SET)) {
2650 		spmc_notif_is_ready = true;
2651 		DMSG("Asynchronous notifications are ready");
2652 	}
2653 
2654 	return TEE_SUCCESS;
2655 }
2656 #endif /* !defined(CFG_CORE_SEL1_SPMC) */
2657 
2658 nex_service_init(spmc_init);
2659