xref: /optee_os/core/arch/arm/kernel/thread_spmc.c (revision 45fecab081173ef58b1cb14b6ddf6892b0b9d3f6)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2020-2025, Linaro Limited.
4  * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
5  */
6 
7 #include <assert.h>
8 #include <ffa.h>
9 #include <initcall.h>
10 #include <io.h>
11 #include <kernel/dt.h>
12 #include <kernel/interrupt.h>
13 #include <kernel/notif.h>
14 #include <kernel/panic.h>
15 #include <kernel/secure_partition.h>
16 #include <kernel/spinlock.h>
17 #include <kernel/spmc_sp_handler.h>
18 #include <kernel/tee_misc.h>
19 #include <kernel/thread.h>
20 #include <kernel/thread_private.h>
21 #include <kernel/thread_spmc.h>
22 #include <kernel/virtualization.h>
23 #include <libfdt.h>
24 #include <mm/core_mmu.h>
25 #include <mm/mobj.h>
26 #include <optee_ffa.h>
27 #include <optee_msg.h>
28 #include <optee_rpc_cmd.h>
29 #include <sm/optee_smc.h>
30 #include <string.h>
31 #include <sys/queue.h>
32 #include <tee/entry_std.h>
33 #include <tee/uuid.h>
34 #include <util.h>
35 
36 #if defined(CFG_CORE_SEL1_SPMC)
37 struct mem_share_state {
38 	struct mobj_ffa *mf;
39 	unsigned int page_count;
40 	unsigned int region_count;
41 	unsigned int current_page_idx;
42 };
43 
44 struct mem_frag_state {
45 	struct mem_share_state share;
46 	tee_mm_entry_t *mm;
47 	unsigned int frag_offset;
48 	SLIST_ENTRY(mem_frag_state) link;
49 };
50 #endif
51 
52 struct notif_vm_bitmap {
53 	bool initialized;
54 	int do_bottom_half_value;
55 	uint64_t pending;
56 	uint64_t bound;
57 };
58 
59 static unsigned int spmc_notif_lock __nex_data = SPINLOCK_UNLOCK;
60 static bool spmc_notif_is_ready __nex_bss;
61 static int notif_intid __nex_data __maybe_unused = -1;
62 
63 /* ID used to look up the guest-specific struct notif_vm_bitmap */
64 static unsigned int notif_vm_bitmap_id __nex_bss;
65 /* Notification state when ns-virtualization isn't enabled */
66 static struct notif_vm_bitmap default_notif_vm_bitmap;
67 
68 /* Initialized in spmc_init() below */
69 uint16_t optee_endpoint_id __nex_bss;
70 uint16_t spmc_id __nex_bss;
71 #ifdef CFG_CORE_SEL1_SPMC
72 uint16_t spmd_id __nex_bss;
73 static const uint32_t my_part_props = FFA_PART_PROP_DIRECT_REQ_RECV |
74 				      FFA_PART_PROP_DIRECT_REQ_SEND |
75 #ifdef CFG_NS_VIRTUALIZATION
76 				      FFA_PART_PROP_NOTIF_CREATED |
77 				      FFA_PART_PROP_NOTIF_DESTROYED |
78 #endif
79 #ifdef ARM64
80 				      FFA_PART_PROP_AARCH64_STATE |
81 #endif
82 				      FFA_PART_PROP_IS_PE_ID;
83 
84 static uint32_t my_uuid_words[] = {
85 	/*
86 	 * - if the SPMC is in S-EL2 this UUID describes OP-TEE as an S-EL1
87 	 *   SP, or
88 	 * - if the SPMC is in S-EL1 then this UUID is for OP-TEE as a
89 	 *   logical partition, residing in the same exception level as the
90 	 *   SPMC
91 	 * UUID 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
92 	 */
93 	0xe0786148, 0xe311f8e7, 0x02005ebc, 0x1bc5d5a5,
94 };
95 
96 /*
97  * If struct ffa_rxtx::size is 0, RX/TX buffers are not mapped or initialized.
98  *
99  * struct ffa_rxtx::spinlock protects the variables below from concurrent
100  * access; this includes the use of the content of struct ffa_rxtx::rx and
101  * @frag_state_head.
102  *
103  * struct ffa_rxtx::tx_is_mine is true when we may write to struct
104  * ffa_rxtx::tx and false when it is owned by normal world.
105  *
106  * Note that we can't prevent normal world from updating the content of
107  * these buffers, so we must always be careful when reading, even while we
108  * hold the lock.
109  */
110 
111 static struct ffa_rxtx my_rxtx __nex_bss;
112 
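/* True when @rxtx is the normal world RX/TX buffer pair rather than an SP's */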
113 static bool is_nw_buf(struct ffa_rxtx *rxtx)
114 {
115 	return rxtx == &my_rxtx;
116 }
117 
118 static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
119 	SLIST_HEAD_INITIALIZER(&frag_state_head);
120 
121 #else
122 static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE) __nex_bss;
123 static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE) __nex_bss;
124 static struct ffa_rxtx my_rxtx __nex_data = {
125 	.rx = __rx_buf,
126 	.tx = __tx_buf,
127 	.size = sizeof(__rx_buf),
128 };
129 #endif
130 
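/*
 * Direct requests/responses carry the sender ID in bits [31:16] and the
 * receiver ID in bits [15:0] of w1; swapping the halves turns a request
 * src/dst into the matching response src/dst.
 */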
131 static uint32_t swap_src_dst(uint32_t src_dst)
132 {
133 	return (src_dst >> 16) | (src_dst << 16);
134 }
135 
136 static uint16_t get_sender_id(uint32_t src_dst)
137 {
138 	return src_dst >> 16;
139 }
140 
141 void spmc_set_args(struct thread_smc_1_2_regs *args, uint32_t fid,
142 		   uint32_t src_dst, uint32_t w2, uint32_t w3, uint32_t w4,
143 		   uint32_t w5)
144 {
145 	*args = (struct thread_smc_1_2_regs){
146 		.a0 = fid,
147 		.a1 = src_dst,
148 		.a2 = w2,
149 		.a3 = w3,
150 		.a4 = w4,
151 		.a5 = w5,
152 	};
153 }
154 
155 static void set_simple_ret_val(struct thread_smc_1_2_regs *args, int ffa_ret)
156 {
157 	if (ffa_ret)
158 		spmc_set_args(args, FFA_ERROR, 0, ffa_ret, 0, 0, 0);
159 	else
160 		spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0);
161 }
162 
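/*
 * Negotiate the FF-A version to use with the caller: record the agreed
 * version in @rxtx and always return our own version as recommended by
 * the specification.
 */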
163 uint32_t spmc_exchange_version(uint32_t vers, struct ffa_rxtx *rxtx)
164 {
165 	uint32_t major_vers = FFA_GET_MAJOR_VERSION(vers);
166 	uint32_t minor_vers = FFA_GET_MINOR_VERSION(vers);
167 	uint32_t my_vers = FFA_VERSION_1_2;
168 	uint32_t my_major_vers = 0;
169 	uint32_t my_minor_vers = 0;
170 
171 	/*
172 	 * Holding back the FF-A version if we use Xen or S-EL0 SPs.
173 	 * - Xen doesn't handle negotiating with version 1.2.
174 	 * - S-EL0 SPs are limited to x0-x7 for normal world requests.
175 	 * We'll remove this when the obstacles are cleared.
176 	 */
177 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
178 		my_vers = FFA_VERSION_1_1;
179 
180 	my_major_vers = FFA_GET_MAJOR_VERSION(my_vers);
181 	my_minor_vers = FFA_GET_MINOR_VERSION(my_vers);
182 
183 	/*
184 	 * No locking, if the caller does concurrent calls to this it's
185 	 * only making a mess for itself. We must be able to renegotiate
186 	 * the FF-A version in order to support differing versions between
187 	 * the loader and the driver.
188 	 *
189 	 * Callers should use the version requested if we return a matching
190 	 * major version and a matching or larger minor version. The caller
191 	 * should downgrade to our minor version if our minor version is
192 	 * smaller. Regardless, always return our version as recommended by
193 	 * the specification.
194 	 */
195 	if (major_vers == my_major_vers) {
196 		if (minor_vers > my_minor_vers)
197 			rxtx->ffa_vers = my_vers;
198 		else
199 			rxtx->ffa_vers = vers;
200 	}
201 
202 	return my_vers;
203 }
204 
205 static bool is_ffa_success(uint32_t fid)
206 {
207 #ifdef ARM64
208 	if (fid == FFA_SUCCESS_64)
209 		return true;
210 #endif
211 	return fid == FFA_SUCCESS_32;
212 }
213 
214 static int32_t get_ffa_ret_code(const struct thread_smc_args *args)
215 {
216 	if (is_ffa_success(args->a0))
217 		return FFA_OK;
218 	if (args->a0 == FFA_ERROR && args->a2)
219 		return args->a2;
220 	return FFA_NOT_SUPPORTED;
221 }
222 
223 static int ffa_simple_call(uint32_t fid, unsigned long a1, unsigned long a2,
224 			   unsigned long a3, unsigned long a4)
225 {
226 	struct thread_smc_args args = {
227 		.a0 = fid,
228 		.a1 = a1,
229 		.a2 = a2,
230 		.a3 = a3,
231 		.a4 = a4,
232 	};
233 
234 	thread_smccc(&args);
235 
236 	return get_ffa_ret_code(&args);
237 }
238 
239 static int __maybe_unused ffa_features(uint32_t id)
240 {
241 	return ffa_simple_call(FFA_FEATURES, id, 0, 0, 0);
242 }
243 
244 static int __maybe_unused ffa_set_notification(uint16_t dst, uint16_t src,
245 					       uint32_t flags, uint64_t bitmap)
246 {
247 	return ffa_simple_call(FFA_NOTIFICATION_SET,
248 			       SHIFT_U32(src, 16) | dst, flags,
249 			       low32_from_64(bitmap), high32_from_64(bitmap));
250 }
251 
252 #if defined(CFG_CORE_SEL1_SPMC)
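/* FFA_FEATURES: report whether a function or feature ID is supported */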
253 static void handle_features(struct thread_smc_1_2_regs *args)
254 {
255 	uint32_t ret_fid = FFA_ERROR;
256 	uint32_t ret_w2 = FFA_NOT_SUPPORTED;
257 
258 	switch (args->a1) {
259 	case FFA_FEATURE_SCHEDULE_RECV_INTR:
260 		if (spmc_notif_is_ready) {
261 			ret_fid = FFA_SUCCESS_32;
262 			ret_w2 = notif_intid;
263 		}
264 		break;
265 
266 #ifdef ARM64
267 	case FFA_RXTX_MAP_64:
268 #endif
269 	case FFA_RXTX_MAP_32:
270 		ret_fid = FFA_SUCCESS_32;
271 		ret_w2 = 0; /* 4kB Minimum buffer size and alignment boundary */
272 		break;
273 #ifdef ARM64
274 	case FFA_MEM_SHARE_64:
275 #endif
276 	case FFA_MEM_SHARE_32:
277 		ret_fid = FFA_SUCCESS_32;
278 		/*
279 		 * Partition manager supports transmission of a memory
280 		 * transaction descriptor in a buffer dynamically allocated
281 		 * by the endpoint.
282 		 */
283 		ret_w2 = BIT(0);
284 		break;
285 
286 	case FFA_ERROR:
287 	case FFA_VERSION:
288 	case FFA_SUCCESS_32:
289 #ifdef ARM64
290 	case FFA_SUCCESS_64:
291 #endif
292 	case FFA_FEATURES:
293 	case FFA_SPM_ID_GET:
294 	case FFA_MEM_FRAG_TX:
295 	case FFA_MEM_RECLAIM:
296 	case FFA_MSG_SEND_DIRECT_REQ_64:
297 	case FFA_MSG_SEND_DIRECT_REQ_32:
298 	case FFA_INTERRUPT:
299 	case FFA_PARTITION_INFO_GET:
300 	case FFA_RXTX_UNMAP:
301 	case FFA_RX_RELEASE:
302 	case FFA_FEATURE_MANAGED_EXIT_INTR:
303 	case FFA_NOTIFICATION_BITMAP_CREATE:
304 	case FFA_NOTIFICATION_BITMAP_DESTROY:
305 	case FFA_NOTIFICATION_BIND:
306 	case FFA_NOTIFICATION_UNBIND:
307 	case FFA_NOTIFICATION_SET:
308 	case FFA_NOTIFICATION_GET:
309 	case FFA_NOTIFICATION_INFO_GET_32:
310 #ifdef ARM64
311 	case FFA_NOTIFICATION_INFO_GET_64:
312 #endif
313 		ret_fid = FFA_SUCCESS_32;
314 		ret_w2 = FFA_PARAM_MBZ;
315 		break;
316 	default:
317 		break;
318 	}
319 
320 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
321 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
322 }
323 
324 static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
325 {
326 	tee_mm_entry_t *mm = NULL;
327 
328 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
329 		return FFA_INVALID_PARAMETERS;
330 
331 	mm = tee_mm_alloc(&core_virt_shm_pool, sz);
332 	if (!mm)
333 		return FFA_NO_MEMORY;
334 
335 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
336 					  sz / SMALL_PAGE_SIZE,
337 					  MEM_AREA_NSEC_SHM)) {
338 		tee_mm_free(mm);
339 		return FFA_INVALID_PARAMETERS;
340 	}
341 
342 	*va_ret = (void *)tee_mm_get_smem(mm);
343 	return 0;
344 }
345 
346 void spmc_handle_spm_id_get(struct thread_smc_1_2_regs *args)
347 {
348 	spmc_set_args(args, FFA_SUCCESS_32, FFA_PARAM_MBZ, spmc_id,
349 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
350 }
351 
352 static void unmap_buf(void *va, size_t sz)
353 {
354 	tee_mm_entry_t *mm = tee_mm_find(&core_virt_shm_pool, (vaddr_t)va);
355 
356 	assert(mm);
357 	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
358 	tee_mm_free(mm);
359 }
360 
361 void spmc_handle_rxtx_map(struct thread_smc_1_2_regs *args,
362 			  struct ffa_rxtx *rxtx)
363 {
364 	int rc = 0;
365 	unsigned int sz = 0;
366 	paddr_t rx_pa = 0;
367 	paddr_t tx_pa = 0;
368 	void *rx = NULL;
369 	void *tx = NULL;
370 
371 	cpu_spin_lock(&rxtx->spinlock);
372 
373 	if (args->a3 & GENMASK_64(63, 6)) {
374 		rc = FFA_INVALID_PARAMETERS;
375 		goto out;
376 	}
377 
378 	sz = args->a3 * SMALL_PAGE_SIZE;
379 	if (!sz) {
380 		rc = FFA_INVALID_PARAMETERS;
381 		goto out;
382 	}
383 	/* TX/RX are swapped compared to the caller */
384 	tx_pa = args->a2;
385 	rx_pa = args->a1;
386 
387 	if (rxtx->size) {
388 		rc = FFA_DENIED;
389 		goto out;
390 	}
391 
392 	/*
393 	 * If the buffer comes from an SP, the address is virtual and already
394 	 * mapped.
395 	 */
396 	if (is_nw_buf(rxtx)) {
397 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
398 			enum teecore_memtypes mt = MEM_AREA_NEX_NSEC_SHM;
399 			bool tx_alloced = false;
400 
401 			/*
402 			 * With virtualization we establish this mapping in
403 			 * the nexus mapping which then is replicated to
404 			 * each partition.
405 			 *
406 			 * This means that this mapping must be done before
407 			 * any partition is created and then must not be
408 			 * changed.
409 			 */
410 
411 			/*
412 			 * core_mmu_add_mapping() may reuse previous
413 			 * mappings. First check if there's any mappings to
414 			 * reuse so we know how to clean up in case of
415 			 * failure.
416 			 */
417 			tx = phys_to_virt(tx_pa, mt, sz);
418 			rx = phys_to_virt(rx_pa, mt, sz);
419 			if (!tx) {
420 				tx = core_mmu_add_mapping(mt, tx_pa, sz);
421 				if (!tx) {
422 					rc = FFA_NO_MEMORY;
423 					goto out;
424 				}
425 				tx_alloced = true;
426 			}
427 			if (!rx)
428 				rx = core_mmu_add_mapping(mt, rx_pa, sz);
429 
430 			if (!rx) {
431 				if (tx_alloced && tx)
432 					core_mmu_remove_mapping(mt, tx, sz);
433 				rc = FFA_NO_MEMORY;
434 				goto out;
435 			}
436 		} else {
437 			rc = map_buf(tx_pa, sz, &tx);
438 			if (rc)
439 				goto out;
440 			rc = map_buf(rx_pa, sz, &rx);
441 			if (rc) {
442 				unmap_buf(tx, sz);
443 				goto out;
444 			}
445 		}
446 		rxtx->tx = tx;
447 		rxtx->rx = rx;
448 	} else {
449 		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
450 			rc = FFA_INVALID_PARAMETERS;
451 			goto out;
452 		}
453 
454 		if (!virt_to_phys((void *)tx_pa) ||
455 		    !virt_to_phys((void *)rx_pa)) {
456 			rc = FFA_INVALID_PARAMETERS;
457 			goto out;
458 		}
459 
460 		rxtx->tx = (void *)tx_pa;
461 		rxtx->rx = (void *)rx_pa;
462 	}
463 
464 	rxtx->size = sz;
465 	rxtx->tx_is_mine = true;
466 	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
467 	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
468 out:
469 	cpu_spin_unlock(&rxtx->spinlock);
470 	set_simple_ret_val(args, rc);
471 }
472 
473 void spmc_handle_rxtx_unmap(struct thread_smc_1_2_regs *args,
474 			    struct ffa_rxtx *rxtx)
475 {
476 	int rc = FFA_INVALID_PARAMETERS;
477 
478 	cpu_spin_lock(&rxtx->spinlock);
479 
480 	if (!rxtx->size)
481 		goto out;
482 
483 	/*
484 	 * We don't unmap the SP memory as the SP might still use it.
485 	 * We avoid making changes to nexus mappings at this stage since
486 	 * there currently isn't a way to replicate those changes to all
487 	 * partitions.
488 	 */
489 	if (is_nw_buf(rxtx) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
490 		unmap_buf(rxtx->rx, rxtx->size);
491 		unmap_buf(rxtx->tx, rxtx->size);
492 	}
493 	rxtx->size = 0;
494 	rxtx->rx = NULL;
495 	rxtx->tx = NULL;
496 	rc = 0;
497 out:
498 	cpu_spin_unlock(&rxtx->spinlock);
499 	set_simple_ret_val(args, rc);
500 }
501 
502 void spmc_handle_rx_release(struct thread_smc_1_2_regs *args,
503 			    struct ffa_rxtx *rxtx)
504 {
505 	int rc = 0;
506 
507 	cpu_spin_lock(&rxtx->spinlock);
508 	/* The sender's RX is our TX */
509 	if (!rxtx->size || rxtx->tx_is_mine) {
510 		rc = FFA_DENIED;
511 	} else {
512 		rc = 0;
513 		rxtx->tx_is_mine = true;
514 	}
515 	cpu_spin_unlock(&rxtx->spinlock);
516 
517 	set_simple_ret_val(args, rc);
518 }
519 
520 static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
521 {
522 	return !w0 && !w1 && !w2 && !w3;
523 }
524 
525 static bool is_my_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
526 {
527 	/*
528 	 * This depends on which UUID we have been assigned.
529 	 * TODO add a generic mechanism to obtain our UUID.
530 	 *
531 	 * The test below is for the hard coded UUID
532 	 * 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
533 	 */
534 	return w0 == my_uuid_words[0] && w1 == my_uuid_words[1] &&
535 	       w2 == my_uuid_words[2] && w3 == my_uuid_words[3];
536 }
537 
538 TEE_Result spmc_fill_partition_entry(uint32_t ffa_vers, void *buf, size_t blen,
539 				     size_t idx, uint16_t endpoint_id,
540 				     uint16_t execution_context,
541 				     uint32_t part_props,
542 				     const uint32_t uuid_words[4])
543 {
544 	struct ffa_partition_info_x *fpi = NULL;
545 	size_t fpi_size = sizeof(*fpi);
546 
547 	if (ffa_vers >= FFA_VERSION_1_1)
548 		fpi_size += FFA_UUID_SIZE;
549 
550 	if ((idx + 1) * fpi_size > blen)
551 		return TEE_ERROR_OUT_OF_MEMORY;
552 
553 	fpi = (void *)((vaddr_t)buf + idx * fpi_size);
554 	fpi->id = endpoint_id;
555 	/* Number of execution contexts implemented by this partition */
556 	fpi->execution_context = execution_context;
557 
558 	fpi->partition_properties = part_props;
559 
560 	/* In FF-A 1.0 only bits [2:0] are defined, let's mask others */
561 	if (ffa_vers < FFA_VERSION_1_1)
562 		fpi->partition_properties &= FFA_PART_PROP_DIRECT_REQ_RECV |
563 					     FFA_PART_PROP_DIRECT_REQ_SEND |
564 					     FFA_PART_PROP_INDIRECT_MSGS;
565 
566 	if (ffa_vers >= FFA_VERSION_1_1) {
567 		if (uuid_words)
568 			memcpy(fpi->uuid, uuid_words, FFA_UUID_SIZE);
569 		else
570 			memset(fpi->uuid, 0, FFA_UUID_SIZE);
571 	}
572 
573 	return TEE_SUCCESS;
574 }
575 
576 static int handle_partition_info_get_all(size_t *elem_count,
577 					 struct ffa_rxtx *rxtx, bool count_only)
578 {
579 	if (!count_only) {
580 		/* Add OP-TEE SP */
581 		if (spmc_fill_partition_entry(rxtx->ffa_vers, rxtx->tx,
582 					      rxtx->size, 0, optee_endpoint_id,
583 					      CFG_TEE_CORE_NB_CORE,
584 					      my_part_props, my_uuid_words))
585 			return FFA_NO_MEMORY;
586 	}
587 	*elem_count = 1;
588 
589 	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
590 		if (sp_partition_info_get(rxtx->ffa_vers, rxtx->tx, rxtx->size,
591 					  NULL, elem_count, count_only))
592 			return FFA_NO_MEMORY;
593 	}
594 
595 	return FFA_OK;
596 }
597 
598 void spmc_handle_partition_info_get(struct thread_smc_1_2_regs *args,
599 				    struct ffa_rxtx *rxtx)
600 {
601 	TEE_Result res = TEE_SUCCESS;
602 	uint32_t ret_fid = FFA_ERROR;
603 	uint32_t fpi_size = 0;
604 	uint32_t rc = 0;
605 	bool count_only = args->a5 & FFA_PARTITION_INFO_GET_COUNT_FLAG;
606 
607 	if (!count_only) {
608 		cpu_spin_lock(&rxtx->spinlock);
609 
610 		if (!rxtx->size || !rxtx->tx_is_mine) {
611 			rc = FFA_BUSY;
612 			goto out;
613 		}
614 	}
615 
616 	if (is_nil_uuid(args->a1, args->a2, args->a3, args->a4)) {
617 		size_t elem_count = 0;
618 
619 		ret_fid = handle_partition_info_get_all(&elem_count, rxtx,
620 							count_only);
621 
622 		if (ret_fid) {
623 			rc = ret_fid;
624 			ret_fid = FFA_ERROR;
625 		} else {
626 			ret_fid = FFA_SUCCESS_32;
627 			rc = elem_count;
628 		}
629 
630 		goto out;
631 	}
632 
633 	if (is_my_uuid(args->a1, args->a2, args->a3, args->a4)) {
634 		if (!count_only) {
635 			res = spmc_fill_partition_entry(rxtx->ffa_vers,
636 							rxtx->tx, rxtx->size, 0,
637 							optee_endpoint_id,
638 							CFG_TEE_CORE_NB_CORE,
639 							my_part_props,
640 							my_uuid_words);
641 			if (res) {
642 				ret_fid = FFA_ERROR;
643 				rc = FFA_INVALID_PARAMETERS;
644 				goto out;
645 			}
646 		}
647 		rc = 1;
648 	} else if (IS_ENABLED(CFG_SECURE_PARTITION)) {
649 		uint32_t uuid_array[4] = { 0 };
650 		TEE_UUID uuid = { };
651 		size_t count = 0;
652 
653 		uuid_array[0] = args->a1;
654 		uuid_array[1] = args->a2;
655 		uuid_array[2] = args->a3;
656 		uuid_array[3] = args->a4;
657 		tee_uuid_from_octets(&uuid, (uint8_t *)uuid_array);
658 
659 		res = sp_partition_info_get(rxtx->ffa_vers, rxtx->tx,
660 					    rxtx->size, &uuid, &count,
661 					    count_only);
662 		if (res != TEE_SUCCESS) {
663 			ret_fid = FFA_ERROR;
664 			rc = FFA_INVALID_PARAMETERS;
665 			goto out;
666 		}
667 		rc = count;
668 	} else {
669 		ret_fid = FFA_ERROR;
670 		rc = FFA_INVALID_PARAMETERS;
671 		goto out;
672 	}
673 
674 	ret_fid = FFA_SUCCESS_32;
675 
676 out:
677 	if (ret_fid == FFA_SUCCESS_32 && !count_only &&
678 	    rxtx->ffa_vers >= FFA_VERSION_1_1)
679 		fpi_size = sizeof(struct ffa_partition_info_x) + FFA_UUID_SIZE;
680 
681 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, fpi_size,
682 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
683 	if (!count_only) {
684 		rxtx->tx_is_mine = false;
685 		cpu_spin_unlock(&rxtx->spinlock);
686 	}
687 }
688 
689 static void spmc_handle_run(struct thread_smc_1_2_regs *args)
690 {
691 	uint16_t endpoint = FFA_TARGET_INFO_GET_SP_ID(args->a1);
692 	uint16_t thread_id = FFA_TARGET_INFO_GET_VCPU_ID(args->a1);
693 	uint32_t rc = FFA_OK;
694 
695 	if (endpoint != optee_endpoint_id) {
696 		/*
697 		 * The endpoint should be an SP, try to resume the SP from
698 		 * preempted into busy state.
699 		 */
700 		rc = spmc_sp_resume_from_preempted(endpoint);
701 		if (rc)
702 			goto out;
703 	}
704 
705 	thread_resume_from_rpc(thread_id, 0, 0, 0, 0);
706 
707 	/* thread_resume_from_rpc() returns only if the thread_id is invalid */
708 	rc = FFA_INVALID_PARAMETERS;
709 
710 out:
711 	set_simple_ret_val(args, rc);
712 }
713 #endif /*CFG_CORE_SEL1_SPMC*/
714 
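/*
 * Return the notification bitmap state for @vm_id: the per-guest data when
 * NS-virtualization is enabled, otherwise the single default instance.
 */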
715 static struct notif_vm_bitmap *get_notif_vm_bitmap(struct guest_partition *prtn,
716 						   uint16_t vm_id)
717 {
718 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
719 		if (!prtn)
720 			return NULL;
721 		assert(vm_id == virt_get_guest_id(prtn));
722 		return virt_get_guest_spec_data(prtn, notif_vm_bitmap_id);
723 	}
724 	if (vm_id)
725 		return NULL;
726 	return &default_notif_vm_bitmap;
727 }
728 
729 static uint32_t spmc_enable_async_notif(uint32_t bottom_half_value,
730 					uint16_t vm_id)
731 {
732 	struct guest_partition *prtn = NULL;
733 	struct notif_vm_bitmap *nvb = NULL;
734 	uint32_t old_itr_status = 0;
735 	uint32_t res = 0;
736 
737 	if (!spmc_notif_is_ready) {
738 		/*
739 		 * This should never happen, at least not if normal world
740 		 * respects the exchanged capabilities.
741 		 */
742 		EMSG("Asynchronous notifications are not ready");
743 		return TEE_ERROR_NOT_IMPLEMENTED;
744 	}
745 
746 	if (bottom_half_value >= OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE) {
747 		EMSG("Invalid bottom half value %"PRIu32, bottom_half_value);
748 		return TEE_ERROR_BAD_PARAMETERS;
749 	}
750 
751 	prtn = virt_get_guest(vm_id);
752 	nvb = get_notif_vm_bitmap(prtn, vm_id);
753 	if (!nvb) {
754 		res = TEE_ERROR_BAD_PARAMETERS;
755 		goto out;
756 	}
757 
758 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
759 	nvb->do_bottom_half_value = bottom_half_value;
760 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
761 
762 	notif_deliver_atomic_event(NOTIF_EVENT_STARTED, vm_id);
763 	res = TEE_SUCCESS;
764 out:
765 	virt_put_guest(prtn);
766 	return res;
767 }
768 
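/*
 * Yielding OP-TEE call: resume a suspended thread (RPC return) or allocate
 * a new thread for the call. On success control leaves via the thread, so
 * the direct response composed here is only used on failure.
 */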
769 static void handle_yielding_call(struct thread_smc_1_2_regs *args,
770 				 uint32_t direct_resp_fid)
771 {
772 	TEE_Result res = 0;
773 
774 	thread_check_canaries();
775 
776 #ifdef ARM64
777 	/* Saving this for an eventual RPC */
778 	thread_get_core_local()->direct_resp_fid = direct_resp_fid;
779 #endif
780 
781 	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
782 		/* Note connection to struct thread_rpc_arg::ret */
783 		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
784 				       0);
785 		res = TEE_ERROR_BAD_PARAMETERS;
786 	} else {
787 		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
788 				     args->a6, args->a7);
789 		res = TEE_ERROR_BUSY;
790 	}
791 	spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1),
792 		      0, res, 0, 0);
793 }
794 
795 static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
796 {
797 	uint64_t cookie = reg_pair_to_64(a5, a4);
798 	uint32_t res = 0;
799 
800 	res = mobj_ffa_unregister_by_cookie(cookie);
801 	switch (res) {
802 	case TEE_SUCCESS:
803 	case TEE_ERROR_ITEM_NOT_FOUND:
804 		return 0;
805 	case TEE_ERROR_BUSY:
806 		EMSG("res %#"PRIx32, res);
807 		return FFA_BUSY;
808 	default:
809 		EMSG("res %#"PRIx32, res);
810 		return FFA_INVALID_PARAMETERS;
811 	}
812 }
813 
814 static void handle_blocking_call(struct thread_smc_1_2_regs *args,
815 				 uint32_t direct_resp_fid)
816 {
817 	uint32_t sec_caps = 0;
818 
819 	switch (args->a3) {
820 	case OPTEE_FFA_GET_API_VERSION:
821 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
822 			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
823 			      0);
824 		break;
825 	case OPTEE_FFA_GET_OS_VERSION:
826 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
827 			      CFG_OPTEE_REVISION_MAJOR,
828 			      CFG_OPTEE_REVISION_MINOR,
829 			      TEE_IMPL_GIT_SHA1 >> 32);
830 		break;
831 	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
832 		sec_caps = OPTEE_FFA_SEC_CAP_ARG_OFFSET;
833 		if (spmc_notif_is_ready)
834 			sec_caps |= OPTEE_FFA_SEC_CAP_ASYNC_NOTIF;
835 		if (IS_ENABLED(CFG_RPMB_ANNOUNCE_PROBE_CAP))
836 			sec_caps |= OPTEE_FFA_SEC_CAP_RPMB_PROBE;
837 		spmc_set_args(args, direct_resp_fid,
838 			      swap_src_dst(args->a1), 0, 0,
839 			      THREAD_RPC_MAX_NUM_PARAMS, sec_caps);
840 		break;
841 	case OPTEE_FFA_UNREGISTER_SHM:
842 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
843 			      handle_unregister_shm(args->a4, args->a5), 0, 0);
844 		break;
845 	case OPTEE_FFA_ENABLE_ASYNC_NOTIF:
846 		spmc_set_args(args, direct_resp_fid,
847 			      swap_src_dst(args->a1), 0,
848 			      spmc_enable_async_notif(args->a4,
849 						      FFA_SRC(args->a1)),
850 			      0, 0);
851 		break;
852 	default:
853 		EMSG("Unhandled blocking service ID %#"PRIx32,
854 		     (uint32_t)args->a3);
855 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
856 			      TEE_ERROR_BAD_PARAMETERS, 0, 0);
857 	}
858 }
859 
860 static void handle_framework_direct_request(struct thread_smc_1_2_regs *args,
861 					    struct ffa_rxtx *rxtx,
862 					    uint32_t direct_resp_fid)
863 {
864 	uint32_t w0 = FFA_ERROR;
865 	uint32_t w1 = FFA_PARAM_MBZ;
866 	uint32_t w2 = FFA_NOT_SUPPORTED;
867 	uint32_t w3 = FFA_PARAM_MBZ;
868 
869 	switch (args->a2 & FFA_MSG_TYPE_MASK) {
870 	case FFA_MSG_SEND_VM_CREATED:
871 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
872 			uint16_t guest_id = args->a5;
873 			TEE_Result res = virt_guest_created(guest_id);
874 
875 			w0 = direct_resp_fid;
876 			w1 = swap_src_dst(args->a1);
877 			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_CREATED;
878 			if (res == TEE_SUCCESS)
879 				w3 = FFA_OK;
880 			else if (res == TEE_ERROR_OUT_OF_MEMORY)
881 				w3 = FFA_DENIED;
882 			else
883 				w3 = FFA_INVALID_PARAMETERS;
884 		}
885 		break;
886 	case FFA_MSG_SEND_VM_DESTROYED:
887 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
888 			uint16_t guest_id = args->a5;
889 			TEE_Result res = virt_guest_destroyed(guest_id);
890 
891 			w0 = direct_resp_fid;
892 			w1 = swap_src_dst(args->a1);
893 			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_DESTROYED;
894 			if (res == TEE_SUCCESS)
895 				w3 = FFA_OK;
896 			else
897 				w3 = FFA_INVALID_PARAMETERS;
898 		}
899 		break;
900 	case FFA_MSG_VERSION_REQ:
901 		w0 = direct_resp_fid;
902 		w1 = swap_src_dst(args->a1);
903 		w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_VERSION_RESP;
904 		w3 = spmc_exchange_version(args->a3, rxtx);
905 		break;
906 	default:
907 		break;
908 	}
909 	spmc_set_args(args, w0, w1, w2, w3, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
910 }
911 
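/*
 * FFA_MSG_SEND_DIRECT_REQ: route the request to an SP, to the framework
 * message handler, or to the OP-TEE blocking/yielding call handlers.
 */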
912 static void handle_direct_request(struct thread_smc_1_2_regs *args,
913 				  struct ffa_rxtx *rxtx)
914 {
915 	uint32_t direct_resp_fid = 0;
916 
917 	if (IS_ENABLED(CFG_SECURE_PARTITION) &&
918 	    FFA_DST(args->a1) != spmc_id &&
919 	    FFA_DST(args->a1) != optee_endpoint_id) {
920 		spmc_sp_start_thread(args);
921 		return;
922 	}
923 
924 	if (OPTEE_SMC_IS_64(args->a0))
925 		direct_resp_fid = FFA_MSG_SEND_DIRECT_RESP_64;
926 	else
927 		direct_resp_fid = FFA_MSG_SEND_DIRECT_RESP_32;
928 
929 	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
930 		handle_framework_direct_request(args, rxtx, direct_resp_fid);
931 		return;
932 	}
933 
934 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
935 	    virt_set_guest(get_sender_id(args->a1))) {
936 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
937 			      TEE_ERROR_ITEM_NOT_FOUND, 0, 0);
938 		return;
939 	}
940 
941 	if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
942 		handle_yielding_call(args, direct_resp_fid);
943 	else
944 		handle_blocking_call(args, direct_resp_fid);
945 
946 	/*
947 	 * Note that handle_yielding_call() typically only returns if a
948 	 * thread cannot be allocated or found. virt_unset_guest() is also
949 	 * called from thread_state_suspend() and thread_state_free().
950 	 */
951 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
952 		virt_unset_guest();
953 }
954 
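/*
 * Parse an FF-A 1.0 or 1.1 memory transaction descriptor from @buf into the
 * version independent @trans, checking that the endpoint memory access
 * descriptor array fits within @blen.
 */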
955 int spmc_read_mem_transaction(uint32_t ffa_vers, void *buf, size_t blen,
956 			      struct ffa_mem_transaction_x *trans)
957 {
958 	uint16_t mem_reg_attr = 0;
959 	uint32_t flags = 0;
960 	uint32_t count = 0;
961 	uint32_t offs = 0;
962 	uint32_t size = 0;
963 	size_t n = 0;
964 
965 	if (!IS_ALIGNED_WITH_TYPE(buf, uint64_t))
966 		return FFA_INVALID_PARAMETERS;
967 
968 	if (ffa_vers >= FFA_VERSION_1_1) {
969 		struct ffa_mem_transaction_1_1 *descr = NULL;
970 
971 		if (blen < sizeof(*descr))
972 			return FFA_INVALID_PARAMETERS;
973 
974 		descr = buf;
975 		trans->sender_id = READ_ONCE(descr->sender_id);
976 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
977 		flags = READ_ONCE(descr->flags);
978 		trans->global_handle = READ_ONCE(descr->global_handle);
979 		trans->tag = READ_ONCE(descr->tag);
980 
981 		count = READ_ONCE(descr->mem_access_count);
982 		size = READ_ONCE(descr->mem_access_size);
983 		offs = READ_ONCE(descr->mem_access_offs);
984 	} else {
985 		struct ffa_mem_transaction_1_0 *descr = NULL;
986 
987 		if (blen < sizeof(*descr))
988 			return FFA_INVALID_PARAMETERS;
989 
990 		descr = buf;
991 		trans->sender_id = READ_ONCE(descr->sender_id);
992 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
993 		flags = READ_ONCE(descr->flags);
994 		trans->global_handle = READ_ONCE(descr->global_handle);
995 		trans->tag = READ_ONCE(descr->tag);
996 
997 		count = READ_ONCE(descr->mem_access_count);
998 		size = sizeof(struct ffa_mem_access);
999 		offs = offsetof(struct ffa_mem_transaction_1_0,
1000 				mem_access_array);
1001 	}
1002 
1003 	if (mem_reg_attr > UINT8_MAX || flags > UINT8_MAX ||
1004 	    size > UINT8_MAX || count > UINT8_MAX || offs > UINT16_MAX)
1005 		return FFA_INVALID_PARAMETERS;
1006 
1007 	/* Check that the endpoint memory access descriptor array fits */
1008 	if (MUL_OVERFLOW(size, count, &n) || ADD_OVERFLOW(offs, n, &n) ||
1009 	    n > blen)
1010 		return FFA_INVALID_PARAMETERS;
1011 
1012 	trans->mem_reg_attr = mem_reg_attr;
1013 	trans->flags = flags;
1014 	trans->mem_access_size = size;
1015 	trans->mem_access_count = count;
1016 	trans->mem_access_offs = offs;
1017 	return 0;
1018 }
1019 
1020 #if defined(CFG_CORE_SEL1_SPMC)
1021 static int get_acc_perms(vaddr_t mem_acc_base, unsigned int mem_access_size,
1022 			 unsigned int mem_access_count, uint8_t *acc_perms,
1023 			 unsigned int *region_offs)
1024 {
1025 	struct ffa_mem_access_perm *descr = NULL;
1026 	struct ffa_mem_access *mem_acc = NULL;
1027 	unsigned int n = 0;
1028 
1029 	for (n = 0; n < mem_access_count; n++) {
1030 		mem_acc = (void *)(mem_acc_base + mem_access_size * n);
1031 		descr = &mem_acc->access_perm;
1032 		if (READ_ONCE(descr->endpoint_id) == optee_endpoint_id) {
1033 			*acc_perms = READ_ONCE(descr->perm);
1034 			*region_offs = READ_ONCE(mem_acc->region_offs);
1035 			return 0;
1036 		}
1037 	}
1038 
1039 	return FFA_INVALID_PARAMETERS;
1040 }
1041 
1042 static int mem_share_init(struct ffa_mem_transaction_x *mem_trans, void *buf,
1043 			  size_t blen, unsigned int *page_count,
1044 			  unsigned int *region_count, size_t *addr_range_offs)
1045 {
1046 	const uint16_t exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
1047 	const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
1048 	struct ffa_mem_region *region_descr = NULL;
1049 	unsigned int region_descr_offs = 0;
1050 	uint8_t mem_acc_perm = 0;
1051 	size_t n = 0;
1052 
1053 	if (mem_trans->mem_reg_attr != exp_mem_reg_attr)
1054 		return FFA_INVALID_PARAMETERS;
1055 
1056 	/* Check that the access permissions matches what's expected */
1057 	if (get_acc_perms((vaddr_t)buf + mem_trans->mem_access_offs,
1058 			  mem_trans->mem_access_size,
1059 			  mem_trans->mem_access_count,
1060 			  &mem_acc_perm, &region_descr_offs) ||
1061 	    mem_acc_perm != exp_mem_acc_perm)
1062 		return FFA_INVALID_PARAMETERS;
1063 
1064 	/* Check that the Composite memory region descriptor fits */
1065 	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
1066 	    n > blen)
1067 		return FFA_INVALID_PARAMETERS;
1068 
1069 	if (!IS_ALIGNED_WITH_TYPE((vaddr_t)buf + region_descr_offs,
1070 				  struct ffa_mem_region))
1071 		return FFA_INVALID_PARAMETERS;
1072 
1073 	region_descr = (struct ffa_mem_region *)((vaddr_t)buf +
1074 						 region_descr_offs);
1075 	*page_count = READ_ONCE(region_descr->total_page_count);
1076 	*region_count = READ_ONCE(region_descr->address_range_count);
1077 	*addr_range_offs = n;
1078 	return 0;
1079 }
1080 
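/*
 * Consume the address ranges in a (possibly partial) fragment and add the
 * pages to the mobj. Returns the number of bytes consumed when more regions
 * remain, 0 when the share is complete, or a negative FFA error code.
 */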
1081 static int add_mem_share_helper(struct mem_share_state *s, void *buf,
1082 				size_t flen)
1083 {
1084 	unsigned int region_count = flen / sizeof(struct ffa_address_range);
1085 	struct ffa_address_range *arange = NULL;
1086 	unsigned int n = 0;
1087 
1088 	if (region_count > s->region_count)
1089 		region_count = s->region_count;
1090 
1091 	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range))
1092 		return FFA_INVALID_PARAMETERS;
1093 	arange = buf;
1094 
1095 	for (n = 0; n < region_count; n++) {
1096 		unsigned int page_count = READ_ONCE(arange[n].page_count);
1097 		uint64_t addr = READ_ONCE(arange[n].address);
1098 
1099 		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
1100 					  addr, page_count))
1101 			return FFA_INVALID_PARAMETERS;
1102 	}
1103 
1104 	s->region_count -= region_count;
1105 	if (s->region_count)
1106 		return region_count * sizeof(*arange);
1107 
1108 	if (s->current_page_idx != s->page_count)
1109 		return FFA_INVALID_PARAMETERS;
1110 
1111 	return 0;
1112 }
1113 
1114 static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
1115 {
1116 	int rc = 0;
1117 
1118 	rc = add_mem_share_helper(&s->share, buf, flen);
1119 	if (rc >= 0) {
1120 		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
1121 			/* We're not at the end of the descriptor yet */
1122 			if (s->share.region_count)
1123 				return s->frag_offset;
1124 
1125 			/* We're done */
1126 			rc = 0;
1127 		} else {
1128 			rc = FFA_INVALID_PARAMETERS;
1129 		}
1130 	}
1131 
1132 	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
1133 	if (rc < 0)
1134 		mobj_ffa_sel1_spmc_delete(s->share.mf);
1135 	else
1136 		mobj_ffa_push_to_inactive(s->share.mf);
1137 	free(s);
1138 
1139 	return rc;
1140 }
1141 
1142 static bool is_sp_share(struct ffa_mem_transaction_x *mem_trans,
1143 			void *buf)
1144 {
1145 	struct ffa_mem_access_perm *perm = NULL;
1146 	struct ffa_mem_access *mem_acc = NULL;
1147 
1148 	if (!IS_ENABLED(CFG_SECURE_PARTITION))
1149 		return false;
1150 
1151 	if (mem_trans->mem_access_count < 1)
1152 		return false;
1153 
1154 	mem_acc = (void *)((vaddr_t)buf + mem_trans->mem_access_offs);
1155 	perm = &mem_acc->access_perm;
1156 
1157 	/*
1158 	 * perm->endpoint_id is read here only to check if the endpoint is
1159 	 * OP-TEE. We read it again later, but there are some additional
1160 	 * checks there to make sure that the data is correct.
1161 	 */
1162 	return READ_ONCE(perm->endpoint_id) != optee_endpoint_id;
1163 }
1164 
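/*
 * Register a new shared memory object from a transaction descriptor.
 * Returns 0 when the descriptor is complete, a positive fragment offset
 * when more fragments are expected, or a negative FFA error code.
 */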
1165 static int add_mem_share(struct ffa_mem_transaction_x *mem_trans,
1166 			 tee_mm_entry_t *mm, void *buf, size_t blen,
1167 			 size_t flen, uint64_t *global_handle)
1168 {
1169 	int rc = 0;
1170 	struct mem_share_state share = { };
1171 	size_t addr_range_offs = 0;
1172 	uint64_t cookie = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
1173 	size_t n = 0;
1174 
1175 	rc = mem_share_init(mem_trans, buf, flen, &share.page_count,
1176 			    &share.region_count, &addr_range_offs);
1177 	if (rc)
1178 		return rc;
1179 
1180 	if (!share.page_count || !share.region_count)
1181 		return FFA_INVALID_PARAMETERS;
1182 
1183 	if (MUL_OVERFLOW(share.region_count,
1184 			 sizeof(struct ffa_address_range), &n) ||
1185 	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
1186 		return FFA_INVALID_PARAMETERS;
1187 
1188 	if (mem_trans->global_handle)
1189 		cookie = mem_trans->global_handle;
1190 	share.mf = mobj_ffa_sel1_spmc_new(cookie, share.page_count);
1191 	if (!share.mf)
1192 		return FFA_NO_MEMORY;
1193 
1194 	if (flen != blen) {
1195 		struct mem_frag_state *s = calloc(1, sizeof(*s));
1196 
1197 		if (!s) {
1198 			rc = FFA_NO_MEMORY;
1199 			goto err;
1200 		}
1201 		s->share = share;
1202 		s->mm = mm;
1203 		s->frag_offset = addr_range_offs;
1204 
1205 		SLIST_INSERT_HEAD(&frag_state_head, s, link);
1206 		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
1207 					flen - addr_range_offs);
1208 
1209 		if (rc >= 0)
1210 			*global_handle = mobj_ffa_get_cookie(share.mf);
1211 
1212 		return rc;
1213 	}
1214 
1215 	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
1216 				  flen - addr_range_offs);
1217 	if (rc) {
1218 		/*
1219 		 * A positive return (number of consumed bytes) means the
1220 		 * descriptor was incomplete, an error for an unfragmented share.
1221 		 */
1222 		rc = FFA_INVALID_PARAMETERS;
1223 		goto err;
1224 	}
1225 
1226 	*global_handle = mobj_ffa_push_to_inactive(share.mf);
1227 
1228 	return 0;
1229 err:
1230 	mobj_ffa_sel1_spmc_delete(share.mf);
1231 	return rc;
1232 }
1233 
1234 static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
1235 				 unsigned int page_count,
1236 				 uint64_t *global_handle, struct ffa_rxtx *rxtx)
1237 {
1238 	struct ffa_mem_transaction_x mem_trans = { };
1239 	int rc = 0;
1240 	size_t len = 0;
1241 	void *buf = NULL;
1242 	tee_mm_entry_t *mm = NULL;
1243 	vaddr_t offs = pbuf & SMALL_PAGE_MASK;
1244 
1245 	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
1246 		return FFA_INVALID_PARAMETERS;
1247 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
1248 		return FFA_INVALID_PARAMETERS;
1249 
1250 	/*
1251 	 * Check that the length reported in flen is covered by len even
1252 	 * if the offset is taken into account.
1253 	 */
1254 	if (len < flen || len - offs < flen)
1255 		return FFA_INVALID_PARAMETERS;
1256 
1257 	mm = tee_mm_alloc(&core_virt_shm_pool, len);
1258 	if (!mm)
1259 		return FFA_NO_MEMORY;
1260 
1261 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
1262 					  page_count, MEM_AREA_NSEC_SHM)) {
1263 		rc = FFA_INVALID_PARAMETERS;
1264 		goto out;
1265 	}
1266 	buf = (void *)(tee_mm_get_smem(mm) + offs);
1267 
1268 	cpu_spin_lock(&rxtx->spinlock);
1269 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, buf, flen, &mem_trans);
1270 	if (rc)
1271 		goto unlock;
1272 
1273 	if (is_sp_share(&mem_trans, buf)) {
1274 		rc = spmc_sp_add_share(&mem_trans, buf, blen, flen,
1275 				       global_handle, NULL);
1276 		goto unlock;
1277 	}
1278 
1279 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1280 	    virt_set_guest(mem_trans.sender_id)) {
1281 		rc = FFA_DENIED;
1282 		goto unlock;
1283 	}
1284 
1285 	rc = add_mem_share(&mem_trans, mm, buf, blen, flen, global_handle);
1286 
1287 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1288 		virt_unset_guest();
1289 
1290 unlock:
1291 	cpu_spin_unlock(&rxtx->spinlock);
1292 	if (rc > 0)
1293 		return rc;
1294 
1295 	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1296 out:
1297 	tee_mm_free(mm);
1298 	return rc;
1299 }
1300 
1301 static int handle_mem_share_rxbuf(size_t blen, size_t flen,
1302 				  uint64_t *global_handle,
1303 				  struct ffa_rxtx *rxtx)
1304 {
1305 	struct ffa_mem_transaction_x mem_trans = { };
1306 	int rc = FFA_DENIED;
1307 
1308 	cpu_spin_lock(&rxtx->spinlock);
1309 
1310 	if (!rxtx->rx || flen > rxtx->size)
1311 		goto out;
1312 
1313 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, flen,
1314 				       &mem_trans);
1315 	if (rc)
1316 		goto out;
1317 	if (is_sp_share(&mem_trans, rxtx->rx)) {
1318 		rc = spmc_sp_add_share(&mem_trans, rxtx, blen, flen,
1319 				       global_handle, NULL);
1320 		goto out;
1321 	}
1322 
1323 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1324 	    virt_set_guest(mem_trans.sender_id))
1325 		goto out;
1326 
1327 	rc = add_mem_share(&mem_trans, NULL, rxtx->rx, blen, flen,
1328 			   global_handle);
1329 
1330 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1331 		virt_unset_guest();
1332 
1333 out:
1334 	cpu_spin_unlock(&rxtx->spinlock);
1335 
1336 	return rc;
1337 }
1338 
1339 static void handle_mem_share(struct thread_smc_1_2_regs *args,
1340 			     struct ffa_rxtx *rxtx)
1341 {
1342 	uint32_t tot_len = args->a1;
1343 	uint32_t frag_len = args->a2;
1344 	uint64_t addr = args->a3;
1345 	uint32_t page_count = args->a4;
1346 	uint32_t ret_w1 = 0;
1347 	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
1348 	uint32_t ret_w3 = 0;
1349 	uint32_t ret_fid = FFA_ERROR;
1350 	uint64_t global_handle = 0;
1351 	int rc = 0;
1352 
1353 	/* Check that the MBZs are indeed 0 */
1354 	if (args->a5 || args->a6 || args->a7)
1355 		goto out;
1356 
1357 	/* Check that fragment length doesn't exceed total length */
1358 	if (frag_len > tot_len)
1359 		goto out;
1360 
1361 	/* Check for 32-bit calling convention */
1362 	if (args->a0 == FFA_MEM_SHARE_32)
1363 		addr &= UINT32_MAX;
1364 
1365 	if (!addr) {
1366 		/*
1367 		 * The memory transaction descriptor is passed via our rx
1368 		 * buffer.
1369 		 */
1370 		if (page_count)
1371 			goto out;
1372 		rc = handle_mem_share_rxbuf(tot_len, frag_len, &global_handle,
1373 					    rxtx);
1374 	} else {
1375 		rc = handle_mem_share_tmem(addr, tot_len, frag_len, page_count,
1376 					   &global_handle, rxtx);
1377 	}
1378 	if (rc < 0) {
1379 		ret_w2 = rc;
1380 	} else if (rc > 0) {
1381 		ret_fid = FFA_MEM_FRAG_RX;
1382 		ret_w3 = rc;
1383 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1384 	} else {
1385 		ret_fid = FFA_SUCCESS_32;
1386 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1387 	}
1388 out:
1389 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1390 }
1391 
1392 static struct mem_frag_state *get_frag_state(uint64_t global_handle)
1393 {
1394 	struct mem_frag_state *s = NULL;
1395 
1396 	SLIST_FOREACH(s, &frag_state_head, link)
1397 		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
1398 			return s;
1399 
1400 	return NULL;
1401 }
1402 
1403 static void handle_mem_frag_tx(struct thread_smc_1_2_regs *args,
1404 			       struct ffa_rxtx *rxtx)
1405 {
1406 	uint64_t global_handle = reg_pair_to_64(args->a2, args->a1);
1407 	size_t flen = args->a3;
1408 	uint32_t endpoint_id = args->a4;
1409 	struct mem_frag_state *s = NULL;
1410 	tee_mm_entry_t *mm = NULL;
1411 	unsigned int page_count = 0;
1412 	void *buf = NULL;
1413 	uint32_t ret_w1 = 0;
1414 	uint32_t ret_w2 = 0;
1415 	uint32_t ret_w3 = 0;
1416 	uint32_t ret_fid = 0;
1417 	int rc = 0;
1418 
1419 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1420 		uint16_t guest_id = endpoint_id >> 16;
1421 
1422 		if (!guest_id || virt_set_guest(guest_id)) {
1423 			rc = FFA_INVALID_PARAMETERS;
1424 			goto out_set_rc;
1425 		}
1426 	}
1427 
1428 	/*
1429 	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
1430 	 * requests.
1431 	 */
1432 
1433 	cpu_spin_lock(&rxtx->spinlock);
1434 
1435 	s = get_frag_state(global_handle);
1436 	if (!s) {
1437 		rc = FFA_INVALID_PARAMETERS;
1438 		goto out;
1439 	}
1440 
1441 	mm = s->mm;
1442 	if (mm) {
1443 		if (flen > tee_mm_get_bytes(mm)) {
1444 			rc = FFA_INVALID_PARAMETERS;
1445 			goto out;
1446 		}
1447 		page_count = s->share.page_count;
1448 		buf = (void *)tee_mm_get_smem(mm);
1449 	} else {
1450 		if (flen > rxtx->size) {
1451 			rc = FFA_INVALID_PARAMETERS;
1452 			goto out;
1453 		}
1454 		buf = rxtx->rx;
1455 	}
1456 
1457 	rc = add_mem_share_frag(s, buf, flen);
1458 out:
1459 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1460 		virt_unset_guest();
1461 
1462 	cpu_spin_unlock(&rxtx->spinlock);
1463 
1464 	if (rc <= 0 && mm) {
1465 		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1466 		tee_mm_free(mm);
1467 	}
1468 
1469 out_set_rc:
1470 	if (rc < 0) {
1471 		ret_fid = FFA_ERROR;
1472 		ret_w2 = rc;
1473 	} else if (rc > 0) {
1474 		ret_fid = FFA_MEM_FRAG_RX;
1475 		ret_w3 = rc;
1476 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1477 	} else {
1478 		ret_fid = FFA_SUCCESS_32;
1479 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1480 	}
1481 
1482 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1483 }
1484 
1485 static void handle_mem_reclaim(struct thread_smc_1_2_regs *args)
1486 {
1487 	int rc = FFA_INVALID_PARAMETERS;
1488 	uint64_t cookie = 0;
1489 
1490 	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
1491 		goto out;
1492 
1493 	cookie = reg_pair_to_64(args->a2, args->a1);
1494 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1495 		uint16_t guest_id = 0;
1496 
1497 		if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
1498 			guest_id = virt_find_guest_by_cookie(cookie);
1499 		} else {
1500 			guest_id = (cookie >> FFA_MEMORY_HANDLE_PRTN_SHIFT) &
1501 				   FFA_MEMORY_HANDLE_PRTN_MASK;
1502 		}
1503 		if (!guest_id)
1504 			goto out;
1505 		if (virt_set_guest(guest_id)) {
1506 			if (!virt_reclaim_cookie_from_destroyed_guest(guest_id,
1507 								      cookie))
1508 				rc = FFA_OK;
1509 			goto out;
1510 		}
1511 	}
1512 
1513 	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
1514 	case TEE_SUCCESS:
1515 		rc = FFA_OK;
1516 		break;
1517 	case TEE_ERROR_ITEM_NOT_FOUND:
1518 		DMSG("cookie %#"PRIx64" not found", cookie);
1519 		rc = FFA_INVALID_PARAMETERS;
1520 		break;
1521 	default:
1522 		DMSG("cookie %#"PRIx64" busy", cookie);
1523 		rc = FFA_DENIED;
1524 		break;
1525 	}
1526 
1527 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1528 		virt_unset_guest();
1529 
1530 out:
1531 	set_simple_ret_val(args, rc);
1532 }
1533 
1534 static void handle_notification_bitmap_create(struct thread_smc_1_2_regs *args)
1535 {
1536 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1537 	uint32_t ret_fid = FFA_ERROR;
1538 	uint32_t old_itr_status = 0;
1539 
1540 	if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
1541 	    !args->a5 && !args->a6 && !args->a7) {
1542 		struct guest_partition *prtn = NULL;
1543 		struct notif_vm_bitmap *nvb = NULL;
1544 		uint16_t vm_id = args->a1;
1545 
1546 		prtn = virt_get_guest(vm_id);
1547 		nvb = get_notif_vm_bitmap(prtn, vm_id);
1548 		if (!nvb) {
1549 			ret_val = FFA_INVALID_PARAMETERS;
1550 			goto out_virt_put;
1551 		}
1552 
1553 		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1554 
1555 		if (nvb->initialized) {
1556 			ret_val = FFA_DENIED;
1557 			goto out_unlock;
1558 		}
1559 
1560 		nvb->initialized = true;
1561 		nvb->do_bottom_half_value = -1;
1562 		ret_val = FFA_OK;
1563 		ret_fid = FFA_SUCCESS_32;
1564 out_unlock:
1565 		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1566 out_virt_put:
1567 		virt_put_guest(prtn);
1568 	}
1569 
1570 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1571 }
1572 
1573 static void handle_notification_bitmap_destroy(struct thread_smc_1_2_regs *args)
1574 {
1575 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1576 	uint32_t ret_fid = FFA_ERROR;
1577 	uint32_t old_itr_status = 0;
1578 
1579 	if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
1580 	    !args->a5 && !args->a6 && !args->a7) {
1581 		struct guest_partition *prtn = NULL;
1582 		struct notif_vm_bitmap *nvb = NULL;
1583 		uint16_t vm_id = args->a1;
1584 
1585 		prtn = virt_get_guest(vm_id);
1586 		nvb = get_notif_vm_bitmap(prtn, vm_id);
1587 		if (!nvb) {
1588 			ret_val = FFA_INVALID_PARAMETERS;
1589 			goto out_virt_put;
1590 		}
1591 
1592 		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1593 
1594 		if (nvb->pending || nvb->bound) {
1595 			ret_val = FFA_DENIED;
1596 			goto out_unlock;
1597 		}
1598 
1599 		memset(nvb, 0, sizeof(*nvb));
1600 		ret_val = FFA_OK;
1601 		ret_fid = FFA_SUCCESS_32;
1602 out_unlock:
1603 		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1604 out_virt_put:
1605 		virt_put_guest(prtn);
1606 	}
1607 
1608 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1609 }
1610 
1611 static void handle_notification_bind(struct thread_smc_1_2_regs *args)
1612 {
1613 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1614 	struct guest_partition *prtn = NULL;
1615 	struct notif_vm_bitmap *nvb = NULL;
1616 	uint32_t ret_fid = FFA_ERROR;
1617 	uint32_t old_itr_status = 0;
1618 	uint64_t bitmap = 0;
1619 	uint16_t vm_id = 0;
1620 
1621 	if (args->a5 || args->a6 || args->a7)
1622 		goto out;
1623 	if (args->a2) {
1624 		/* We only deal with global notifications */
1625 		ret_val = FFA_DENIED;
1626 		goto out;
1627 	}
1628 
1629 	/* The destination of the eventual notification */
1630 	vm_id = FFA_DST(args->a1);
1631 	bitmap = reg_pair_to_64(args->a4, args->a3);
1632 
1633 	prtn = virt_get_guest(vm_id);
1634 	nvb = get_notif_vm_bitmap(prtn, vm_id);
1635 	if (!nvb) {
1636 		ret_val = FFA_INVALID_PARAMETERS;
1637 		goto out_virt_put;
1638 	}
1639 
1640 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1641 
1642 	if ((bitmap & nvb->bound)) {
1643 		ret_val = FFA_DENIED;
1644 	} else {
1645 		nvb->bound |= bitmap;
1646 		ret_val = FFA_OK;
1647 		ret_fid = FFA_SUCCESS_32;
1648 	}
1649 
1650 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1651 out_virt_put:
1652 	virt_put_guest(prtn);
1653 out:
1654 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1655 }
1656 
1657 static void handle_notification_unbind(struct thread_smc_1_2_regs *args)
1658 {
1659 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1660 	struct guest_partition *prtn = NULL;
1661 	struct notif_vm_bitmap *nvb = NULL;
1662 	uint32_t ret_fid = FFA_ERROR;
1663 	uint32_t old_itr_status = 0;
1664 	uint64_t bitmap = 0;
1665 	uint16_t vm_id = 0;
1666 
1667 	if (args->a2 || args->a5 || args->a6 || args->a7)
1668 		goto out;
1669 
1670 	/* The destination of the eventual notification */
1671 	vm_id = FFA_DST(args->a1);
1672 	bitmap = reg_pair_to_64(args->a4, args->a3);
1673 
1674 	prtn = virt_get_guest(vm_id);
1675 	nvb = get_notif_vm_bitmap(prtn, vm_id);
1676 	if (!nvb) {
1677 		ret_val = FFA_INVALID_PARAMETERS;
1678 		goto out_virt_put;
1679 	}
1680 
1681 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1682 
1683 	if (bitmap & nvb->pending) {
1684 		ret_val = FFA_DENIED;
1685 	} else {
1686 		nvb->bound &= ~bitmap;
1687 		ret_val = FFA_OK;
1688 		ret_fid = FFA_SUCCESS_32;
1689 	}
1690 
1691 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1692 out_virt_put:
1693 	virt_put_guest(prtn);
1694 out:
1695 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1696 }
1697 
1698 static void handle_notification_get(struct thread_smc_1_2_regs *args)
1699 {
1700 	uint32_t w2 = FFA_INVALID_PARAMETERS;
1701 	struct guest_partition *prtn = NULL;
1702 	struct notif_vm_bitmap *nvb = NULL;
1703 	uint32_t ret_fid = FFA_ERROR;
1704 	uint32_t old_itr_status = 0;
1705 	uint16_t vm_id = 0;
1706 	uint32_t w3 = 0;
1707 
1708 	if (args->a5 || args->a6 || args->a7)
1709 		goto out;
1710 	if (!(args->a2 & 0x1)) {
1711 		ret_fid = FFA_SUCCESS_32;
1712 		w2 = 0;
1713 		goto out;
1714 	}
1715 	vm_id = FFA_DST(args->a1);
1716 
1717 	prtn = virt_get_guest(vm_id);
1718 	nvb = get_notif_vm_bitmap(prtn, vm_id);
1719 	if (!nvb)
1720 		goto out_virt_put;
1721 
1722 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1723 
1724 	reg_pair_from_64(nvb->pending, &w3, &w2);
1725 	nvb->pending = 0;
1726 	ret_fid = FFA_SUCCESS_32;
1727 
1728 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1729 out_virt_put:
1730 	virt_put_guest(prtn);
1731 out:
1732 	spmc_set_args(args, ret_fid, 0, w2, w3, 0, 0);
1733 }
1734 
1735 struct notif_info_get_state {
1736 	struct thread_smc_1_2_regs *args;
1737 	unsigned int ids_per_reg;
1738 	unsigned int ids_count;
1739 	unsigned int id_pos;
1740 	unsigned int count;
1741 	unsigned int max_list_count;
1742 	unsigned int list_count;
1743 };
1744 
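/*
 * Pack the next 16-bit endpoint ID into the w3..w7 result registers of
 * FFA_NOTIFICATION_INFO_GET; returns false once all registers are full.
 */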
1745 static bool add_id_in_regs(struct notif_info_get_state *state,
1746 			   uint16_t id)
1747 {
1748 	unsigned int reg_idx = state->id_pos / state->ids_per_reg + 3;
1749 	unsigned int reg_shift = (state->id_pos % state->ids_per_reg) * 16;
1750 
1751 	if (reg_idx > 7)
1752 		return false;
1753 
1754 	state->args->a[reg_idx] &= ~SHIFT_U64(0xffff, reg_shift);
1755 	state->args->a[reg_idx] |= (unsigned long)id << reg_shift;
1756 
1757 	state->id_pos++;
1758 	state->count++;
1759 	return true;
1760 }
1761 
1762 static bool add_id_count(struct notif_info_get_state *state)
1763 {
1764 	assert(state->list_count < state->max_list_count &&
1765 	       state->count >= 1 && state->count <= 4);
1766 
1767 	state->ids_count |= (state->count - 1) << (state->list_count * 2 + 12);
1768 	state->list_count++;
1769 	state->count = 0;
1770 
1771 	return state->list_count < state->max_list_count;
1772 }
1773 
1774 static bool add_nvb_to_state(struct notif_info_get_state *state,
1775 			     uint16_t guest_id, struct notif_vm_bitmap *nvb)
1776 {
1777 	if (!nvb->pending)
1778 		return true;
1779 	/*
1780 	 * Add only the guest_id, meaning a global notification for this
1781 	 * guest.
1782 	 *
1783 	 * If there were notifications for one or more specific vCPUs, we'd
1784 	 * add those before calling add_id_count(), but that's not supported.
1785 	 */
1786 	return add_id_in_regs(state, guest_id) && add_id_count(state);
1787 }
1788 
1789 static void handle_notification_info_get(struct thread_smc_1_2_regs *args)
1790 {
1791 	struct notif_info_get_state state = { .args = args };
1792 	uint32_t ffa_res = FFA_INVALID_PARAMETERS;
1793 	struct guest_partition *prtn = NULL;
1794 	struct notif_vm_bitmap *nvb = NULL;
1795 	uint32_t more_pending_flag = 0;
1796 	uint32_t itr_state = 0;
1797 	uint16_t guest_id = 0;
1798 
1799 	if (args->a1 || args->a2 || args->a3 || args->a4 || args->a5 ||
1800 	    args->a6 || args->a7)
1801 		goto err;
1802 
1803 	if (OPTEE_SMC_IS_64(args->a0)) {
1804 		spmc_set_args(args, FFA_SUCCESS_64, 0, 0, 0, 0, 0);
1805 		state.ids_per_reg = 4;
1806 		state.max_list_count = 31;
1807 	} else {
1808 		spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0);
1809 		state.ids_per_reg = 2;
1810 		state.max_list_count = 15;
1811 	}
1812 
1813 	while (true) {
1814 		/*
1815 		 * With NS-Virtualization we need to go through all
1816 		 * partitions to collect the notification bitmaps; without it
1817 		 * we just check the only notification bitmap we have.
1818 		 */
1819 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1820 			prtn = virt_next_guest(prtn);
1821 			if (!prtn)
1822 				break;
1823 			guest_id = virt_get_guest_id(prtn);
1824 		}
1825 		nvb = get_notif_vm_bitmap(prtn, guest_id);
1826 
1827 		itr_state = cpu_spin_lock_xsave(&spmc_notif_lock);
1828 		if (!add_nvb_to_state(&state, guest_id, nvb))
1829 			more_pending_flag = BIT(0);
1830 		cpu_spin_unlock_xrestore(&spmc_notif_lock, itr_state);
1831 
1832 		if (!IS_ENABLED(CFG_NS_VIRTUALIZATION) || more_pending_flag)
1833 			break;
1834 	}
1835 	virt_put_guest(prtn);
1836 
1837 	if (!state.id_pos) {
1838 		ffa_res = FFA_NO_DATA;
1839 		goto err;
1840 	}
1841 	args->a2 = (state.list_count << FFA_NOTIF_INFO_GET_ID_COUNT_SHIFT) |
1842 		   (state.ids_count << FFA_NOTIF_INFO_GET_ID_LIST_SHIFT) |
1843 		   more_pending_flag;
1844 	return;
1845 err:
1846 	spmc_set_args(args, FFA_ERROR, 0, ffa_res, 0, 0, 0);
1847 }
1848 
1849 void thread_spmc_set_async_notif_intid(int intid)
1850 {
1851 	assert(interrupt_can_raise_sgi(interrupt_get_main_chip()));
1852 	notif_intid = intid;
1853 	spmc_notif_is_ready = true;
1854 	DMSG("Asynchronous notifications are ready");
1855 }
1856 
1857 void notif_send_async(uint32_t value, uint16_t guest_id)
1858 {
1859 	struct guest_partition *prtn = NULL;
1860 	struct notif_vm_bitmap *nvb = NULL;
1861 	uint32_t old_itr_status = 0;
1862 
1863 	prtn = virt_get_guest(guest_id);
1864 	nvb = get_notif_vm_bitmap(prtn, guest_id);
1865 
1866 	if (nvb) {
1867 		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1868 		assert(value == NOTIF_VALUE_DO_BOTTOM_HALF &&
1869 		       spmc_notif_is_ready && nvb->do_bottom_half_value >= 0 &&
1870 		       notif_intid >= 0);
1871 		nvb->pending |= BIT64(nvb->do_bottom_half_value);
1872 		interrupt_raise_sgi(interrupt_get_main_chip(), notif_intid,
1873 				    ITR_CPU_MASK_TO_THIS_CPU);
1874 		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1875 	}
1876 
1877 	virt_put_guest(prtn);
1878 }
1879 #else
1880 void notif_send_async(uint32_t value, uint16_t guest_id)
1881 {
1882 	struct guest_partition *prtn = NULL;
1883 	struct notif_vm_bitmap *nvb = NULL;
1884 	/* global notification, delay notification interrupt */
1885 	uint32_t flags = BIT32(1);
1886 	int res = 0;
1887 
1888 	prtn = virt_get_guest(guest_id);
1889 	nvb = get_notif_vm_bitmap(prtn, guest_id);
1890 
1891 	if (nvb) {
1892 		assert(value == NOTIF_VALUE_DO_BOTTOM_HALF &&
1893 		       spmc_notif_is_ready && nvb->do_bottom_half_value >= 0);
1894 		res = ffa_set_notification(guest_id, optee_endpoint_id, flags,
1895 					   BIT64(nvb->do_bottom_half_value));
1896 		if (res) {
1897 			EMSG("notification set failed with error %d", res);
1898 			panic();
1899 		}
1900 	}
1901 
1902 	virt_put_guest(prtn);
1903 }
1904 #endif
1905 
1906 /* Only called from assembly */
1907 void thread_spmc_msg_recv(struct thread_smc_1_2_regs *args);
1908 void thread_spmc_msg_recv(struct thread_smc_1_2_regs *args)
1909 {
1910 	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
1911 	switch (args->a0) {
1912 #if defined(CFG_CORE_SEL1_SPMC)
1913 	case FFA_FEATURES:
1914 		handle_features(args);
1915 		break;
1916 	case FFA_SPM_ID_GET:
1917 		spmc_handle_spm_id_get(args);
1918 		break;
1919 #ifdef ARM64
1920 	case FFA_RXTX_MAP_64:
1921 #endif
1922 	case FFA_RXTX_MAP_32:
1923 		spmc_handle_rxtx_map(args, &my_rxtx);
1924 		break;
1925 	case FFA_RXTX_UNMAP:
1926 		spmc_handle_rxtx_unmap(args, &my_rxtx);
1927 		break;
1928 	case FFA_RX_RELEASE:
1929 		spmc_handle_rx_release(args, &my_rxtx);
1930 		break;
1931 	case FFA_PARTITION_INFO_GET:
1932 		spmc_handle_partition_info_get(args, &my_rxtx);
1933 		break;
1934 	case FFA_RUN:
1935 		spmc_handle_run(args);
1936 		break;
1937 #endif /*CFG_CORE_SEL1_SPMC*/
1938 	case FFA_INTERRUPT:
1939 		if (IS_ENABLED(CFG_CORE_SEL1_SPMC))
1940 			spmc_set_args(args, FFA_NORMAL_WORLD_RESUME, 0, 0, 0,
1941 				      0, 0);
1942 		else
1943 			spmc_set_args(args, FFA_MSG_WAIT, 0, 0, 0, 0, 0);
1944 		break;
1945 #ifdef ARM64
1946 	case FFA_MSG_SEND_DIRECT_REQ_64:
1947 #endif
1948 	case FFA_MSG_SEND_DIRECT_REQ_32:
1949 		handle_direct_request(args, &my_rxtx);
1950 		break;
1951 #if defined(CFG_CORE_SEL1_SPMC)
1952 #ifdef ARM64
1953 	case FFA_MEM_SHARE_64:
1954 #endif
1955 	case FFA_MEM_SHARE_32:
1956 		handle_mem_share(args, &my_rxtx);
1957 		break;
1958 	case FFA_MEM_RECLAIM:
1959 		if (!IS_ENABLED(CFG_SECURE_PARTITION) ||
1960 		    !ffa_mem_reclaim(args, NULL))
1961 			handle_mem_reclaim(args);
1962 		break;
1963 	case FFA_MEM_FRAG_TX:
1964 		handle_mem_frag_tx(args, &my_rxtx);
1965 		break;
1966 	case FFA_NOTIFICATION_BITMAP_CREATE:
1967 		handle_notification_bitmap_create(args);
1968 		break;
1969 	case FFA_NOTIFICATION_BITMAP_DESTROY:
1970 		handle_notification_bitmap_destroy(args);
1971 		break;
1972 	case FFA_NOTIFICATION_BIND:
1973 		handle_notification_bind(args);
1974 		break;
1975 	case FFA_NOTIFICATION_UNBIND:
1976 		handle_notification_unbind(args);
1977 		break;
1978 	case FFA_NOTIFICATION_GET:
1979 		handle_notification_get(args);
1980 		break;
1981 #ifdef ARM64
1982 	case FFA_NOTIFICATION_INFO_GET_64:
1983 #endif
1984 	case FFA_NOTIFICATION_INFO_GET_32:
1985 		handle_notification_info_get(args);
1986 		break;
1987 #endif /*CFG_CORE_SEL1_SPMC*/
1988 	case FFA_ERROR:
1989 		EMSG("Cannot handle FFA_ERROR(%d)", (int)args->a2);
1990 		if (!IS_ENABLED(CFG_CORE_SEL1_SPMC)) {
1991 			/*
1992 			 * The SPMC will return an FFA_ERROR back, so it's better
1993 			 * to panic() now than to flood the log.
1994 			 */
1995 			panic("FFA_ERROR from SPMC is fatal");
1996 		}
1997 		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
1998 			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1999 		break;
2000 	default:
2001 		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
2002 		set_simple_ret_val(args, FFA_NOT_SUPPORTED);
2003 	}
2004 }
2005 
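/*
 * Handle OPTEE_FFA_YIELDING_CALL_WITH_ARG: map the shared memory object
 * identified by @cookie, locate the struct optee_msg_arg at @offset,
 * reserve the area following it for RPC arguments and pass the request
 * to tee_entry_std().
 */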
2006 static TEE_Result yielding_call_with_arg(uint64_t cookie, uint32_t offset)
2007 {
2008 	size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
2009 	struct thread_ctx *thr = threads + thread_get_id();
2010 	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
2011 	struct optee_msg_arg *arg = NULL;
2012 	struct mobj *mobj = NULL;
2013 	uint32_t num_params = 0;
2014 	size_t sz = 0;
2015 
2016 	mobj = mobj_ffa_get_by_cookie(cookie, 0);
2017 	if (!mobj) {
2018 		EMSG("Can't find cookie %#"PRIx64, cookie);
2019 		return TEE_ERROR_BAD_PARAMETERS;
2020 	}
2021 
2022 	res = mobj_inc_map(mobj);
2023 	if (res)
2024 		goto out_put_mobj;
2025 
2026 	res = TEE_ERROR_BAD_PARAMETERS;
2027 	arg = mobj_get_va(mobj, offset, sizeof(*arg));
2028 	if (!arg)
2029 		goto out_dec_map;
2030 
2031 	num_params = READ_ONCE(arg->num_params);
2032 	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
2033 		goto out_dec_map;
2034 
2035 	sz = OPTEE_MSG_GET_ARG_SIZE(num_params);
2036 
2037 	thr->rpc_arg = mobj_get_va(mobj, offset + sz, sz_rpc);
2038 	if (!thr->rpc_arg)
2039 		goto out_dec_map;
2040 
2041 	virt_on_stdcall();
2042 	res = tee_entry_std(arg, num_params);
2043 
2044 	thread_rpc_shm_cache_clear(&thr->shm_cache);
2045 	thr->rpc_arg = NULL;
2046 
2047 out_dec_map:
2048 	mobj_dec_map(mobj);
2049 out_put_mobj:
2050 	mobj_put(mobj);
2051 	return res;
2052 }
2053 
2054 /*
2055  * Helper routine for the assembly function thread_std_smc_entry()
2056  *
2057  * Note: this function is weak just to make link_dummies_paged.c happy.
2058  */
2059 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
2060 				       uint32_t a2, uint32_t a3,
2061 				       uint32_t a4, uint32_t a5 __unused)
2062 {
2063 	/*
2064 	 * Arguments are supplied from handle_yielding_call() as:
2065 	 * a0 <- w1
2066 	 * a1 <- w3
2067 	 * a2 <- w4
2068 	 * a3 <- w5
2069 	 * a4 <- w6
2070 	 * a5 <- w7
2071 	 */
2072 	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
2073 	if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG)
2074 		return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4);
2075 	return FFA_DENIED;
2076 }
2077 
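/*
 * Convert a memref thread_param into an OPTEE_MSG fmem parameter.
 * Returns false if the offset doesn't fit in the descriptor or if a
 * supplied mobj lacks a valid cookie.
 */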
2078 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
2079 {
2080 	uint64_t offs = tpm->u.memref.offs;
2081 
2082 	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
2083 		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
2084 
2085 	param->u.fmem.offs_low = offs;
2086 	param->u.fmem.offs_high = offs >> 32;
2087 	if (param->u.fmem.offs_high != offs >> 32)
2088 		return false;
2089 
2090 	param->u.fmem.size = tpm->u.memref.size;
2091 	if (tpm->u.memref.mobj) {
2092 		uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj);
2093 
2094 		/* If a mobj is passed it must be one with a valid cookie. */
2095 		if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
2096 			return false;
2097 		param->u.fmem.global_id = cookie;
2098 	} else {
2099 		param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
2100 	}
2101 
2102 	return true;
2103 }
2104 
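/*
 * Fill the per-thread RPC argument buffer, set up by
 * yielding_call_with_arg(), with @cmd and @params. A pointer to the
 * buffer is returned via @arg_ret if non-NULL.
 */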
2105 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
2106 			    struct thread_param *params,
2107 			    struct optee_msg_arg **arg_ret)
2108 {
2109 	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
2110 	struct thread_ctx *thr = threads + thread_get_id();
2111 	struct optee_msg_arg *arg = thr->rpc_arg;
2112 
2113 	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
2114 		return TEE_ERROR_BAD_PARAMETERS;
2115 
2116 	if (!arg) {
2117 		EMSG("rpc_arg not set");
2118 		return TEE_ERROR_GENERIC;
2119 	}
2120 
2121 	memset(arg, 0, sz);
2122 	arg->cmd = cmd;
2123 	arg->num_params = num_params;
2124 	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
2125 
2126 	for (size_t n = 0; n < num_params; n++) {
2127 		switch (params[n].attr) {
2128 		case THREAD_PARAM_ATTR_NONE:
2129 			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
2130 			break;
2131 		case THREAD_PARAM_ATTR_VALUE_IN:
2132 		case THREAD_PARAM_ATTR_VALUE_OUT:
2133 		case THREAD_PARAM_ATTR_VALUE_INOUT:
2134 			arg->params[n].attr = params[n].attr -
2135 					      THREAD_PARAM_ATTR_VALUE_IN +
2136 					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
2137 			arg->params[n].u.value.a = params[n].u.value.a;
2138 			arg->params[n].u.value.b = params[n].u.value.b;
2139 			arg->params[n].u.value.c = params[n].u.value.c;
2140 			break;
2141 		case THREAD_PARAM_ATTR_MEMREF_IN:
2142 		case THREAD_PARAM_ATTR_MEMREF_OUT:
2143 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
2144 			if (!set_fmem(arg->params + n, params + n))
2145 				return TEE_ERROR_BAD_PARAMETERS;
2146 			break;
2147 		default:
2148 			return TEE_ERROR_BAD_PARAMETERS;
2149 		}
2150 	}
2151 
2152 	if (arg_ret)
2153 		*arg_ret = arg;
2154 
2155 	return TEE_SUCCESS;
2156 }
2157 
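/*
 * Copy output values and updated memref sizes back from the RPC argument
 * buffer into @params and return the result reported by normal world.
 */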
2158 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
2159 				struct thread_param *params)
2160 {
2161 	for (size_t n = 0; n < num_params; n++) {
2162 		switch (params[n].attr) {
2163 		case THREAD_PARAM_ATTR_VALUE_OUT:
2164 		case THREAD_PARAM_ATTR_VALUE_INOUT:
2165 			params[n].u.value.a = arg->params[n].u.value.a;
2166 			params[n].u.value.b = arg->params[n].u.value.b;
2167 			params[n].u.value.c = arg->params[n].u.value.c;
2168 			break;
2169 		case THREAD_PARAM_ATTR_MEMREF_OUT:
2170 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
2171 			params[n].u.memref.size = arg->params[n].u.fmem.size;
2172 			break;
2173 		default:
2174 			break;
2175 		}
2176 	}
2177 
2178 	return arg->ret;
2179 }
2180 
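/*
 * Issue an RPC to normal world carrying @cmd and @params and copy output
 * parameters back into @params on return.
 */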
2181 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
2182 			struct thread_param *params)
2183 {
2184 	struct thread_rpc_arg rpc_arg = { .call = {
2185 			.w1 = thread_get_tsd()->rpc_target_info,
2186 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
2187 		},
2188 	};
2189 	struct optee_msg_arg *arg = NULL;
2190 	uint32_t ret = 0;
2191 
2192 	ret = get_rpc_arg(cmd, num_params, params, &arg);
2193 	if (ret)
2194 		return ret;
2195 
2196 	thread_rpc(&rpc_arg);
2197 
2198 	return get_rpc_arg_res(arg, num_params, params);
2199 }
2200 
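/*
 * Ask normal world with an OPTEE_RPC_CMD_SHM_FREE RPC to free the shared
 * memory identified by @cookie, after dropping our reference to @mobj and
 * unregistering the cookie.
 */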
2201 static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
2202 {
2203 	struct thread_rpc_arg rpc_arg = { .call = {
2204 			.w1 = thread_get_tsd()->rpc_target_info,
2205 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
2206 		},
2207 	};
2208 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
2209 	uint32_t res2 = 0;
2210 	uint32_t res = 0;
2211 
2212 	DMSG("freeing cookie %#"PRIx64, cookie);
2213 
2214 	res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);
2215 
2216 	mobj_put(mobj);
2217 	res2 = mobj_ffa_unregister_by_cookie(cookie);
2218 	if (res2)
2219 		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
2220 		     cookie, res2);
2221 	if (!res)
2222 		thread_rpc(&rpc_arg);
2223 }
2224 
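/*
 * Ask normal world with an OPTEE_RPC_CMD_SHM_ALLOC RPC to allocate @size
 * bytes of shared memory of type @bt, then look up and map the resulting
 * FF-A memory object. Returns NULL on failure.
 */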
2225 static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
2226 {
2227 	struct thread_rpc_arg rpc_arg = { .call = {
2228 			.w1 = thread_get_tsd()->rpc_target_info,
2229 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
2230 		},
2231 	};
2232 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
2233 	struct optee_msg_arg *arg = NULL;
2234 	unsigned int internal_offset = 0;
2235 	struct mobj *mobj = NULL;
2236 	uint64_t cookie = 0;
2237 
2238 	if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
2239 		return NULL;
2240 
2241 	thread_rpc(&rpc_arg);
2242 
2243 	if (arg->num_params != 1 ||
2244 	    arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
2245 		return NULL;
2246 
2247 	internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs);
2248 	cookie = READ_ONCE(arg->params->u.fmem.global_id);
2249 	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
2250 	if (!mobj) {
2251 		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
2252 		     cookie, internal_offset);
2253 		return NULL;
2254 	}
2255 
2256 	assert(mobj_is_nonsec(mobj));
2257 
2258 	if (mobj->size < size) {
2259 		DMSG("Mobj %#"PRIx64": wrong size", cookie);
2260 		mobj_put(mobj);
2261 		return NULL;
2262 	}
2263 
2264 	if (mobj_inc_map(mobj)) {
2265 		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
2266 		mobj_put(mobj);
2267 		return NULL;
2268 	}
2269 
2270 	return mobj;
2271 }
2272 
2273 struct mobj *thread_rpc_alloc_payload(size_t size)
2274 {
2275 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
2276 }
2277 
2278 struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
2279 {
2280 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
2281 }
2282 
2283 void thread_rpc_free_kernel_payload(struct mobj *mobj)
2284 {
2285 	if (mobj)
2286 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL,
2287 				mobj_get_cookie(mobj), mobj);
2288 }
2289 
2290 void thread_rpc_free_payload(struct mobj *mobj)
2291 {
2292 	if (mobj)
2293 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
2294 				mobj);
2295 }
2296 
2297 struct mobj *thread_rpc_alloc_global_payload(size_t size)
2298 {
2299 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
2300 }
2301 
2302 void thread_rpc_free_global_payload(struct mobj *mobj)
2303 {
2304 	if (mobj)
2305 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL,
2306 				mobj_get_cookie(mobj), mobj);
2307 }
2308 
2309 void thread_spmc_register_secondary_ep(vaddr_t ep)
2310 {
2311 	unsigned long ret = 0;
2312 
2313 	/* Let the SPM know the entry point for secondary CPUs */
2314 	ret = thread_smc(FFA_SECONDARY_EP_REGISTER_64, ep, 0, 0);
2315 
2316 	if (ret != FFA_SUCCESS_32 && ret != FFA_SUCCESS_64)
2317 		EMSG("FFA_SECONDARY_EP_REGISTER_64 ret %#lx", ret);
2318 }
2319 
2320 static uint16_t ffa_id_get(void)
2321 {
2322 	/*
2323 	 * Ask the SPM component running at a higher EL to return our FF-A ID.
2324 	 * This can either be the SPMC ID (if the SPMC is enabled in OP-TEE) or
2325 	 * the partition ID (if not).
2326 	 */
2327 	struct thread_smc_args args = {
2328 		.a0 = FFA_ID_GET,
2329 	};
2330 
2331 	thread_smccc(&args);
2332 	if (!is_ffa_success(args.a0)) {
2333 		if (args.a0 == FFA_ERROR)
2334 			EMSG("Get id failed with error %ld", args.a2);
2335 		else
2336 			EMSG("Get id failed");
2337 		panic();
2338 	}
2339 
2340 	return args.a2;
2341 }
2342 
2343 static uint16_t ffa_spm_id_get(void)
2344 {
2345 	/*
2346 	 * Ask the SPM component running at a higher EL to return its ID.
2347 	 * If OP-TEE implements the S-EL1 SPMC, this will get the SPMD ID.
2348 	 * If not, the ID of the SPMC will be returned.
2349 	 */
2350 	struct thread_smc_args args = {
2351 		.a0 = FFA_SPM_ID_GET,
2352 	};
2353 
2354 	thread_smccc(&args);
2355 	if (!is_ffa_success(args.a0)) {
2356 		if (args.a0 == FFA_ERROR)
2357 			EMSG("Get spm id failed with error %ld", args.a2);
2358 		else
2359 			EMSG("Get spm id failed");
2360 		panic();
2361 	}
2362 
2363 	return args.a2;
2364 }
2365 
2366 #if defined(CFG_CORE_SEL1_SPMC)
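/*
 * Initialize the S-EL1 SPMC: register the per-guest notification bitmap
 * data, look up the SPMD and SPMC IDs and pick a free secure world
 * endpoint ID for OP-TEE itself.
 */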
2367 static TEE_Result spmc_init(void)
2368 {
2369 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
2370 	    virt_add_guest_spec_data(&notif_vm_bitmap_id,
2371 				     sizeof(struct notif_vm_bitmap), NULL))
2372 		panic("virt_add_guest_spec_data");
2373 	spmd_id = ffa_spm_id_get();
2374 	DMSG("SPMD ID %#"PRIx16, spmd_id);
2375 
2376 	spmc_id = ffa_id_get();
2377 	DMSG("SPMC ID %#"PRIx16, spmc_id);
2378 
2379 	optee_endpoint_id = FFA_SWD_ID_MIN;
2380 	while (optee_endpoint_id == spmd_id || optee_endpoint_id == spmc_id)
2381 		optee_endpoint_id++;
2382 
2383 	DMSG("OP-TEE endpoint ID %#"PRIx16, optee_endpoint_id);
2384 
2385 	/*
2386 	 * If the SPMD thinks we are version 1.0 it will report version 1.0
2387 	 * to normal world regardless of what version we query the SPM with.
2388 	 * However, if the SPMD thinks we are version 1.1 it will forward
2389 	 * queries from normal world to let us negotiate the version. So by
2390 	 * setting version 1.0 here we should be compatible.
2391 	 *
2392 	 * Note that disagreement on negotiated version means that we'll
2393 	 * have communication problems with normal world.
2394 	 */
2395 	my_rxtx.ffa_vers = FFA_VERSION_1_0;
2396 
2397 	return TEE_SUCCESS;
2398 }
2399 #else /* !defined(CFG_CORE_SEL1_SPMC) */
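/* Register our RX/TX buffer pair with the SPMC, panics on failure. */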
2400 static void spmc_rxtx_map(struct ffa_rxtx *rxtx)
2401 {
2402 	struct thread_smc_args args = {
2403 #ifdef ARM64
2404 		.a0 = FFA_RXTX_MAP_64,
2405 #else
2406 		.a0 = FFA_RXTX_MAP_32,
2407 #endif
2408 		.a1 = virt_to_phys(rxtx->tx),
2409 		.a2 = virt_to_phys(rxtx->rx),
2410 		.a3 = 1,
2411 	};
2412 
2413 	thread_smccc(&args);
2414 	if (!is_ffa_success(args.a0)) {
2415 		if (args.a0 == FFA_ERROR)
2416 			EMSG("rxtx map failed with error %ld", args.a2);
2417 		else
2418 			EMSG("rxtx map failed");
2419 		panic();
2420 	}
2421 }
2422 
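/*
 * Report our supported FF-A version to the SPMC with FFA_VERSION and
 * return the version the SPMC reports back. Panics on error.
 */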
2423 static uint32_t get_ffa_version(uint32_t my_version)
2424 {
2425 	struct thread_smc_args args = {
2426 		.a0 = FFA_VERSION,
2427 		.a1 = my_version,
2428 	};
2429 
2430 	thread_smccc(&args);
2431 	if (args.a0 & BIT(31)) {
2432 		EMSG("FF-A version failed with error %ld", args.a0);
2433 		panic();
2434 	}
2435 
2436 	return args.a0;
2437 }
2438 
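/*
 * Retrieve the memory region identified by @cookie from the SPMC with
 * FFA_MEM_RETRIEVE_REQ. The transaction descriptor in the TX buffer is
 * formatted according to the negotiated FF-A version. On success the
 * parsed transaction is returned in @trans and the return value points
 * at the response in the RX buffer, otherwise NULL is returned.
 */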
2439 static void *spmc_retrieve_req(uint64_t cookie,
2440 			       struct ffa_mem_transaction_x *trans)
2441 {
2442 	struct ffa_mem_access *acc_descr_array = NULL;
2443 	struct ffa_mem_access_perm *perm_descr = NULL;
2444 	struct thread_smc_args args = {
2445 		.a0 = FFA_MEM_RETRIEVE_REQ_32,
2446 		.a3 =	0,	/* Address, Using TX -> MBZ */
2447 		.a4 =   0,	/* Using TX -> MBZ */
2448 	};
2449 	size_t size = 0;
2450 	int rc = 0;
2451 
2452 	if (my_rxtx.ffa_vers == FFA_VERSION_1_0) {
2453 		struct ffa_mem_transaction_1_0 *trans_descr = my_rxtx.tx;
2454 
2455 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
2456 		memset(trans_descr, 0, size);
2457 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
2458 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
2459 		trans_descr->global_handle = cookie;
2460 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
2461 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
2462 		trans_descr->mem_access_count = 1;
2463 		acc_descr_array = trans_descr->mem_access_array;
2464 	} else {
2465 		struct ffa_mem_transaction_1_1 *trans_descr = my_rxtx.tx;
2466 
2467 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
2468 		memset(trans_descr, 0, size);
2469 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
2470 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
2471 		trans_descr->global_handle = cookie;
2472 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
2473 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
2474 		trans_descr->mem_access_count = 1;
2475 		trans_descr->mem_access_offs = sizeof(*trans_descr);
2476 		trans_descr->mem_access_size = sizeof(struct ffa_mem_access);
2477 		acc_descr_array = (void *)((vaddr_t)my_rxtx.tx +
2478 					   sizeof(*trans_descr));
2479 	}
2480 	acc_descr_array->region_offs = 0;
2481 	acc_descr_array->reserved = 0;
2482 	perm_descr = &acc_descr_array->access_perm;
2483 	perm_descr->endpoint_id = optee_endpoint_id;
2484 	perm_descr->perm = FFA_MEM_ACC_RW;
2485 	perm_descr->flags = 0;
2486 
2487 	args.a1 = size; /* Total Length */
2488 	args.a2 = size; /* Frag Length == Total length */
2489 	thread_smccc(&args);
2490 	if (args.a0 != FFA_MEM_RETRIEVE_RESP) {
2491 		if (args.a0 == FFA_ERROR)
2492 			EMSG("Failed to fetch cookie %#"PRIx64" error code %d",
2493 			     cookie, (int)args.a2);
2494 		else
2495 			EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64,
2496 			     cookie, args.a0);
2497 		return NULL;
2498 	}
2499 	rc = spmc_read_mem_transaction(my_rxtx.ffa_vers, my_rxtx.rx,
2500 				       my_rxtx.size, trans);
2501 	if (rc) {
2502 		EMSG("Memory transaction failure for cookie %#"PRIx64" rc %d",
2503 		     cookie, rc);
2504 		return NULL;
2505 	}
2506 
2507 	return my_rxtx.rx;
2508 }
2509 
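/*
 * Tell the SPMC with FFA_MEM_RELINQUISH that we're done with the memory
 * region identified by @cookie.
 */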
2510 void thread_spmc_relinquish(uint64_t cookie)
2511 {
2512 	struct ffa_mem_relinquish *relinquish_desc = my_rxtx.tx;
2513 	struct thread_smc_args args = {
2514 		.a0 = FFA_MEM_RELINQUISH,
2515 	};
2516 
2517 	memset(relinquish_desc, 0, sizeof(*relinquish_desc));
2518 	relinquish_desc->handle = cookie;
2519 	relinquish_desc->flags = 0;
2520 	relinquish_desc->endpoint_count = 1;
2521 	relinquish_desc->endpoint_id_array[0] = optee_endpoint_id;
2522 	thread_smccc(&args);
2523 	if (!is_ffa_success(args.a0))
2524 		EMSG("Failed to relinquish cookie %#"PRIx64, cookie);
2525 }
2526 
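/*
 * Add the address ranges of a retrieved memory region descriptor to @mf
 * and check that the resulting number of pages matches @num_pages.
 */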
2527 static int set_pages(struct ffa_address_range *regions,
2528 		     unsigned int num_regions, unsigned int num_pages,
2529 		     struct mobj_ffa *mf)
2530 {
2531 	unsigned int n = 0;
2532 	unsigned int idx = 0;
2533 
2534 	for (n = 0; n < num_regions; n++) {
2535 		unsigned int page_count = READ_ONCE(regions[n].page_count);
2536 		uint64_t addr = READ_ONCE(regions[n].address);
2537 
2538 		if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count))
2539 			return FFA_INVALID_PARAMETERS;
2540 	}
2541 
2542 	if (idx != num_pages)
2543 		return FFA_INVALID_PARAMETERS;
2544 
2545 	return 0;
2546 }
2547 
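/*
 * Retrieve the memory region identified by @cookie from the SPMC and
 * create a mobj_ffa covering its pages. The RX buffer is released with
 * FFA_RX_RELEASE before returning.
 */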
2548 struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie)
2549 {
2550 	struct mobj_ffa *ret = NULL;
2551 	struct ffa_mem_transaction_x retrieve_desc = { };
2552 	struct ffa_mem_access *descr_array = NULL;
2553 	struct ffa_mem_region *descr = NULL;
2554 	struct mobj_ffa *mf = NULL;
2555 	unsigned int num_pages = 0;
2556 	unsigned int offs = 0;
2557 	void *buf = NULL;
2558 	struct thread_smc_args ffa_rx_release_args = {
2559 		.a0 = FFA_RX_RELEASE
2560 	};
2561 
2562 	/*
2563 	 * OP-TEE only supports a single mem_region while the
2564 	 * specification allows for more than one.
2565 	 */
2566 	buf = spmc_retrieve_req(cookie, &retrieve_desc);
2567 	if (!buf) {
2568 		EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64,
2569 		     cookie);
2570 		return NULL;
2571 	}
2572 
2573 	descr_array = (void *)((vaddr_t)buf + retrieve_desc.mem_access_offs);
2574 	offs = READ_ONCE(descr_array->region_offs);
2575 	descr = (struct ffa_mem_region *)((vaddr_t)buf + offs);
2576 
2577 	num_pages = READ_ONCE(descr->total_page_count);
2578 	mf = mobj_ffa_spmc_new(cookie, num_pages);
2579 	if (!mf)
2580 		goto out;
2581 
2582 	if (set_pages(descr->address_range_array,
2583 		      READ_ONCE(descr->address_range_count), num_pages, mf)) {
2584 		mobj_ffa_spmc_delete(mf);
2585 		goto out;
2586 	}
2587 
2588 	ret = mf;
2589 
2590 out:
2591 	/* Release RX buffer after the mem retrieve request. */
2592 	thread_smccc(&ffa_rx_release_args);
2593 
2594 	return ret;
2595 }
2596 
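/*
 * Read the "ffa-version" property from the FF-A boot manifest, panics if
 * the manifest is invalid or the property is missing.
 */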
2597 static uint32_t get_ffa_version_from_manifest(void *fdt)
2598 {
2599 	int ret = 0;
2600 	uint32_t vers = 0;
2601 
2602 	ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
2603 	if (ret < 0) {
2604 		EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
2605 		panic();
2606 	}
2607 
2608 	ret = fdt_read_uint32(fdt, 0, "ffa-version", &vers);
2609 	if (ret < 0) {
2610 		EMSG("Can't read \"ffa-version\" from FF-A manifest at %p: error %d",
2611 		     fdt, ret);
2612 		panic();
2613 	}
2614 
2615 	return vers;
2616 }
2617 
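/*
 * Initialize communication with the SPMC at a higher exception level:
 * negotiate the FF-A version, map our RX/TX buffers and retrieve the SPMC
 * and OP-TEE endpoint IDs.
 */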
2618 static TEE_Result spmc_init(void)
2619 {
2620 	uint32_t my_vers = 0;
2621 	uint32_t vers = 0;
2622 
2623 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
2624 	    virt_add_guest_spec_data(&notif_vm_bitmap_id,
2625 				     sizeof(struct notif_vm_bitmap), NULL))
2626 		panic("virt_add_guest_spec_data");
2627 
2628 	my_vers = get_ffa_version_from_manifest(get_manifest_dt());
2629 	if (my_vers < FFA_VERSION_1_0 || my_vers > FFA_VERSION_1_2) {
2630 		EMSG("Unsupported version %"PRIu32".%"PRIu32" from manifest",
2631 		     FFA_GET_MAJOR_VERSION(my_vers),
2632 		     FFA_GET_MINOR_VERSION(my_vers));
2633 		panic();
2634 	}
2635 	vers = get_ffa_version(my_vers);
2636 	DMSG("SPMC reported version %"PRIu32".%"PRIu32,
2637 	     FFA_GET_MAJOR_VERSION(vers), FFA_GET_MINOR_VERSION(vers));
2638 	if (FFA_GET_MAJOR_VERSION(vers) != FFA_GET_MAJOR_VERSION(my_vers)) {
2639 		EMSG("Incompatible major version %"PRIu32", expected %"PRIu32"",
2640 		     FFA_GET_MAJOR_VERSION(vers),
2641 		     FFA_GET_MAJOR_VERSION(my_vers));
2642 		panic();
2643 	}
2644 	if (vers < my_vers)
2645 		my_vers = vers;
2646 	DMSG("Using version %"PRIu32".%"PRIu32"",
2647 	     FFA_GET_MAJOR_VERSION(my_vers), FFA_GET_MINOR_VERSION(my_vers));
2648 	my_rxtx.ffa_vers = my_vers;
2649 
2650 	spmc_rxtx_map(&my_rxtx);
2651 
2652 	spmc_id = ffa_spm_id_get();
2653 	DMSG("SPMC ID %#"PRIx16, spmc_id);
2654 
2655 	optee_endpoint_id = ffa_id_get();
2656 	DMSG("OP-TEE endpoint ID %#"PRIx16, optee_endpoint_id);
2657 
2658 	if (!ffa_features(FFA_NOTIFICATION_SET)) {
2659 		spmc_notif_is_ready = true;
2660 		DMSG("Asynchronous notifications are ready");
2661 	}
2662 
2663 	return TEE_SUCCESS;
2664 }
2665 #endif /* !defined(CFG_CORE_SEL1_SPMC) */
2666 
2667 nex_service_init(spmc_init);
2668