xref: /optee_os/core/arch/arm/kernel/thread_spmc.c (revision 273a583ea99627ff3b8ccbbaedbdacecd0909b2e)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2020-2025, Linaro Limited.
4  * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
5  */
6 
7 #include <assert.h>
8 #include <ffa.h>
9 #include <initcall.h>
10 #include <io.h>
11 #include <kernel/dt.h>
12 #include <kernel/interrupt.h>
13 #include <kernel/notif.h>
14 #include <kernel/panic.h>
15 #include <kernel/secure_partition.h>
16 #include <kernel/spinlock.h>
17 #include <kernel/spmc_sp_handler.h>
18 #include <kernel/tee_misc.h>
19 #include <kernel/thread.h>
20 #include <kernel/thread_private.h>
21 #include <kernel/thread_spmc.h>
22 #include <kernel/virtualization.h>
23 #include <libfdt.h>
24 #include <mm/core_mmu.h>
25 #include <mm/mobj.h>
26 #include <optee_ffa.h>
27 #include <optee_msg.h>
28 #include <optee_rpc_cmd.h>
29 #include <sm/optee_smc.h>
30 #include <string.h>
31 #include <sys/queue.h>
32 #include <tee/entry_std.h>
33 #include <tee/uuid.h>
34 #include <tee_api_types.h>
35 #include <types_ext.h>
36 #include <util.h>
37 
38 #if defined(CFG_CORE_SEL1_SPMC)
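/*
 * State kept while adding the address ranges of an FFA_MEM_SHARE
 * transaction; mem_frag_state is used when the descriptor arrives in
 * fragments via FFA_MEM_FRAG_TX.
 */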
39 struct mem_share_state {
40 	struct mobj_ffa *mf;
41 	unsigned int page_count;
42 	unsigned int region_count;
43 	unsigned int current_page_idx;
44 };
45 
46 struct mem_frag_state {
47 	struct mem_share_state share;
48 	tee_mm_entry_t *mm;
49 	unsigned int frag_offset;
50 	SLIST_ENTRY(mem_frag_state) link;
51 };
52 #endif
53 
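/* Per-VM asynchronous notification state: bound and pending global bits */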
54 struct notif_vm_bitmap {
55 	bool initialized;
56 	int do_bottom_half_value;
57 	uint64_t pending;
58 	uint64_t bound;
59 };
60 
61 STAILQ_HEAD(spmc_lsp_desc_head, spmc_lsp_desc);
62 
63 static struct spmc_lsp_desc_head lsp_head __nex_data =
64 	STAILQ_HEAD_INITIALIZER(lsp_head);
65 
66 static unsigned int spmc_notif_lock __nex_data = SPINLOCK_UNLOCK;
67 static bool spmc_notif_is_ready __nex_bss;
68 static int notif_intid __nex_data __maybe_unused = -1;
69 
70 /* ID used to look up the guest-specific struct notif_vm_bitmap */
71 static unsigned int notif_vm_bitmap_id __nex_bss;
72 /* Notification state when ns-virtualization isn't enabled */
73 static struct notif_vm_bitmap default_notif_vm_bitmap;
74 
75 /* Initialized in spmc_init() below */
76 static struct spmc_lsp_desc optee_core_lsp;
77 #ifdef CFG_CORE_SEL1_SPMC
78 /*
79  * Representation of the internal SPMC when OP-TEE is the S-EL1 SPMC.
80  * Initialized in spmc_init() below.
81  */
82 static struct spmc_lsp_desc optee_spmc_lsp;
83 /* FF-A ID of the SPMD. This is only valid when OP-TEE is the S-EL1 SPMC. */
84 static uint16_t spmd_id __nex_bss;
85 
86 /*
87  * If struct ffa_rxtx::size is 0, RX/TX buffers are not mapped or initialized.
88  *
89  * struct ffa_rxtx::spinlock protects the variables below from concurrent
90  * access; this includes the content of struct ffa_rxtx::rx and
91  * @frag_state_head.
92  *
93  * struct ffa_rxtx::tx_is_mine is true when we may write to struct
94  * ffa_rxtx::tx and false when it is owned by normal world.
95  *
96  * Note that we can't prevent normal world from updating the content of
97  * these buffers so we must always be careful when reading, even while
98  * we hold the lock.
99  */
100 
101 static struct ffa_rxtx my_rxtx __nex_bss;
102 
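/* Return true when @rxtx is the RX/TX buffer pair shared with normal world */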
103 static bool is_nw_buf(struct ffa_rxtx *rxtx)
104 {
105 	return rxtx == &my_rxtx;
106 }
107 
108 static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
109 	SLIST_HEAD_INITIALIZER(&frag_state_head);
110 
111 #else
112 /* FF-A ID of the external SPMC */
113 static uint16_t spmc_id __nex_bss;
114 static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE) __nex_bss;
115 static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE) __nex_bss;
116 static struct ffa_rxtx my_rxtx __nex_data = {
117 	.rx = __rx_buf,
118 	.tx = __tx_buf,
119 	.size = sizeof(__rx_buf),
120 };
121 #endif
122 
123 bool spmc_is_reserved_id(uint16_t id)
124 {
125 #ifdef CFG_CORE_SEL1_SPMC
126 	return id == spmd_id;
127 #else
128 	return id == spmc_id;
129 #endif
130 }
131 
132 struct spmc_lsp_desc *spmc_find_lsp_by_sp_id(uint16_t sp_id)
133 {
134 	struct spmc_lsp_desc *desc = NULL;
135 
136 	STAILQ_FOREACH(desc, &lsp_head, link)
137 		if (desc->sp_id == sp_id)
138 			return desc;
139 
140 	return NULL;
141 }
142 
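/* Swap the 16-bit sender and destination endpoint IDs packed in w1 */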
143 static uint32_t swap_src_dst(uint32_t src_dst)
144 {
145 	return (src_dst >> 16) | (src_dst << 16);
146 }
147 
148 static uint16_t get_sender_id(uint32_t src_dst)
149 {
150 	return src_dst >> 16;
151 }
152 
153 void spmc_set_args(struct thread_smc_1_2_regs *args, uint32_t fid,
154 		   uint32_t src_dst, uint32_t w2, uint32_t w3, uint32_t w4,
155 		   uint32_t w5)
156 {
157 	*args = (struct thread_smc_1_2_regs){
158 		.a0 = fid,
159 		.a1 = src_dst,
160 		.a2 = w2,
161 		.a3 = w3,
162 		.a4 = w4,
163 		.a5 = w5,
164 	};
165 }
166 
167 static void set_simple_ret_val(struct thread_smc_1_2_regs *args, int ffa_ret)
168 {
169 	if (ffa_ret)
170 		spmc_set_args(args, FFA_ERROR, 0, ffa_ret, 0, 0, 0);
171 	else
172 		spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0);
173 }
174 
175 uint32_t spmc_exchange_version(uint32_t vers, struct ffa_rxtx *rxtx)
176 {
177 	uint32_t major_vers = FFA_GET_MAJOR_VERSION(vers);
178 	uint32_t minor_vers = FFA_GET_MINOR_VERSION(vers);
179 	uint32_t my_vers = FFA_VERSION_1_2;
180 	uint32_t my_major_vers = 0;
181 	uint32_t my_minor_vers = 0;
182 
183 	my_major_vers = FFA_GET_MAJOR_VERSION(my_vers);
184 	my_minor_vers = FFA_GET_MINOR_VERSION(my_vers);
185 
186 	/*
187 	 * No locking; if the caller makes concurrent calls to this it's
188 	 * only making a mess for itself. We must be able to renegotiate
189 	 * the FF-A version in order to support differing versions between
190 	 * the loader and the driver.
191 	 *
192 	 * Callers should use the version requested if we return a matching
193 	 * major version and a matching or larger minor version. The caller
194 	 * should downgrade to our minor version if our minor version is
195 	 * smaller. Regardless, always return our version as recommended by
196 	 * the specification.
197 	 */
198 	if (major_vers == my_major_vers) {
199 		if (minor_vers > my_minor_vers)
200 			rxtx->ffa_vers = my_vers;
201 		else
202 			rxtx->ffa_vers = vers;
203 	}
204 
205 	return my_vers;
206 }
207 
208 static bool is_ffa_success(uint32_t fid)
209 {
210 #ifdef ARM64
211 	if (fid == FFA_SUCCESS_64)
212 		return true;
213 #endif
214 	return fid == FFA_SUCCESS_32;
215 }
216 
217 static int32_t get_ffa_ret_code(const struct thread_smc_args *args)
218 {
219 	if (is_ffa_success(args->a0))
220 		return FFA_OK;
221 	if (args->a0 == FFA_ERROR && args->a2)
222 		return args->a2;
223 	return FFA_NOT_SUPPORTED;
224 }
225 
226 static int ffa_simple_call(uint32_t fid, unsigned long a1, unsigned long a2,
227 			   unsigned long a3, unsigned long a4)
228 {
229 	struct thread_smc_args args = {
230 		.a0 = fid,
231 		.a1 = a1,
232 		.a2 = a2,
233 		.a3 = a3,
234 		.a4 = a4,
235 	};
236 
237 	thread_smccc(&args);
238 
239 	return get_ffa_ret_code(&args);
240 }
241 
242 static int __maybe_unused ffa_features(uint32_t id)
243 {
244 	return ffa_simple_call(FFA_FEATURES, id, 0, 0, 0);
245 }
246 
247 static int __maybe_unused ffa_set_notification(uint16_t dst, uint16_t src,
248 					       uint32_t flags, uint64_t bitmap)
249 {
250 	return ffa_simple_call(FFA_NOTIFICATION_SET,
251 			       SHIFT_U32(src, 16) | dst, flags,
252 			       low32_from_64(bitmap), high32_from_64(bitmap));
253 }
254 
255 #if defined(CFG_CORE_SEL1_SPMC)
256 static void handle_features(struct thread_smc_1_2_regs *args)
257 {
258 	uint32_t ret_fid = FFA_ERROR;
259 	uint32_t ret_w2 = FFA_NOT_SUPPORTED;
260 
261 	switch (args->a1) {
262 	case FFA_FEATURE_SCHEDULE_RECV_INTR:
263 		if (spmc_notif_is_ready) {
264 			ret_fid = FFA_SUCCESS_32;
265 			ret_w2 = notif_intid;
266 		}
267 		break;
268 
269 #ifdef ARM64
270 	case FFA_RXTX_MAP_64:
271 #endif
272 	case FFA_RXTX_MAP_32:
273 		ret_fid = FFA_SUCCESS_32;
274 		ret_w2 = 0; /* 4kB minimum buffer size and alignment boundary */
275 		break;
276 #ifdef ARM64
277 	case FFA_MEM_SHARE_64:
278 #endif
279 	case FFA_MEM_SHARE_32:
280 		ret_fid = FFA_SUCCESS_32;
281 		/*
282 		 * Partition manager supports transmission of a memory
283 		 * transaction descriptor in a buffer dynamically allocated
284 		 * by the endpoint.
285 		 */
286 		ret_w2 = BIT(0);
287 		break;
288 
289 	case FFA_ERROR:
290 	case FFA_VERSION:
291 	case FFA_SUCCESS_32:
292 #ifdef ARM64
293 	case FFA_SUCCESS_64:
294 #endif
295 	case FFA_FEATURES:
296 	case FFA_SPM_ID_GET:
297 	case FFA_MEM_FRAG_TX:
298 	case FFA_MEM_RECLAIM:
299 	case FFA_MSG_SEND_DIRECT_REQ_64:
300 	case FFA_MSG_SEND_DIRECT_REQ_32:
301 	case FFA_INTERRUPT:
302 	case FFA_PARTITION_INFO_GET:
303 	case FFA_RXTX_UNMAP:
304 	case FFA_RX_RELEASE:
305 	case FFA_FEATURE_MANAGED_EXIT_INTR:
306 	case FFA_NOTIFICATION_BITMAP_CREATE:
307 	case FFA_NOTIFICATION_BITMAP_DESTROY:
308 	case FFA_NOTIFICATION_BIND:
309 	case FFA_NOTIFICATION_UNBIND:
310 	case FFA_NOTIFICATION_SET:
311 	case FFA_NOTIFICATION_GET:
312 	case FFA_NOTIFICATION_INFO_GET_32:
313 #ifdef ARM64
314 	case FFA_NOTIFICATION_INFO_GET_64:
315 #endif
316 		ret_fid = FFA_SUCCESS_32;
317 		ret_w2 = FFA_PARAM_MBZ;
318 		break;
319 	default:
320 		break;
321 	}
322 
323 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
324 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
325 }
326 
327 static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
328 {
329 	tee_mm_entry_t *mm = NULL;
330 
331 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
332 		return FFA_INVALID_PARAMETERS;
333 
334 	mm = tee_mm_alloc(&core_virt_shm_pool, sz);
335 	if (!mm)
336 		return FFA_NO_MEMORY;
337 
338 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
339 					  sz / SMALL_PAGE_SIZE,
340 					  MEM_AREA_NSEC_SHM)) {
341 		tee_mm_free(mm);
342 		return FFA_INVALID_PARAMETERS;
343 	}
344 
345 	*va_ret = (void *)tee_mm_get_smem(mm);
346 	return 0;
347 }
348 
349 void spmc_handle_spm_id_get(struct thread_smc_1_2_regs *args)
350 {
351 	spmc_set_args(args, FFA_SUCCESS_32, FFA_PARAM_MBZ, optee_spmc_lsp.sp_id,
352 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
353 }
354 
355 static void unmap_buf(void *va, size_t sz)
356 {
357 	tee_mm_entry_t *mm = tee_mm_find(&core_virt_shm_pool, (vaddr_t)va);
358 
359 	assert(mm);
360 	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
361 	tee_mm_free(mm);
362 }
363 
364 void spmc_handle_rxtx_map(struct thread_smc_1_2_regs *args,
365 			  struct ffa_rxtx *rxtx)
366 {
367 	int rc = 0;
368 	unsigned int sz = 0;
369 	paddr_t rx_pa = 0;
370 	paddr_t tx_pa = 0;
371 	void *rx = NULL;
372 	void *tx = NULL;
373 
374 	cpu_spin_lock(&rxtx->spinlock);
375 
376 	if (args->a3 & GENMASK_64(63, 6)) {
377 		rc = FFA_INVALID_PARAMETERS;
378 		goto out;
379 	}
380 
381 	sz = args->a3 * SMALL_PAGE_SIZE;
382 	if (!sz) {
383 		rc = FFA_INVALID_PARAMETERS;
384 		goto out;
385 	}
386 	/* TX/RX are swapped compared to the caller */
387 	tx_pa = args->a2;
388 	rx_pa = args->a1;
389 
390 	if (rxtx->size) {
391 		rc = FFA_DENIED;
392 		goto out;
393 	}
394 
395 	/*
396 	 * If the buffer comes from an SP, the address is virtual and already
397 	 * mapped.
398 	 */
399 	if (is_nw_buf(rxtx)) {
400 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
401 			enum teecore_memtypes mt = MEM_AREA_NEX_NSEC_SHM;
402 			bool tx_alloced = false;
403 
404 			/*
405 			 * With virtualization we establish this mapping in
406 			 * the nexus mapping, which is then replicated to
407 			 * each partition.
408 			 *
409 			 * This means that this mapping must be done before
410 			 * any partition is created and then must not be
411 			 * changed.
412 			 */
413 
414 			/*
415 			 * core_mmu_add_mapping() may reuse previous
416 			 * mappings. First check if there are any mappings to
417 			 * reuse so we know how to clean up in case of
418 			 * failure.
419 			 */
420 			tx = phys_to_virt(tx_pa, mt, sz);
421 			rx = phys_to_virt(rx_pa, mt, sz);
422 			if (!tx) {
423 				tx = core_mmu_add_mapping(mt, tx_pa, sz);
424 				if (!tx) {
425 					rc = FFA_NO_MEMORY;
426 					goto out;
427 				}
428 				tx_alloced = true;
429 			}
430 			if (!rx)
431 				rx = core_mmu_add_mapping(mt, rx_pa, sz);
432 
433 			if (!rx) {
434 				if (tx_alloced && tx)
435 					core_mmu_remove_mapping(mt, tx, sz);
436 				rc = FFA_NO_MEMORY;
437 				goto out;
438 			}
439 		} else {
440 			rc = map_buf(tx_pa, sz, &tx);
441 			if (rc)
442 				goto out;
443 			rc = map_buf(rx_pa, sz, &rx);
444 			if (rc) {
445 				unmap_buf(tx, sz);
446 				goto out;
447 			}
448 		}
449 		rxtx->tx = tx;
450 		rxtx->rx = rx;
451 	} else {
452 		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
453 			rc = FFA_INVALID_PARAMETERS;
454 			goto out;
455 		}
456 
457 		if (!virt_to_phys((void *)tx_pa) ||
458 		    !virt_to_phys((void *)rx_pa)) {
459 			rc = FFA_INVALID_PARAMETERS;
460 			goto out;
461 		}
462 
463 		rxtx->tx = (void *)tx_pa;
464 		rxtx->rx = (void *)rx_pa;
465 	}
466 
467 	rxtx->size = sz;
468 	rxtx->tx_is_mine = true;
469 	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
470 	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
471 out:
472 	cpu_spin_unlock(&rxtx->spinlock);
473 	set_simple_ret_val(args, rc);
474 }
475 
476 void spmc_handle_rxtx_unmap(struct thread_smc_1_2_regs *args,
477 			    struct ffa_rxtx *rxtx)
478 {
479 	int rc = FFA_INVALID_PARAMETERS;
480 
481 	cpu_spin_lock(&rxtx->spinlock);
482 
483 	if (!rxtx->size)
484 		goto out;
485 
486 	/*
487 	 * We don't unmap the SP memory as the SP might still use it.
488 	 * We avoid making changes to nexus mappings at this stage since
489 	 * there currently isn't a way to replicate those changes to all
490 	 * partitions.
491 	 */
492 	if (is_nw_buf(rxtx) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
493 		unmap_buf(rxtx->rx, rxtx->size);
494 		unmap_buf(rxtx->tx, rxtx->size);
495 	}
496 	rxtx->size = 0;
497 	rxtx->rx = NULL;
498 	rxtx->tx = NULL;
499 	rc = 0;
500 out:
501 	cpu_spin_unlock(&rxtx->spinlock);
502 	set_simple_ret_val(args, rc);
503 }
504 
505 void spmc_handle_rx_release(struct thread_smc_1_2_regs *args,
506 			    struct ffa_rxtx *rxtx)
507 {
508 	int rc = 0;
509 
510 	cpu_spin_lock(&rxtx->spinlock);
511 	/* The sender's RX is our TX */
512 	if (!rxtx->size || rxtx->tx_is_mine) {
513 		rc = FFA_DENIED;
514 	} else {
515 		rc = 0;
516 		rxtx->tx_is_mine = true;
517 	}
518 	cpu_spin_unlock(&rxtx->spinlock);
519 
520 	set_simple_ret_val(args, rc);
521 }
522 
523 static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
524 {
525 	return !w0 && !w1 && !w2 && !w3;
526 }
527 
528 TEE_Result spmc_fill_partition_entry(uint32_t ffa_vers, void *buf, size_t blen,
529 				     size_t idx, uint16_t endpoint_id,
530 				     uint16_t execution_context,
531 				     uint32_t part_props,
532 				     const uint32_t uuid_words[4])
533 {
534 	struct ffa_partition_info_x *fpi = NULL;
535 	size_t fpi_size = sizeof(*fpi);
536 
537 	if (ffa_vers >= FFA_VERSION_1_1)
538 		fpi_size += FFA_UUID_SIZE;
539 
540 	if ((idx + 1) * fpi_size > blen)
541 		return TEE_ERROR_OUT_OF_MEMORY;
542 
543 	fpi = (void *)((vaddr_t)buf + idx * fpi_size);
544 	fpi->id = endpoint_id;
545 	/* Number of execution contexts implemented by this partition */
546 	fpi->execution_context = execution_context;
547 
548 	fpi->partition_properties = part_props;
549 
550 	/* In FF-A 1.0 only bits [2:0] are defined, let's mask others */
551 	if (ffa_vers < FFA_VERSION_1_1)
552 		fpi->partition_properties &= FFA_PART_PROP_DIRECT_REQ_RECV |
553 					     FFA_PART_PROP_DIRECT_REQ_SEND |
554 					     FFA_PART_PROP_INDIRECT_MSGS;
555 
556 	if (ffa_vers >= FFA_VERSION_1_1) {
557 		if (uuid_words)
558 			memcpy(fpi->uuid, uuid_words, FFA_UUID_SIZE);
559 		else
560 			memset(fpi->uuid, 0, FFA_UUID_SIZE);
561 	}
562 
563 	return TEE_SUCCESS;
564 }
565 
566 static TEE_Result lsp_partition_info_get(uint32_t ffa_vers, void *buf,
567 					 size_t buf_size, size_t *elem_count,
568 					 const uint32_t uuid_words[4],
569 					 bool count_only)
570 {
571 	struct spmc_lsp_desc *desc = NULL;
572 	TEE_Result res = TEE_SUCCESS;
573 	size_t c = *elem_count;
574 
575 	STAILQ_FOREACH(desc, &lsp_head, link) {
576 		/*
577 		 * LSPs (OP-TEE SPMC) without an assigned UUID are not
578 		 * proper LSPs and shouldn't be reported here.
579 		 */
580 		if (is_nil_uuid(desc->uuid_words[0], desc->uuid_words[1],
581 				desc->uuid_words[2], desc->uuid_words[3]))
582 			continue;
583 
584 		if (uuid_words && memcmp(uuid_words, desc->uuid_words,
585 					 sizeof(desc->uuid_words)))
586 			continue;
587 
588 		if (!count_only && !res)
589 			res = spmc_fill_partition_entry(ffa_vers, buf, buf_size,
590 							c, desc->sp_id,
591 							CFG_TEE_CORE_NB_CORE,
592 							desc->properties,
593 							desc->uuid_words);
594 		c++;
595 	}
596 
597 	*elem_count = c;
598 
599 	return res;
600 }
601 
602 void spmc_handle_partition_info_get(struct thread_smc_1_2_regs *args,
603 				    struct ffa_rxtx *rxtx)
604 {
605 	TEE_Result res = TEE_SUCCESS;
606 	uint32_t ret_fid = FFA_ERROR;
607 	uint32_t fpi_size = 0;
608 	uint32_t rc = 0;
609 	bool count_only = args->a5 & FFA_PARTITION_INFO_GET_COUNT_FLAG;
610 	uint32_t uuid_words[4] = { args->a1, args->a2, args->a3, args->a4, };
611 	uint32_t *uuid = uuid_words;
612 	size_t count = 0;
613 
614 	if (!count_only) {
615 		cpu_spin_lock(&rxtx->spinlock);
616 
617 		if (!rxtx->size || !rxtx->tx_is_mine) {
618 			rc = FFA_BUSY;
619 			goto out;
620 		}
621 	}
622 
623 	if (is_nil_uuid(uuid[0], uuid[1], uuid[2], uuid[3]))
624 		uuid = NULL;
625 
626 	if (lsp_partition_info_get(rxtx->ffa_vers, rxtx->tx, rxtx->size,
627 				   &count, uuid, count_only)) {
628 		ret_fid = FFA_ERROR;
629 		rc = FFA_INVALID_PARAMETERS;
630 		goto out;
631 	}
632 	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
633 		res = sp_partition_info_get(rxtx->ffa_vers, rxtx->tx,
634 					    rxtx->size, uuid, &count,
635 					    count_only);
636 		if (res != TEE_SUCCESS) {
637 			ret_fid = FFA_ERROR;
638 			rc = FFA_INVALID_PARAMETERS;
639 			goto out;
640 		}
641 	}
642 
643 	rc = count;
644 	ret_fid = FFA_SUCCESS_32;
645 out:
646 	if (ret_fid == FFA_SUCCESS_32 && !count_only &&
647 	    rxtx->ffa_vers >= FFA_VERSION_1_1)
648 		fpi_size = sizeof(struct ffa_partition_info_x) + FFA_UUID_SIZE;
649 
650 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, fpi_size,
651 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
652 	if (!count_only) {
653 		rxtx->tx_is_mine = false;
654 		cpu_spin_unlock(&rxtx->spinlock);
655 	}
656 }
657 
658 static void spmc_handle_run(struct thread_smc_1_2_regs *args)
659 {
660 	uint16_t endpoint = FFA_TARGET_INFO_GET_SP_ID(args->a1);
661 	uint16_t thread_id = FFA_TARGET_INFO_GET_VCPU_ID(args->a1);
662 	uint32_t rc = FFA_INVALID_PARAMETERS;
663 
664 	/*
665 	 * OP-TEE core threads are only preempted using a controlled exit, so
666 	 * FFA_RUN mustn't be used to resume such threads.
667 	 *
668 	 * The OP-TEE SPMC is not preempted at all; it's an error to try to
669 	 * resume that ID.
670 	 */
671 	if (spmc_find_lsp_by_sp_id(endpoint))
672 		goto out;
673 
674 	/*
675 	 * The endpoint should be a S-EL0 SP, try to resume the SP from
676 	 * preempted into busy state.
677 	 */
678 	rc = spmc_sp_resume_from_preempted(endpoint);
679 	if (rc)
680 		goto out;
681 	thread_resume_from_rpc(thread_id, 0, 0, 0, 0);
682 	/*
683 	 * thread_resume_from_rpc() only returns if the thread_id
684 	 * is invalid.
685 	 */
686 	rc = FFA_INVALID_PARAMETERS;
687 
688 out:
689 	set_simple_ret_val(args, rc);
690 }
691 #endif /*CFG_CORE_SEL1_SPMC*/
692 
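/*
 * Return the notification bitmap for @vm_id: the guest-specific one when
 * NS-virtualization is enabled, otherwise the default bitmap. Returns
 * NULL when @vm_id doesn't identify a valid bitmap.
 */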
693 static struct notif_vm_bitmap *get_notif_vm_bitmap(struct guest_partition *prtn,
694 						   uint16_t vm_id)
695 {
696 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
697 		if (!prtn)
698 			return NULL;
699 		assert(vm_id == virt_get_guest_id(prtn));
700 		return virt_get_guest_spec_data(prtn, notif_vm_bitmap_id);
701 	}
702 	if (vm_id)
703 		return NULL;
704 	return &default_notif_vm_bitmap;
705 }
706 
707 static uint32_t spmc_enable_async_notif(uint32_t bottom_half_value,
708 					uint16_t vm_id)
709 {
710 	struct guest_partition *prtn = NULL;
711 	struct notif_vm_bitmap *nvb = NULL;
712 	uint32_t old_itr_status = 0;
713 	uint32_t res = 0;
714 
715 	if (!spmc_notif_is_ready) {
716 		/*
717 		 * This should never happen if normal world respects the
718 		 * exchanged capabilities.
719 		 */
720 		EMSG("Asynchronous notifications are not ready");
721 		return TEE_ERROR_NOT_IMPLEMENTED;
722 	}
723 
724 	if (bottom_half_value >= OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE) {
725 		EMSG("Invalid bottom half value %"PRIu32, bottom_half_value);
726 		return TEE_ERROR_BAD_PARAMETERS;
727 	}
728 
729 	prtn = virt_get_guest(vm_id);
730 	nvb = get_notif_vm_bitmap(prtn, vm_id);
731 	if (!nvb) {
732 		res = TEE_ERROR_BAD_PARAMETERS;
733 		goto out;
734 	}
735 
736 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
737 	nvb->do_bottom_half_value = bottom_half_value;
738 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
739 
740 	notif_deliver_atomic_event(NOTIF_EVENT_STARTED, vm_id);
741 	res = TEE_SUCCESS;
742 out:
743 	virt_put_guest(prtn);
744 	return res;
745 }
746 
747 static uint32_t get_direct_resp_fid(uint32_t fid)
748 {
749 	assert(fid == FFA_MSG_SEND_DIRECT_REQ_64 ||
750 	       fid == FFA_MSG_SEND_DIRECT_REQ_32);
751 
752 	if (OPTEE_SMC_IS_64(fid))
753 		return FFA_MSG_SEND_DIRECT_RESP_64;
754 	return FFA_MSG_SEND_DIRECT_RESP_32;
755 }
756 
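/*
 * A yielding call either resumes a suspended thread from RPC or
 * allocates and starts a new thread. Both only return here on failure,
 * in which case an error is passed back in the direct response below.
 */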
757 static void handle_yielding_call(struct thread_smc_1_2_regs *args)
758 {
759 	uint32_t direct_resp_fid = get_direct_resp_fid(args->a0);
760 	TEE_Result res = TEE_SUCCESS;
761 
762 	thread_check_canaries();
763 
764 #ifdef ARM64
765 	/* Saving this for an eventual RPC */
766 	thread_get_core_local()->direct_resp_fid = direct_resp_fid;
767 #endif
768 
769 	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
770 		/* Note connection to struct thread_rpc_arg::ret */
771 		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
772 				       0);
773 		res = TEE_ERROR_BAD_PARAMETERS;
774 	} else {
775 		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
776 				     args->a6, args->a7);
777 		res = TEE_ERROR_BUSY;
778 	}
779 	spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1),
780 		      0, res, 0, 0);
781 }
782 
783 static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
784 {
785 	uint64_t cookie = reg_pair_to_64(a5, a4);
786 	uint32_t res = 0;
787 
788 	res = mobj_ffa_unregister_by_cookie(cookie);
789 	switch (res) {
790 	case TEE_SUCCESS:
791 	case TEE_ERROR_ITEM_NOT_FOUND:
792 		return 0;
793 	case TEE_ERROR_BUSY:
794 		EMSG("res %#"PRIx32, res);
795 		return FFA_BUSY;
796 	default:
797 		EMSG("res %#"PRIx32, res);
798 		return FFA_INVALID_PARAMETERS;
799 	}
800 }
801 
802 static void handle_blocking_call(struct thread_smc_1_2_regs *args)
803 {
804 	uint32_t direct_resp_fid = get_direct_resp_fid(args->a0);
805 	uint32_t sec_caps = 0;
806 
807 	switch (args->a3) {
808 	case OPTEE_FFA_GET_API_VERSION:
809 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
810 			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
811 			      0);
812 		break;
813 	case OPTEE_FFA_GET_OS_VERSION:
814 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
815 			      CFG_OPTEE_REVISION_MAJOR,
816 			      CFG_OPTEE_REVISION_MINOR,
817 			      TEE_IMPL_GIT_SHA1 >> 32);
818 		break;
819 	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
820 		sec_caps = OPTEE_FFA_SEC_CAP_ARG_OFFSET;
821 		if (spmc_notif_is_ready)
822 			sec_caps |= OPTEE_FFA_SEC_CAP_ASYNC_NOTIF;
823 		if (IS_ENABLED(CFG_RPMB_ANNOUNCE_PROBE_CAP))
824 			sec_caps |= OPTEE_FFA_SEC_CAP_RPMB_PROBE;
825 		spmc_set_args(args, direct_resp_fid,
826 			      swap_src_dst(args->a1), 0, 0,
827 			      THREAD_RPC_MAX_NUM_PARAMS, sec_caps);
828 		break;
829 	case OPTEE_FFA_UNREGISTER_SHM:
830 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
831 			      handle_unregister_shm(args->a4, args->a5), 0, 0);
832 		break;
833 	case OPTEE_FFA_ENABLE_ASYNC_NOTIF:
834 		spmc_set_args(args, direct_resp_fid,
835 			      swap_src_dst(args->a1), 0,
836 			      spmc_enable_async_notif(args->a4,
837 						      FFA_SRC(args->a1)),
838 			      0, 0);
839 		break;
840 	default:
841 		EMSG("Unhandled blocking service ID %#"PRIx32,
842 		     (uint32_t)args->a3);
843 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
844 			      TEE_ERROR_BAD_PARAMETERS, 0, 0);
845 	}
846 }
847 
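/*
 * Handle framework messages in a direct request: VM created/destroyed
 * notifications from the NS hypervisor and FF-A version negotiation.
 */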
848 static void handle_framework_direct_request(struct thread_smc_1_2_regs *args)
849 {
850 	uint32_t direct_resp_fid = get_direct_resp_fid(args->a0);
851 	uint32_t w0 = FFA_ERROR;
852 	uint32_t w1 = FFA_PARAM_MBZ;
853 	uint32_t w2 = FFA_NOT_SUPPORTED;
854 	uint32_t w3 = FFA_PARAM_MBZ;
855 
856 	switch (args->a2 & FFA_MSG_TYPE_MASK) {
857 	case FFA_MSG_SEND_VM_CREATED:
858 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
859 			uint16_t guest_id = args->a5;
860 			TEE_Result res = virt_guest_created(guest_id);
861 
862 			w0 = direct_resp_fid;
863 			w1 = swap_src_dst(args->a1);
864 			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_CREATED;
865 			if (res == TEE_SUCCESS)
866 				w3 = FFA_OK;
867 			else if (res == TEE_ERROR_OUT_OF_MEMORY)
868 				w3 = FFA_DENIED;
869 			else
870 				w3 = FFA_INVALID_PARAMETERS;
871 		}
872 		break;
873 	case FFA_MSG_SEND_VM_DESTROYED:
874 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
875 			uint16_t guest_id = args->a5;
876 			TEE_Result res = virt_guest_destroyed(guest_id);
877 
878 			w0 = direct_resp_fid;
879 			w1 = swap_src_dst(args->a1);
880 			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_DESTROYED;
881 			if (res == TEE_SUCCESS)
882 				w3 = FFA_OK;
883 			else
884 				w3 = FFA_INVALID_PARAMETERS;
885 		}
886 		break;
887 	case FFA_MSG_VERSION_REQ:
888 		w0 = direct_resp_fid;
889 		w1 = swap_src_dst(args->a1);
890 		w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_VERSION_RESP;
891 		w3 = spmc_exchange_version(args->a3, &my_rxtx);
892 		break;
893 	default:
894 		break;
895 	}
896 	spmc_set_args(args, w0, w1, w2, w3, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
897 }
898 
899 static void optee_lsp_handle_direct_request(struct thread_smc_1_2_regs *args)
900 {
901 	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
902 		handle_framework_direct_request(args);
903 		return;
904 	}
905 
906 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
907 	    virt_set_guest(get_sender_id(args->a1))) {
908 		spmc_set_args(args, get_direct_resp_fid(args->a0),
909 			      swap_src_dst(args->a1), 0,
910 			      TEE_ERROR_ITEM_NOT_FOUND, 0, 0);
911 		return;
912 	}
913 
914 	if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
915 		handle_yielding_call(args);
916 	else
917 		handle_blocking_call(args);
918 
919 	/*
920 	 * Note that handle_yielding_call() typically only returns if a
921 	 * thread cannot be allocated or found. virt_unset_guest() is also
922 	 * called from thread_state_suspend() and thread_state_free().
923 	 */
924 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
925 		virt_unset_guest();
926 }
927 
928 static void __maybe_unused
929 optee_spmc_lsp_handle_direct_request(struct thread_smc_1_2_regs *args)
930 {
931 	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK)
932 		handle_framework_direct_request(args);
933 	else
934 		set_simple_ret_val(args, FFA_INVALID_PARAMETERS);
935 }
936 
937 static void handle_direct_request(struct thread_smc_1_2_regs *args)
938 {
939 	struct spmc_lsp_desc *lsp = spmc_find_lsp_by_sp_id(FFA_DST(args->a1));
940 
941 	if (lsp) {
942 		lsp->direct_req(args);
943 	} else {
944 		spmc_sp_start_thread(args);
945 		/*
946 		 * spmc_sp_start_thread() returns here if the SP ID is
947 		 * invalid.
948 		 */
949 		set_simple_ret_val(args, FFA_INVALID_PARAMETERS);
950 	}
951 }
952 
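/*
 * Parse and validate the FF-A memory transaction descriptor in @buf,
 * using the FF-A 1.0 or 1.1 layout depending on @ffa_vers, and fill in
 * @trans. Returns 0 on success or a negative FFA error code.
 */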
953 int spmc_read_mem_transaction(uint32_t ffa_vers, void *buf, size_t blen,
954 			      struct ffa_mem_transaction_x *trans)
955 {
956 	uint16_t mem_reg_attr = 0;
957 	uint32_t flags = 0;
958 	uint32_t count = 0;
959 	uint32_t offs = 0;
960 	uint32_t size = 0;
961 	size_t n = 0;
962 
963 	if (!IS_ALIGNED_WITH_TYPE(buf, uint64_t))
964 		return FFA_INVALID_PARAMETERS;
965 
966 	if (ffa_vers >= FFA_VERSION_1_1) {
967 		struct ffa_mem_transaction_1_1 *descr = NULL;
968 
969 		if (blen < sizeof(*descr))
970 			return FFA_INVALID_PARAMETERS;
971 
972 		descr = buf;
973 		trans->sender_id = READ_ONCE(descr->sender_id);
974 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
975 		flags = READ_ONCE(descr->flags);
976 		trans->global_handle = READ_ONCE(descr->global_handle);
977 		trans->tag = READ_ONCE(descr->tag);
978 
979 		count = READ_ONCE(descr->mem_access_count);
980 		size = READ_ONCE(descr->mem_access_size);
981 		offs = READ_ONCE(descr->mem_access_offs);
982 	} else {
983 		struct ffa_mem_transaction_1_0 *descr = NULL;
984 
985 		if (blen < sizeof(*descr))
986 			return FFA_INVALID_PARAMETERS;
987 
988 		descr = buf;
989 		trans->sender_id = READ_ONCE(descr->sender_id);
990 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
991 		flags = READ_ONCE(descr->flags);
992 		trans->global_handle = READ_ONCE(descr->global_handle);
993 		trans->tag = READ_ONCE(descr->tag);
994 
995 		count = READ_ONCE(descr->mem_access_count);
996 		size = sizeof(struct ffa_mem_access);
997 		offs = offsetof(struct ffa_mem_transaction_1_0,
998 				mem_access_array);
999 	}
1000 
1001 	if (mem_reg_attr > UINT8_MAX || flags > UINT8_MAX ||
1002 	    size > UINT8_MAX || count > UINT8_MAX || offs > UINT16_MAX)
1003 		return FFA_INVALID_PARAMETERS;
1004 
1005 	/* Check that the endpoint memory access descriptor array fits */
1006 	if (MUL_OVERFLOW(size, count, &n) || ADD_OVERFLOW(offs, n, &n) ||
1007 	    n > blen)
1008 		return FFA_INVALID_PARAMETERS;
1009 
1010 	trans->mem_reg_attr = mem_reg_attr;
1011 	trans->flags = flags;
1012 	trans->mem_access_size = size;
1013 	trans->mem_access_count = count;
1014 	trans->mem_access_offs = offs;
1015 	return 0;
1016 }
1017 
1018 #if defined(CFG_CORE_SEL1_SPMC)
1019 static int get_acc_perms(vaddr_t mem_acc_base, unsigned int mem_access_size,
1020 			 unsigned int mem_access_count, uint8_t *acc_perms,
1021 			 unsigned int *region_offs)
1022 {
1023 	struct ffa_mem_access_perm *descr = NULL;
1024 	struct ffa_mem_access *mem_acc = NULL;
1025 	unsigned int n = 0;
1026 
1027 	for (n = 0; n < mem_access_count; n++) {
1028 		mem_acc = (void *)(mem_acc_base + mem_access_size * n);
1029 		descr = &mem_acc->access_perm;
1030 		if (READ_ONCE(descr->endpoint_id) == optee_core_lsp.sp_id) {
1031 			*acc_perms = READ_ONCE(descr->perm);
1032 			*region_offs = READ_ONCE(mem_acc[n].region_offs);
1033 			return 0;
1034 		}
1035 	}
1036 
1037 	return FFA_INVALID_PARAMETERS;
1038 }
1039 
1040 static int mem_share_init(struct ffa_mem_transaction_x *mem_trans, void *buf,
1041 			  size_t blen, unsigned int *page_count,
1042 			  unsigned int *region_count, size_t *addr_range_offs)
1043 {
1044 	const uint16_t exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
1045 	const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
1046 	struct ffa_mem_region *region_descr = NULL;
1047 	unsigned int region_descr_offs = 0;
1048 	uint8_t mem_acc_perm = 0;
1049 	size_t n = 0;
1050 
1051 	if (mem_trans->mem_reg_attr != exp_mem_reg_attr)
1052 		return FFA_INVALID_PARAMETERS;
1053 
1054 	/* Check that the access permissions match what's expected */
1055 	if (get_acc_perms((vaddr_t)buf + mem_trans->mem_access_offs,
1056 			  mem_trans->mem_access_size,
1057 			  mem_trans->mem_access_count,
1058 			  &mem_acc_perm, &region_descr_offs) ||
1059 	    mem_acc_perm != exp_mem_acc_perm)
1060 		return FFA_INVALID_PARAMETERS;
1061 
1062 	/* Check that the Composite memory region descriptor fits */
1063 	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
1064 	    n > blen)
1065 		return FFA_INVALID_PARAMETERS;
1066 
1067 	if (!IS_ALIGNED_WITH_TYPE((vaddr_t)buf + region_descr_offs,
1068 				  struct ffa_mem_region))
1069 		return FFA_INVALID_PARAMETERS;
1070 
1071 	region_descr = (struct ffa_mem_region *)((vaddr_t)buf +
1072 						 region_descr_offs);
1073 	*page_count = READ_ONCE(region_descr->total_page_count);
1074 	*region_count = READ_ONCE(region_descr->address_range_count);
1075 	*addr_range_offs = n;
1076 	return 0;
1077 }
1078 
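/*
 * Add the address ranges in @buf to the shared memory object. Returns
 * the number of bytes consumed when ranges remain to be supplied, 0 when
 * the descriptor is complete, or a negative FFA error code.
 */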
1079 static int add_mem_share_helper(struct mem_share_state *s, void *buf,
1080 				size_t flen)
1081 {
1082 	unsigned int region_count = flen / sizeof(struct ffa_address_range);
1083 	struct ffa_address_range *arange = NULL;
1084 	unsigned int n = 0;
1085 
1086 	if (region_count > s->region_count)
1087 		region_count = s->region_count;
1088 
1089 	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range))
1090 		return FFA_INVALID_PARAMETERS;
1091 	arange = buf;
1092 
1093 	for (n = 0; n < region_count; n++) {
1094 		unsigned int page_count = READ_ONCE(arange[n].page_count);
1095 		uint64_t addr = READ_ONCE(arange[n].address);
1096 
1097 		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
1098 					  addr, page_count))
1099 			return FFA_INVALID_PARAMETERS;
1100 	}
1101 
1102 	s->region_count -= region_count;
1103 	if (s->region_count)
1104 		return region_count * sizeof(*arange);
1105 
1106 	if (s->current_page_idx != s->page_count)
1107 		return FFA_INVALID_PARAMETERS;
1108 
1109 	return 0;
1110 }
1111 
1112 static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
1113 {
1114 	int rc = 0;
1115 
1116 	rc = add_mem_share_helper(&s->share, buf, flen);
1117 	if (rc >= 0) {
1118 		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
1119 			/* We're not at the end of the descriptor yet */
1120 			if (s->share.region_count)
1121 				return s->frag_offset;
1122 
1123 			/* We're done */
1124 			rc = 0;
1125 		} else {
1126 			rc = FFA_INVALID_PARAMETERS;
1127 		}
1128 	}
1129 
1130 	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
1131 	if (rc < 0)
1132 		mobj_ffa_sel1_spmc_delete(s->share.mf);
1133 	else
1134 		mobj_ffa_push_to_inactive(s->share.mf);
1135 	free(s);
1136 
1137 	return rc;
1138 }
1139 
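/*
 * Return true when the memory is shared with a secure partition rather
 * than with the OP-TEE core endpoint itself.
 */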
1140 static bool is_sp_share(struct ffa_mem_transaction_x *mem_trans,
1141 			void *buf)
1142 {
1143 	struct ffa_mem_access_perm *perm = NULL;
1144 	struct ffa_mem_access *mem_acc = NULL;
1145 
1146 	if (!IS_ENABLED(CFG_SECURE_PARTITION))
1147 		return false;
1148 
1149 	if (mem_trans->mem_access_count < 1)
1150 		return false;
1151 
1152 	mem_acc = (void *)((vaddr_t)buf + mem_trans->mem_access_offs);
1153 	perm = &mem_acc->access_perm;
1154 
1155 	/*
1156 	 * perm->endpoint_id is read here only to check if the endpoint is
1157 	 * OP-TEE. We read it again later, but with some additional checks
1158 	 * to make sure that the data is correct.
1159 	 */
1160 	return READ_ONCE(perm->endpoint_id) != optee_core_lsp.sp_id;
1161 }
1162 
1163 static int add_mem_share(struct ffa_mem_transaction_x *mem_trans,
1164 			 tee_mm_entry_t *mm, void *buf, size_t blen,
1165 			 size_t flen, uint64_t *global_handle)
1166 {
1167 	int rc = 0;
1168 	struct mem_share_state share = { };
1169 	size_t addr_range_offs = 0;
1170 	uint64_t cookie = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
1171 	size_t n = 0;
1172 
1173 	rc = mem_share_init(mem_trans, buf, flen, &share.page_count,
1174 			    &share.region_count, &addr_range_offs);
1175 	if (rc)
1176 		return rc;
1177 
1178 	if (!share.page_count || !share.region_count)
1179 		return FFA_INVALID_PARAMETERS;
1180 
1181 	if (MUL_OVERFLOW(share.region_count,
1182 			 sizeof(struct ffa_address_range), &n) ||
1183 	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
1184 		return FFA_INVALID_PARAMETERS;
1185 
1186 	if (mem_trans->global_handle)
1187 		cookie = mem_trans->global_handle;
1188 	share.mf = mobj_ffa_sel1_spmc_new(cookie, share.page_count);
1189 	if (!share.mf)
1190 		return FFA_NO_MEMORY;
1191 
1192 	if (flen != blen) {
1193 		struct mem_frag_state *s = calloc(1, sizeof(*s));
1194 
1195 		if (!s) {
1196 			rc = FFA_NO_MEMORY;
1197 			goto err;
1198 		}
1199 		s->share = share;
1200 		s->mm = mm;
1201 		s->frag_offset = addr_range_offs;
1202 
1203 		SLIST_INSERT_HEAD(&frag_state_head, s, link);
1204 		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
1205 					flen - addr_range_offs);
1206 
1207 		if (rc >= 0)
1208 			*global_handle = mobj_ffa_get_cookie(share.mf);
1209 
1210 		return rc;
1211 	}
1212 
1213 	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
1214 				  flen - addr_range_offs);
1215 	if (rc) {
1216 		/*
1217 		 * A positive return (consumed bytes) means the descriptor
1218 		 * wasn't fully consumed, treat any non-zero value as an error.
1219 		 */
1220 		rc = FFA_INVALID_PARAMETERS;
1221 		goto err;
1222 	}
1223 
1224 	*global_handle = mobj_ffa_push_to_inactive(share.mf);
1225 
1226 	return 0;
1227 err:
1228 	mobj_ffa_sel1_spmc_delete(share.mf);
1229 	return rc;
1230 }
1231 
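/*
 * Handle FFA_MEM_SHARE_* where the transaction descriptor is passed in
 * a dynamically allocated buffer: map the buffer, parse the descriptor
 * and register the shared memory.
 */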
1232 static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
1233 				 unsigned int page_count,
1234 				 uint64_t *global_handle, struct ffa_rxtx *rxtx)
1235 {
1236 	struct ffa_mem_transaction_x mem_trans = { };
1237 	int rc = 0;
1238 	size_t len = 0;
1239 	void *buf = NULL;
1240 	tee_mm_entry_t *mm = NULL;
1241 	vaddr_t offs = pbuf & SMALL_PAGE_MASK;
1242 
1243 	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
1244 		return FFA_INVALID_PARAMETERS;
1245 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
1246 		return FFA_INVALID_PARAMETERS;
1247 
1248 	/*
1249 	 * Check that the length reported in flen is covered by len even
1250 	 * if the offset is taken into account.
1251 	 */
1252 	if (len < flen || len - offs < flen)
1253 		return FFA_INVALID_PARAMETERS;
1254 
1255 	mm = tee_mm_alloc(&core_virt_shm_pool, len);
1256 	if (!mm)
1257 		return FFA_NO_MEMORY;
1258 
1259 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
1260 					  page_count, MEM_AREA_NSEC_SHM)) {
1261 		rc = FFA_INVALID_PARAMETERS;
1262 		goto out;
1263 	}
1264 	buf = (void *)(tee_mm_get_smem(mm) + offs);
1265 
1266 	cpu_spin_lock(&rxtx->spinlock);
1267 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, buf, flen, &mem_trans);
1268 	if (rc)
1269 		goto unlock;
1270 
1271 	if (is_sp_share(&mem_trans, buf)) {
1272 		rc = spmc_sp_add_share(&mem_trans, buf, blen, flen,
1273 				       global_handle, NULL);
1274 		goto unlock;
1275 	}
1276 
1277 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1278 	    virt_set_guest(mem_trans.sender_id)) {
1279 		rc = FFA_DENIED;
1280 		goto unlock;
1281 	}
1282 
1283 	rc = add_mem_share(&mem_trans, mm, buf, blen, flen, global_handle);
1284 
1285 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1286 		virt_unset_guest();
1287 
1288 unlock:
1289 	cpu_spin_unlock(&rxtx->spinlock);
1290 	if (rc > 0)
1291 		return rc;
1292 
1293 	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1294 out:
1295 	tee_mm_free(mm);
1296 	return rc;
1297 }
1298 
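/* Handle FFA_MEM_SHARE_* where the descriptor is passed in the RX buffer */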
1299 static int handle_mem_share_rxbuf(size_t blen, size_t flen,
1300 				  uint64_t *global_handle,
1301 				  struct ffa_rxtx *rxtx)
1302 {
1303 	struct ffa_mem_transaction_x mem_trans = { };
1304 	int rc = FFA_DENIED;
1305 
1306 	cpu_spin_lock(&rxtx->spinlock);
1307 
1308 	if (!rxtx->rx || flen > rxtx->size)
1309 		goto out;
1310 
1311 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, flen,
1312 				       &mem_trans);
1313 	if (rc)
1314 		goto out;
1315 	if (is_sp_share(&mem_trans, rxtx->rx)) {
1316 		rc = spmc_sp_add_share(&mem_trans, rxtx, blen, flen,
1317 				       global_handle, NULL);
1318 		goto out;
1319 	}
1320 
1321 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1322 	    virt_set_guest(mem_trans.sender_id))
1323 		goto out;
1324 
1325 	rc = add_mem_share(&mem_trans, NULL, rxtx->rx, blen, flen,
1326 			   global_handle);
1327 
1328 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1329 		virt_unset_guest();
1330 
1331 out:
1332 	cpu_spin_unlock(&rxtx->spinlock);
1333 
1334 	return rc;
1335 }
1336 
1337 static void handle_mem_share(struct thread_smc_1_2_regs *args,
1338 			     struct ffa_rxtx *rxtx)
1339 {
1340 	uint32_t tot_len = args->a1;
1341 	uint32_t frag_len = args->a2;
1342 	uint64_t addr = args->a3;
1343 	uint32_t page_count = args->a4;
1344 	uint32_t ret_w1 = 0;
1345 	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
1346 	uint32_t ret_w3 = 0;
1347 	uint32_t ret_fid = FFA_ERROR;
1348 	uint64_t global_handle = 0;
1349 	int rc = 0;
1350 
1351 	/* Check that the MBZs are indeed 0 */
1352 	if (args->a5 || args->a6 || args->a7)
1353 		goto out;
1354 
1355 	/* Check that fragment length doesn't exceed total length */
1356 	if (frag_len > tot_len)
1357 		goto out;
1358 
1359 	/* Check for 32-bit calling convention */
1360 	if (args->a0 == FFA_MEM_SHARE_32)
1361 		addr &= UINT32_MAX;
1362 
1363 	if (!addr) {
1364 		/*
1365 		 * The memory transaction descriptor is passed via our rx
1366 		 * buffer.
1367 		 */
1368 		if (page_count)
1369 			goto out;
1370 		rc = handle_mem_share_rxbuf(tot_len, frag_len, &global_handle,
1371 					    rxtx);
1372 	} else {
1373 		rc = handle_mem_share_tmem(addr, tot_len, frag_len, page_count,
1374 					   &global_handle, rxtx);
1375 	}
1376 	if (rc < 0) {
1377 		ret_w2 = rc;
1378 	} else if (rc > 0) {
1379 		ret_fid = FFA_MEM_FRAG_RX;
1380 		ret_w3 = rc;
1381 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1382 	} else {
1383 		ret_fid = FFA_SUCCESS_32;
1384 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1385 	}
1386 out:
1387 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1388 }
1389 
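/* Find the fragment state matching @global_handle, or NULL if none */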
1390 static struct mem_frag_state *get_frag_state(uint64_t global_handle)
1391 {
1392 	struct mem_frag_state *s = NULL;
1393 
1394 	SLIST_FOREACH(s, &frag_state_head, link)
1395 		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
1396 			return s;
1397 
1398 	return NULL;
1399 }
1400 
1401 static void handle_mem_frag_tx(struct thread_smc_1_2_regs *args,
1402 			       struct ffa_rxtx *rxtx)
1403 {
1404 	uint64_t global_handle = reg_pair_to_64(args->a2, args->a1);
1405 	size_t flen = args->a3;
1406 	uint32_t endpoint_id = args->a4;
1407 	struct mem_frag_state *s = NULL;
1408 	tee_mm_entry_t *mm = NULL;
1409 	unsigned int page_count = 0;
1410 	void *buf = NULL;
1411 	uint32_t ret_w1 = 0;
1412 	uint32_t ret_w2 = 0;
1413 	uint32_t ret_w3 = 0;
1414 	uint32_t ret_fid = 0;
1415 	int rc = 0;
1416 
1417 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1418 		uint16_t guest_id = endpoint_id >> 16;
1419 
1420 		if (!guest_id || virt_set_guest(guest_id)) {
1421 			rc = FFA_INVALID_PARAMETERS;
1422 			goto out_set_rc;
1423 		}
1424 	}
1425 
1426 	/*
1427 	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
1428 	 * requests.
1429 	 */
1430 
1431 	cpu_spin_lock(&rxtx->spinlock);
1432 
1433 	s = get_frag_state(global_handle);
1434 	if (!s) {
1435 		rc = FFA_INVALID_PARAMETERS;
1436 		goto out;
1437 	}
1438 
1439 	mm = s->mm;
1440 	if (mm) {
1441 		if (flen > tee_mm_get_bytes(mm)) {
1442 			rc = FFA_INVALID_PARAMETERS;
1443 			goto out;
1444 		}
1445 		page_count = s->share.page_count;
1446 		buf = (void *)tee_mm_get_smem(mm);
1447 	} else {
1448 		if (flen > rxtx->size) {
1449 			rc = FFA_INVALID_PARAMETERS;
1450 			goto out;
1451 		}
1452 		buf = rxtx->rx;
1453 	}
1454 
1455 	rc = add_mem_share_frag(s, buf, flen);
1456 out:
1457 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1458 		virt_unset_guest();
1459 
1460 	cpu_spin_unlock(&rxtx->spinlock);
1461 
1462 	if (rc <= 0 && mm) {
1463 		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1464 		tee_mm_free(mm);
1465 	}
1466 
1467 out_set_rc:
1468 	if (rc < 0) {
1469 		ret_fid = FFA_ERROR;
1470 		ret_w2 = rc;
1471 	} else if (rc > 0) {
1472 		ret_fid = FFA_MEM_FRAG_RX;
1473 		ret_w3 = rc;
1474 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1475 	} else {
1476 		ret_fid = FFA_SUCCESS_32;
1477 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1478 	}
1479 
1480 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1481 }
1482 
1483 static void handle_mem_reclaim(struct thread_smc_1_2_regs *args)
1484 {
1485 	int rc = FFA_INVALID_PARAMETERS;
1486 	uint64_t cookie = 0;
1487 
1488 	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
1489 		goto out;
1490 
1491 	cookie = reg_pair_to_64(args->a2, args->a1);
1492 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1493 		uint16_t guest_id = 0;
1494 
1495 		if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
1496 			guest_id = virt_find_guest_by_cookie(cookie);
1497 		} else {
1498 			guest_id = (cookie >> FFA_MEMORY_HANDLE_PRTN_SHIFT) &
1499 				   FFA_MEMORY_HANDLE_PRTN_MASK;
1500 		}
1501 		if (!guest_id)
1502 			goto out;
1503 		if (virt_set_guest(guest_id)) {
1504 			if (!virt_reclaim_cookie_from_destroyed_guest(guest_id,
1505 								      cookie))
1506 				rc = FFA_OK;
1507 			goto out;
1508 		}
1509 	}
1510 
1511 	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
1512 	case TEE_SUCCESS:
1513 		rc = FFA_OK;
1514 		break;
1515 	case TEE_ERROR_ITEM_NOT_FOUND:
1516 		DMSG("cookie %#"PRIx64" not found", cookie);
1517 		rc = FFA_INVALID_PARAMETERS;
1518 		break;
1519 	default:
1520 		DMSG("cookie %#"PRIx64" busy", cookie);
1521 		rc = FFA_DENIED;
1522 		break;
1523 	}
1524 
1525 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1526 		virt_unset_guest();
1527 
1528 out:
1529 	set_simple_ret_val(args, rc);
1530 }
1531 
1532 static void handle_notification_bitmap_create(struct thread_smc_1_2_regs *args)
1533 {
1534 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1535 	uint32_t ret_fid = FFA_ERROR;
1536 	uint32_t old_itr_status = 0;
1537 
1538 	if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
1539 	    !args->a5 && !args->a6 && !args->a7) {
1540 		struct guest_partition *prtn = NULL;
1541 		struct notif_vm_bitmap *nvb = NULL;
1542 		uint16_t vm_id = args->a1;
1543 
1544 		prtn = virt_get_guest(vm_id);
1545 		nvb = get_notif_vm_bitmap(prtn, vm_id);
1546 		if (!nvb) {
1547 			ret_val = FFA_INVALID_PARAMETERS;
1548 			goto out_virt_put;
1549 		}
1550 
1551 		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1552 
1553 		if (nvb->initialized) {
1554 			ret_val = FFA_DENIED;
1555 			goto out_unlock;
1556 		}
1557 
1558 		nvb->initialized = true;
1559 		nvb->do_bottom_half_value = -1;
1560 		ret_val = FFA_OK;
1561 		ret_fid = FFA_SUCCESS_32;
1562 out_unlock:
1563 		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1564 out_virt_put:
1565 		virt_put_guest(prtn);
1566 	}
1567 
1568 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1569 }
1570 
1571 static void handle_notification_bitmap_destroy(struct thread_smc_1_2_regs *args)
1572 {
1573 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1574 	uint32_t ret_fid = FFA_ERROR;
1575 	uint32_t old_itr_status = 0;
1576 
1577 	if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
1578 	    !args->a5 && !args->a6 && !args->a7) {
1579 		struct guest_partition *prtn = NULL;
1580 		struct notif_vm_bitmap *nvb = NULL;
1581 		uint16_t vm_id = args->a1;
1582 
1583 		prtn = virt_get_guest(vm_id);
1584 		nvb = get_notif_vm_bitmap(prtn, vm_id);
1585 		if (!nvb) {
1586 			ret_val = FFA_INVALID_PARAMETERS;
1587 			goto out_virt_put;
1588 		}
1589 
1590 		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1591 
1592 		if (nvb->pending || nvb->bound) {
1593 			ret_val = FFA_DENIED;
1594 			goto out_unlock;
1595 		}
1596 
1597 		memset(nvb, 0, sizeof(*nvb));
1598 		ret_val = FFA_OK;
1599 		ret_fid = FFA_SUCCESS_32;
1600 out_unlock:
1601 		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1602 out_virt_put:
1603 		virt_put_guest(prtn);
1604 	}
1605 
1606 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1607 }
1608 
1609 static void handle_notification_bind(struct thread_smc_1_2_regs *args)
1610 {
1611 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1612 	struct guest_partition *prtn = NULL;
1613 	struct notif_vm_bitmap *nvb = NULL;
1614 	uint32_t ret_fid = FFA_ERROR;
1615 	uint32_t old_itr_status = 0;
1616 	uint64_t bitmap = 0;
1617 	uint16_t vm_id = 0;
1618 
1619 	if (args->a5 || args->a6 || args->a7)
1620 		goto out;
1621 	if (args->a2) {
1622 		/* We only deal with global notifications */
1623 		ret_val = FFA_DENIED;
1624 		goto out;
1625 	}
1626 
1627 	/* The destination of the eventual notification */
1628 	vm_id = FFA_DST(args->a1);
1629 	bitmap = reg_pair_to_64(args->a4, args->a3);
1630 
1631 	prtn = virt_get_guest(vm_id);
1632 	nvb = get_notif_vm_bitmap(prtn, vm_id);
1633 	if (!nvb) {
1634 		ret_val = FFA_INVALID_PARAMETERS;
1635 		goto out_virt_put;
1636 	}
1637 
1638 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1639 
1640 	if ((bitmap & nvb->bound)) {
1641 		ret_val = FFA_DENIED;
1642 	} else {
1643 		nvb->bound |= bitmap;
1644 		ret_val = FFA_OK;
1645 		ret_fid = FFA_SUCCESS_32;
1646 	}
1647 
1648 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1649 out_virt_put:
1650 	virt_put_guest(prtn);
1651 out:
1652 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1653 }
1654 
1655 static void handle_notification_unbind(struct thread_smc_1_2_regs *args)
1656 {
1657 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1658 	struct guest_partition *prtn = NULL;
1659 	struct notif_vm_bitmap *nvb = NULL;
1660 	uint32_t ret_fid = FFA_ERROR;
1661 	uint32_t old_itr_status = 0;
1662 	uint64_t bitmap = 0;
1663 	uint16_t vm_id = 0;
1664 
1665 	if (args->a2 || args->a5 || args->a6 || args->a7)
1666 		goto out;
1667 
1668 	/* The destination of the eventual notification */
1669 	vm_id = FFA_DST(args->a1);
1670 	bitmap = reg_pair_to_64(args->a4, args->a3);
1671 
1672 	prtn = virt_get_guest(vm_id);
1673 	nvb = get_notif_vm_bitmap(prtn, vm_id);
1674 	if (!nvb) {
1675 		ret_val = FFA_INVALID_PARAMETERS;
1676 		goto out_virt_put;
1677 	}
1678 
1679 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1680 
1681 	if (bitmap & nvb->pending) {
1682 		ret_val = FFA_DENIED;
1683 	} else {
1684 		nvb->bound &= ~bitmap;
1685 		ret_val = FFA_OK;
1686 		ret_fid = FFA_SUCCESS_32;
1687 	}
1688 
1689 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1690 out_virt_put:
1691 	virt_put_guest(prtn);
1692 out:
1693 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1694 }
1695 
1696 static void handle_notification_get(struct thread_smc_1_2_regs *args)
1697 {
1698 	uint32_t w2 = FFA_INVALID_PARAMETERS;
1699 	struct guest_partition *prtn = NULL;
1700 	struct notif_vm_bitmap *nvb = NULL;
1701 	uint32_t ret_fid = FFA_ERROR;
1702 	uint32_t old_itr_status = 0;
1703 	uint16_t vm_id = 0;
1704 	uint32_t w3 = 0;
1705 
1706 	if (args->a5 || args->a6 || args->a7)
1707 		goto out;
1708 	if (!(args->a2 & 0x1)) {
1709 		ret_fid = FFA_SUCCESS_32;
1710 		w2 = 0;
1711 		goto out;
1712 	}
1713 	vm_id = FFA_DST(args->a1);
1714 
1715 	prtn = virt_get_guest(vm_id);
1716 	nvb = get_notif_vm_bitmap(prtn, vm_id);
1717 	if (!nvb)
1718 		goto out_virt_put;
1719 
1720 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1721 
1722 	reg_pair_from_64(nvb->pending, &w3, &w2);
1723 	nvb->pending = 0;
1724 	ret_fid = FFA_SUCCESS_32;
1725 
1726 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1727 out_virt_put:
1728 	virt_put_guest(prtn);
1729 out:
1730 	spmc_set_args(args, ret_fid, 0, w2, w3, 0, 0);
1731 }
1732 
1733 struct notif_info_get_state {
1734 	struct thread_smc_1_2_regs *args;
1735 	unsigned int ids_per_reg;
1736 	unsigned int ids_count;
1737 	unsigned int id_pos;
1738 	unsigned int count;
1739 	unsigned int max_list_count;
1740 	unsigned int list_count;
1741 };
1742 
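/*
 * Add a 16-bit endpoint ID to the w3-w7 result registers of
 * FFA_NOTIFICATION_INFO_GET. Returns false when the registers are full.
 */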
1743 static bool add_id_in_regs(struct notif_info_get_state *state,
1744 			   uint16_t id)
1745 {
1746 	unsigned int reg_idx = state->id_pos / state->ids_per_reg + 3;
1747 	unsigned int reg_shift = (state->id_pos % state->ids_per_reg) * 16;
1748 
1749 	if (reg_idx > 7)
1750 		return false;
1751 
1752 	state->args->a[reg_idx] &= ~SHIFT_U64(0xffff, reg_shift);
1753 	state->args->a[reg_idx] |= (unsigned long)id << reg_shift;
1754 
1755 	state->id_pos++;
1756 	state->count++;
1757 	return true;
1758 }
1759 
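/*
 * Terminate the current list of IDs by recording its size in the count
 * field. Returns false when the maximum number of lists is reached.
 */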
1760 static bool add_id_count(struct notif_info_get_state *state)
1761 {
1762 	assert(state->list_count < state->max_list_count &&
1763 	       state->count >= 1 && state->count <= 4);
1764 
1765 	state->ids_count |= (state->count - 1) << (state->list_count * 2 + 12);
1766 	state->list_count++;
1767 	state->count = 0;
1768 
1769 	return state->list_count < state->max_list_count;
1770 }
1771 
1772 static bool add_nvb_to_state(struct notif_info_get_state *state,
1773 			     uint16_t guest_id, struct notif_vm_bitmap *nvb)
1774 {
1775 	if (!nvb->pending)
1776 		return true;
1777 	/*
1778 	 * Add only the guest_id, meaning a global notification for this
1779 	 * guest.
1780 	 *
1781 	 * If there were notifications for one or more specific vCPUs we'd
1782 	 * add those before calling add_id_count(), but that's not supported.
1783 	 */
1784 	return add_id_in_regs(state, guest_id) && add_id_count(state);
1785 }
1786 
1787 static void handle_notification_info_get(struct thread_smc_1_2_regs *args)
1788 {
1789 	struct notif_info_get_state state = { .args = args };
1790 	uint32_t ffa_res = FFA_INVALID_PARAMETERS;
1791 	struct guest_partition *prtn = NULL;
1792 	struct notif_vm_bitmap *nvb = NULL;
1793 	uint32_t more_pending_flag = 0;
1794 	uint32_t itr_state = 0;
1795 	uint16_t guest_id = 0;
1796 
1797 	if (args->a1 || args->a2 || args->a3 || args->a4 || args->a5 ||
1798 	    args->a6 || args->a7)
1799 		goto err;
1800 
1801 	if (OPTEE_SMC_IS_64(args->a0)) {
1802 		spmc_set_args(args, FFA_SUCCESS_64, 0, 0, 0, 0, 0);
1803 		state.ids_per_reg = 4;
1804 		state.max_list_count = 31;
1805 	} else {
1806 		spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0);
1807 		state.ids_per_reg = 2;
1808 		state.max_list_count = 15;
1809 	}
1810 
1811 	while (true) {
1812 		/*
1813 		 * With NS-Virtualization we need to go through all
1814 		 * partitions to collect the notification bitmaps; without
1815 		 * it we just check the only notification bitmap we have.
1816 		 */
1817 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1818 			prtn = virt_next_guest(prtn);
1819 			if (!prtn)
1820 				break;
1821 			guest_id = virt_get_guest_id(prtn);
1822 		}
1823 		nvb = get_notif_vm_bitmap(prtn, guest_id);
1824 
1825 		itr_state = cpu_spin_lock_xsave(&spmc_notif_lock);
1826 		if (!add_nvb_to_state(&state, guest_id, nvb))
1827 			more_pending_flag = BIT(0);
1828 		cpu_spin_unlock_xrestore(&spmc_notif_lock, itr_state);
1829 
1830 		if (!IS_ENABLED(CFG_NS_VIRTUALIZATION) || more_pending_flag)
1831 			break;
1832 	}
1833 	virt_put_guest(prtn);
1834 
1835 	if (!state.id_pos) {
1836 		ffa_res = FFA_NO_DATA;
1837 		goto err;
1838 	}
1839 	args->a2 = (state.list_count << FFA_NOTIF_INFO_GET_ID_COUNT_SHIFT) |
1840 		   (state.ids_count << FFA_NOTIF_INFO_GET_ID_LIST_SHIFT) |
1841 		   more_pending_flag;
1842 	return;
1843 err:
1844 	spmc_set_args(args, FFA_ERROR, 0, ffa_res, 0, 0, 0);
1845 }
1846 
1847 void thread_spmc_set_async_notif_intid(int intid)
1848 {
1849 	assert(interrupt_can_raise_sgi(interrupt_get_main_chip()));
1850 	notif_intid = intid;
1851 	spmc_notif_is_ready = true;
1852 	DMSG("Asynchronous notifications are ready");
1853 }
1854 
1855 void notif_send_async(uint32_t value, uint16_t guest_id)
1856 {
1857 	struct guest_partition *prtn = NULL;
1858 	struct notif_vm_bitmap *nvb = NULL;
1859 	uint32_t old_itr_status = 0;
1860 
1861 	prtn = virt_get_guest(guest_id);
1862 	nvb = get_notif_vm_bitmap(prtn, guest_id);
1863 
1864 	if (nvb) {
1865 		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1866 		assert(value == NOTIF_VALUE_DO_BOTTOM_HALF &&
1867 		       spmc_notif_is_ready && nvb->do_bottom_half_value >= 0 &&
1868 		       notif_intid >= 0);
1869 		nvb->pending |= BIT64(nvb->do_bottom_half_value);
1870 		interrupt_raise_sgi(interrupt_get_main_chip(), notif_intid,
1871 				    ITR_CPU_MASK_TO_THIS_CPU);
1872 		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1873 	}
1874 
1875 	virt_put_guest(prtn);
1876 }
1877 #else
1878 void notif_send_async(uint32_t value, uint16_t guest_id)
1879 {
1880 	struct guest_partition *prtn = NULL;
1881 	struct notif_vm_bitmap *nvb = NULL;
1882 	/* global notification, delay notification interrupt */
1883 	uint32_t flags = BIT32(1);
1884 	int res = 0;
1885 
1886 	prtn = virt_get_guest(guest_id);
1887 	nvb = get_notif_vm_bitmap(prtn, guest_id);
1888 
1889 	if (nvb) {
1890 		assert(value == NOTIF_VALUE_DO_BOTTOM_HALF &&
1891 		       spmc_notif_is_ready && nvb->do_bottom_half_value >= 0);
1892 		res = ffa_set_notification(guest_id, optee_core_lsp.sp_id,
1893 					   flags,
1894 					   BIT64(nvb->do_bottom_half_value));
1895 		if (res) {
1896 			EMSG("notification set failed with error %d", res);
1897 			panic();
1898 		}
1899 	}
1900 
1901 	virt_put_guest(prtn);
1902 }
1903 #endif
1904 
1905 /* Only called from assembly */
1906 void thread_spmc_msg_recv(struct thread_smc_1_2_regs *args);
1907 void thread_spmc_msg_recv(struct thread_smc_1_2_regs *args)
1908 {
1909 	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
1910 	switch (args->a0) {
1911 #if defined(CFG_CORE_SEL1_SPMC)
1912 	case FFA_FEATURES:
1913 		handle_features(args);
1914 		break;
1915 	case FFA_SPM_ID_GET:
1916 		spmc_handle_spm_id_get(args);
1917 		break;
1918 #ifdef ARM64
1919 	case FFA_RXTX_MAP_64:
1920 #endif
1921 	case FFA_RXTX_MAP_32:
1922 		spmc_handle_rxtx_map(args, &my_rxtx);
1923 		break;
1924 	case FFA_RXTX_UNMAP:
1925 		spmc_handle_rxtx_unmap(args, &my_rxtx);
1926 		break;
1927 	case FFA_RX_RELEASE:
1928 		spmc_handle_rx_release(args, &my_rxtx);
1929 		break;
1930 	case FFA_PARTITION_INFO_GET:
1931 		spmc_handle_partition_info_get(args, &my_rxtx);
1932 		break;
1933 	case FFA_RUN:
1934 		spmc_handle_run(args);
1935 		break;
1936 #endif /*CFG_CORE_SEL1_SPMC*/
1937 	case FFA_INTERRUPT:
1938 		if (IS_ENABLED(CFG_CORE_SEL1_SPMC))
1939 			spmc_set_args(args, FFA_NORMAL_WORLD_RESUME, 0, 0, 0,
1940 				      0, 0);
1941 		else
1942 			spmc_set_args(args, FFA_MSG_WAIT, 0, 0, 0, 0, 0);
1943 		break;
1944 #ifdef ARM64
1945 	case FFA_MSG_SEND_DIRECT_REQ_64:
1946 #endif
1947 	case FFA_MSG_SEND_DIRECT_REQ_32:
1948 		handle_direct_request(args);
1949 		break;
1950 #if defined(CFG_CORE_SEL1_SPMC)
1951 #ifdef ARM64
1952 	case FFA_MEM_SHARE_64:
1953 #endif
1954 	case FFA_MEM_SHARE_32:
1955 		handle_mem_share(args, &my_rxtx);
1956 		break;
1957 	case FFA_MEM_RECLAIM:
1958 		if (!IS_ENABLED(CFG_SECURE_PARTITION) ||
1959 		    !ffa_mem_reclaim(args, NULL))
1960 			handle_mem_reclaim(args);
1961 		break;
1962 	case FFA_MEM_FRAG_TX:
1963 		handle_mem_frag_tx(args, &my_rxtx);
1964 		break;
1965 	case FFA_NOTIFICATION_BITMAP_CREATE:
1966 		handle_notification_bitmap_create(args);
1967 		break;
1968 	case FFA_NOTIFICATION_BITMAP_DESTROY:
1969 		handle_notification_bitmap_destroy(args);
1970 		break;
1971 	case FFA_NOTIFICATION_BIND:
1972 		handle_notification_bind(args);
1973 		break;
1974 	case FFA_NOTIFICATION_UNBIND:
1975 		handle_notification_unbind(args);
1976 		break;
1977 	case FFA_NOTIFICATION_GET:
1978 		handle_notification_get(args);
1979 		break;
1980 #ifdef ARM64
1981 	case FFA_NOTIFICATION_INFO_GET_64:
1982 #endif
1983 	case FFA_NOTIFICATION_INFO_GET_32:
1984 		handle_notification_info_get(args);
1985 		break;
1986 #endif /*CFG_CORE_SEL1_SPMC*/
1987 	case FFA_ERROR:
1988 		EMSG("Cannot handle FFA_ERROR(%d)", (int)args->a2);
1989 		if (!IS_ENABLED(CFG_CORE_SEL1_SPMC)) {
1990 			/*
1991 			 * The SPMC will return an FFA_ERROR back, so it is
1992 			 * better to panic() now than to flood the log.
1993 			 */
1994 			panic("FFA_ERROR from SPMC is fatal");
1995 		}
1996 		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
1997 			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1998 		break;
1999 	default:
2000 		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
2001 		set_simple_ret_val(args, FFA_NOT_SUPPORTED);
2002 	}
2003 }
2004 
2005 static TEE_Result yielding_call_with_arg(uint64_t cookie, uint32_t offset)
2006 {
2007 	size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
2008 	struct thread_ctx *thr = threads + thread_get_id();
2009 	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
2010 	struct optee_msg_arg *arg = NULL;
2011 	struct mobj *mobj = NULL;
2012 	uint32_t num_params = 0;
2013 	size_t sz = 0;
2014 
2015 	mobj = mobj_ffa_get_by_cookie(cookie, 0);
2016 	if (!mobj) {
2017 		EMSG("Can't find cookie %#"PRIx64, cookie);
2018 		return TEE_ERROR_BAD_PARAMETERS;
2019 	}
2020 
2021 	res = mobj_inc_map(mobj);
2022 	if (res)
2023 		goto out_put_mobj;
2024 
2025 	res = TEE_ERROR_BAD_PARAMETERS;
2026 	arg = mobj_get_va(mobj, offset, sizeof(*arg));
2027 	if (!arg)
2028 		goto out_dec_map;
2029 
2030 	num_params = READ_ONCE(arg->num_params);
2031 	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
2032 		goto out_dec_map;
2033 
2034 	sz = OPTEE_MSG_GET_ARG_SIZE(num_params);
2035 
2036 	thr->rpc_arg = mobj_get_va(mobj, offset + sz, sz_rpc);
2037 	if (!thr->rpc_arg)
2038 		goto out_dec_map;
2039 
2040 	virt_on_stdcall();
2041 	res = tee_entry_std(arg, num_params);
2042 
2043 	thread_rpc_shm_cache_clear(&thr->shm_cache);
2044 	thr->rpc_arg = NULL;
2045 
2046 out_dec_map:
2047 	mobj_dec_map(mobj);
2048 out_put_mobj:
2049 	mobj_put(mobj);
2050 	return res;
2051 }
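
/*
 * A rough layout sketch (for illustration, derived from the code
 * above) of the shared memory object identified by @cookie as used by
 * yielding_call_with_arg():
 *
 *	offset           +------------------------------------------+
 *	                 | struct optee_msg_arg                     |
 *	                 |   (num_params parameters from normal     |
 *	                 |    world, sz bytes in total)             |
 *	offset + sz      +------------------------------------------+
 *	                 | RPC argument area, sz_rpc bytes, used    |
 *	                 | as thr->rpc_arg while the call runs      |
 *	                 +------------------------------------------+
 *
 * with sz = OPTEE_MSG_GET_ARG_SIZE(num_params) and
 * sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS).
 */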
2052 
2053 /*
2054  * Helper routine for the assembly function thread_std_smc_entry()
2055  *
2056  * Note: this function is weak just to make link_dummies_paged.c happy.
2057  */
2058 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
2059 				       uint32_t a2, uint32_t a3,
2060 				       uint32_t a4, uint32_t a5 __unused)
2061 {
2062 	/*
2063 	 * Arguments are supplied from handle_yielding_call() as:
2064 	 * a0 <- w1
2065 	 * a1 <- w3
2066 	 * a2 <- w4
2067 	 * a3 <- w5
2068 	 * a4 <- w6
2069 	 * a5 <- w7
2070 	 */
2071 	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
2072 	if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG)
2073 		return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4);
2074 	return FFA_DENIED;
2075 }
2076 
2077 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
2078 {
2079 	uint64_t offs = tpm->u.memref.offs;
2080 
2081 	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
2082 		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
2083 
2084 	param->u.fmem.offs_low = offs;
2085 	param->u.fmem.offs_high = offs >> 32;
2086 	if (param->u.fmem.offs_high != offs >> 32)
2087 		return false;
2088 
2089 	param->u.fmem.size = tpm->u.memref.size;
2090 	if (tpm->u.memref.mobj) {
2091 		uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj);
2092 
2093 		/* If a mobj is passed it must be one with a valid cookie. */
2094 		if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
2095 			return false;
2096 		param->u.fmem.global_id = cookie;
2097 	} else {
2098 		param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
2099 	}
2100 
2101 	return true;
2102 }
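
/*
 * Worked example (illustrative only): a memref offset of 0x100000004
 * is stored by set_fmem() as offs_low = 0x00000004 and
 * offs_high = 0x1. If the upper bits of the offset don't fit in the
 * offs_high field, the read-back check above fails and the parameter
 * is rejected.
 */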
2103 
2104 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
2105 			    struct thread_param *params,
2106 			    struct optee_msg_arg **arg_ret)
2107 {
2108 	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
2109 	struct thread_ctx *thr = threads + thread_get_id();
2110 	struct optee_msg_arg *arg = thr->rpc_arg;
2111 
2112 	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
2113 		return TEE_ERROR_BAD_PARAMETERS;
2114 
2115 	if (!arg) {
2116 		EMSG("rpc_arg not set");
2117 		return TEE_ERROR_GENERIC;
2118 	}
2119 
2120 	memset(arg, 0, sz);
2121 	arg->cmd = cmd;
2122 	arg->num_params = num_params;
2123 	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
2124 
2125 	for (size_t n = 0; n < num_params; n++) {
2126 		switch (params[n].attr) {
2127 		case THREAD_PARAM_ATTR_NONE:
2128 			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
2129 			break;
2130 		case THREAD_PARAM_ATTR_VALUE_IN:
2131 		case THREAD_PARAM_ATTR_VALUE_OUT:
2132 		case THREAD_PARAM_ATTR_VALUE_INOUT:
2133 			arg->params[n].attr = params[n].attr -
2134 					      THREAD_PARAM_ATTR_VALUE_IN +
2135 					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
2136 			arg->params[n].u.value.a = params[n].u.value.a;
2137 			arg->params[n].u.value.b = params[n].u.value.b;
2138 			arg->params[n].u.value.c = params[n].u.value.c;
2139 			break;
2140 		case THREAD_PARAM_ATTR_MEMREF_IN:
2141 		case THREAD_PARAM_ATTR_MEMREF_OUT:
2142 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
2143 			if (!set_fmem(arg->params + n, params + n))
2144 				return TEE_ERROR_BAD_PARAMETERS;
2145 			break;
2146 		default:
2147 			return TEE_ERROR_BAD_PARAMETERS;
2148 		}
2149 	}
2150 
2151 	if (arg_ret)
2152 		*arg_ret = arg;
2153 
2154 	return TEE_SUCCESS;
2155 }
2156 
2157 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
2158 				struct thread_param *params)
2159 {
2160 	for (size_t n = 0; n < num_params; n++) {
2161 		switch (params[n].attr) {
2162 		case THREAD_PARAM_ATTR_VALUE_OUT:
2163 		case THREAD_PARAM_ATTR_VALUE_INOUT:
2164 			params[n].u.value.a = arg->params[n].u.value.a;
2165 			params[n].u.value.b = arg->params[n].u.value.b;
2166 			params[n].u.value.c = arg->params[n].u.value.c;
2167 			break;
2168 		case THREAD_PARAM_ATTR_MEMREF_OUT:
2169 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
2170 			params[n].u.memref.size = arg->params[n].u.fmem.size;
2171 			break;
2172 		default:
2173 			break;
2174 		}
2175 	}
2176 
2177 	return arg->ret;
2178 }
2179 
2180 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
2181 			struct thread_param *params)
2182 {
2183 	struct thread_rpc_arg rpc_arg = { .call = {
2184 			.w1 = thread_get_tsd()->rpc_target_info,
2185 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
2186 		},
2187 	};
2188 	struct optee_msg_arg *arg = NULL;
2189 	uint32_t ret = 0;
2190 
2191 	ret = get_rpc_arg(cmd, num_params, params, &arg);
2192 	if (ret)
2193 		return ret;
2194 
2195 	thread_rpc(&rpc_arg);
2196 
2197 	return get_rpc_arg_res(arg, num_params, params);
2198 }
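
/*
 * A minimal usage sketch (hypothetical caller, mirroring how other
 * parts of the core use this API): issue an RPC that returns a value
 * parameter, here OPTEE_RPC_CMD_GET_TIME.
 *
 *	struct thread_param p = THREAD_PARAM_VALUE(OUT, 0, 0, 0);
 *	uint32_t res = thread_rpc_cmd(OPTEE_RPC_CMD_GET_TIME, 1, &p);
 *
 *	if (res == TEE_SUCCESS) {
 *		seconds = p.u.value.a;
 *		nanoseconds = p.u.value.b;
 *	}
 */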
2199 
2200 static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
2201 {
2202 	struct thread_rpc_arg rpc_arg = { .call = {
2203 			.w1 = thread_get_tsd()->rpc_target_info,
2204 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
2205 		},
2206 	};
2207 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
2208 	uint32_t res2 = 0;
2209 	uint32_t res = 0;
2210 
2211 	DMSG("freeing cookie %#"PRIx64, cookie);
2212 
2213 	res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);
2214 
2215 	mobj_put(mobj);
2216 	res2 = mobj_ffa_unregister_by_cookie(cookie);
2217 	if (res2)
2218 		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
2219 		     cookie, res2);
2220 	if (!res)
2221 		thread_rpc(&rpc_arg);
2222 }
2223 
2224 static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
2225 {
2226 	struct thread_rpc_arg rpc_arg = { .call = {
2227 			.w1 = thread_get_tsd()->rpc_target_info,
2228 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
2229 		},
2230 	};
2231 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
2232 	struct optee_msg_arg *arg = NULL;
2233 	unsigned int internal_offset = 0;
2234 	struct mobj *mobj = NULL;
2235 	uint64_t cookie = 0;
2236 
2237 	if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
2238 		return NULL;
2239 
2240 	thread_rpc(&rpc_arg);
2241 
2242 	if (arg->num_params != 1 ||
2243 	    arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
2244 		return NULL;
2245 
2246 	internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs);
2247 	cookie = READ_ONCE(arg->params->u.fmem.global_id);
2248 	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
2249 	if (!mobj) {
2250 		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
2251 		     cookie, internal_offset);
2252 		return NULL;
2253 	}
2254 
2255 	assert(mobj_is_nonsec(mobj));
2256 
2257 	if (mobj->size < size) {
2258 		DMSG("Mobj %#"PRIx64": wrong size", cookie);
2259 		mobj_put(mobj);
2260 		return NULL;
2261 	}
2262 
2263 	if (mobj_inc_map(mobj)) {
2264 		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
2265 		mobj_put(mobj);
2266 		return NULL;
2267 	}
2268 
2269 	return mobj;
2270 }
2271 
2272 struct mobj *thread_rpc_alloc_payload(size_t size)
2273 {
2274 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
2275 }
2276 
2277 struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
2278 {
2279 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
2280 }
2281 
2282 void thread_rpc_free_kernel_payload(struct mobj *mobj)
2283 {
2284 	if (mobj)
2285 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL,
2286 				mobj_get_cookie(mobj), mobj);
2287 }
2288 
2289 void thread_rpc_free_payload(struct mobj *mobj)
2290 {
2291 	if (mobj)
2292 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
2293 				mobj);
2294 }
2295 
2296 struct mobj *thread_rpc_alloc_global_payload(size_t size)
2297 {
2298 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
2299 }
2300 
2301 void thread_rpc_free_global_payload(struct mobj *mobj)
2302 {
2303 	if (mobj)
2304 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL,
2305 				mobj_get_cookie(mobj), mobj);
2306 }
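
/*
 * A minimal usage sketch (hypothetical caller): allocate a normal
 * world shared memory buffer via RPC, use it and hand it back.
 *
 *	struct mobj *m = thread_rpc_alloc_payload(4096);
 *
 *	if (m) {
 *		void *va = mobj_get_va(m, 0, 4096);
 *
 *		if (va)
 *			memset(va, 0, 4096);
 *		thread_rpc_free_payload(m);
 *	}
 *
 * The cookie identifying the buffer towards normal world is recovered
 * with mobj_get_cookie() by thread_rpc_free_payload().
 */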
2307 
2308 void thread_spmc_register_secondary_ep(vaddr_t ep)
2309 {
2310 	unsigned long ret = 0;
2311 
2312 	/* Let the SPM know the entry point for secondary CPUs */
2313 	ret = thread_smc(FFA_SECONDARY_EP_REGISTER_64, ep, 0, 0);
2314 
2315 	if (ret != FFA_SUCCESS_32 && ret != FFA_SUCCESS_64)
2316 		EMSG("FFA_SECONDARY_EP_REGISTER_64 ret %#lx", ret);
2317 }
2318 
2319 static uint16_t ffa_id_get(void)
2320 {
2321 	/*
2322 	 * Ask the SPM component running at a higher EL to return our FF-A ID.
2323 	 * This can either be the SPMC ID (if the SPMC is enabled in OP-TEE) or
2324 	 * the partition ID (if not).
2325 	 */
2326 	struct thread_smc_args args = {
2327 		.a0 = FFA_ID_GET,
2328 	};
2329 
2330 	thread_smccc(&args);
2331 	if (!is_ffa_success(args.a0)) {
2332 		if (args.a0 == FFA_ERROR)
2333 			EMSG("Get id failed with error %ld", args.a2);
2334 		else
2335 			EMSG("Get id failed");
2336 		panic();
2337 	}
2338 
2339 	return args.a2;
2340 }
2341 
2342 static uint16_t ffa_spm_id_get(void)
2343 {
2344 	/*
2345 	 * Ask the SPM component running at a higher EL to return its ID.
2346 	 * If OP-TEE implements the S-EL1 SPMC, this will get the SPMD ID.
2347 	 * If not, the ID of the SPMC will be returned.
2348 	 */
2349 	struct thread_smc_args args = {
2350 		.a0 = FFA_SPM_ID_GET,
2351 	};
2352 
2353 	thread_smccc(&args);
2354 	if (!is_ffa_success(args.a0)) {
2355 		if (args.a0 == FFA_ERROR)
2356 			EMSG("Get spm id failed with error %ld", args.a2);
2357 		else
2358 			EMSG("Get spm id failed");
2359 		panic();
2360 	}
2361 
2362 	return args.a2;
2363 }
2364 
2365 static TEE_Result check_desc(struct spmc_lsp_desc *d)
2366 {
2367 	uint32_t accept_props = FFA_PART_PROP_DIRECT_REQ_RECV |
2368 				FFA_PART_PROP_DIRECT_REQ_SEND |
2369 				FFA_PART_PROP_NOTIF_CREATED |
2370 				FFA_PART_PROP_NOTIF_DESTROYED |
2371 				FFA_PART_PROP_AARCH64_STATE;
2372 	uint32_t id = d->sp_id;
2373 
2374 	if (id && (spmc_is_reserved_id(id) || spmc_find_lsp_by_sp_id(id) ||
2375 		   id < FFA_SWD_ID_MIN || id > FFA_SWD_ID_MAX)) {
2376 		EMSG("Conflicting SP id for SP \"%s\" id %#"PRIx32,
2377 		     d->name, id);
2378 		if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
2379 			panic();
2380 		return TEE_ERROR_BAD_FORMAT;
2381 	}
2382 
2383 	if (d->properties & ~accept_props) {
2384 		EMSG("Unexpected properties in %#"PRIx32" for LSP \"%s\" %#"PRIx16,
2385 		     d->properties, d->name, d->sp_id);
2386 		if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
2387 			panic();
2388 		d->properties &= accept_props;
2389 	}
2390 
2391 	if (!d->direct_req) {
2392 		EMSG("Missing direct request callback for LSP \"%s\" %#"PRIx16,
2393 		     d->name, d->sp_id);
2394 		if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
2395 			panic();
2396 		return TEE_ERROR_BAD_FORMAT;
2397 	}
2398 
2399 	if (!d->uuid_words[0] && !d->uuid_words[1] &&
2400 	    !d->uuid_words[2] && !d->uuid_words[3]) {
2401 		EMSG("Found NULL UUID for LSP \"%s\" %#"PRIx16,
2402 		     d->name, d->sp_id);
2403 		if (!IS_ENABLED(CFG_SP_SKIP_FAILED))
2404 			panic();
2405 		return TEE_ERROR_BAD_FORMAT;
2406 	}
2407 
2408 	return TEE_SUCCESS;
2409 }
2410 
2411 static uint16_t find_unused_sp_id(void)
2412 {
2413 	uint32_t id = FFA_SWD_ID_MIN;
2414 
2415 	while (spmc_is_reserved_id(id) || spmc_find_lsp_by_sp_id(id)) {
2416 		id++;
2417 		assert(id <= FFA_SWD_ID_MAX);
2418 	}
2419 
2420 	return id;
2421 }
2422 
2423 TEE_Result spmc_register_lsp(struct spmc_lsp_desc *desc)
2424 {
2425 	TEE_Result res = TEE_SUCCESS;
2426 
2427 	res = check_desc(desc);
2428 	if (res)
2429 		return res;
2430 
2431 	if (STAILQ_EMPTY(&lsp_head)) {
2432 		DMSG("Cannot add Logical SP \"%s\": LSP framework not initialized yet",
2433 		     desc->name);
2434 		return TEE_ERROR_ITEM_NOT_FOUND;
2435 	}
2436 
2437 	if (!desc->sp_id)
2438 		desc->sp_id = find_unused_sp_id();
2439 
2440 	DMSG("Adding Logical SP \"%s\" with id %#"PRIx16,
2441 	     desc->name, desc->sp_id);
2442 
2443 	STAILQ_INSERT_TAIL(&lsp_head, desc, link);
2444 
2445 	return TEE_SUCCESS;
2446 }
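
/*
 * A minimal registration sketch (hypothetical logical SP, not part of
 * this file): a driver supplies a descriptor and registers it after
 * spmc_init() has initialized the LSP framework.
 *
 *	static struct spmc_lsp_desc my_lsp __nex_data = {
 *		.name = "example LSP",
 *		.direct_req = my_lsp_handle_direct_request,
 *		.properties = FFA_PART_PROP_DIRECT_REQ_RECV |
 *			      FFA_PART_PROP_AARCH64_STATE,
 *		.uuid_words = { 0x12345678, 0x9abcdef0, 0x12345678,
 *				0x9abcdef0, },
 *	};
 *
 *	if (spmc_register_lsp(&my_lsp))
 *		panic();
 *
 * Leaving sp_id as 0 makes spmc_register_lsp() pick a free ID with
 * find_unused_sp_id().
 */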
2447 
2448 static struct spmc_lsp_desc optee_core_lsp __nex_data = {
2449 	.name = "OP-TEE",
2450 	.direct_req = optee_lsp_handle_direct_request,
2451 	.properties = FFA_PART_PROP_DIRECT_REQ_RECV |
2452 		      FFA_PART_PROP_DIRECT_REQ_SEND |
2453 #ifdef CFG_NS_VIRTUALIZATION
2454 		      FFA_PART_PROP_NOTIF_CREATED |
2455 		      FFA_PART_PROP_NOTIF_DESTROYED |
2456 #endif
2457 		      FFA_PART_PROP_AARCH64_STATE |
2458 		      FFA_PART_PROP_IS_PE_ID,
2459 	/*
2460 	 * - if the SPMC is in S-EL2 this UUID describes OP-TEE as an S-EL1
2461 	 *   SP, or
2462 	 * - if the SPMC is in S-EL1 then this UUID is for OP-TEE as a
2463 	 *   logical partition, residing in the same exception level as the
2464 	 *   SPMC
2465 	 * UUID 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
2466 	 */
2467 	.uuid_words = { 0xe0786148, 0xe311f8e7, 0x02005ebc, 0x1bc5d5a5, },
2468 };
2469 
2470 #if defined(CFG_CORE_SEL1_SPMC)
2471 static struct spmc_lsp_desc optee_spmc_lsp __nex_data = {
2472 	.name = "OP-TEE SPMC",
2473 	.direct_req = optee_spmc_lsp_handle_direct_request,
2474 };
2475 
2476 static TEE_Result spmc_init(void)
2477 {
2478 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
2479 	    virt_add_guest_spec_data(&notif_vm_bitmap_id,
2480 				     sizeof(struct notif_vm_bitmap), NULL))
2481 		panic("virt_add_guest_spec_data");
2482 	spmd_id = ffa_spm_id_get();
2483 	DMSG("SPMD ID %#"PRIx16, spmd_id);
2484 
2485 	optee_spmc_lsp.sp_id = ffa_id_get();
2486 	DMSG("SPMC ID %#"PRIx16, optee_spmc_lsp.sp_id);
2487 	STAILQ_INSERT_HEAD(&lsp_head, &optee_spmc_lsp, link);
2488 
2489 	optee_core_lsp.sp_id = find_unused_sp_id();
2490 	DMSG("OP-TEE endpoint ID %#"PRIx16, optee_core_lsp.sp_id);
2491 	STAILQ_INSERT_HEAD(&lsp_head, &optee_core_lsp, link);
2492 
2493 	/*
2494 	 * If the SPMD thinks we are version 1.0 it will report version 1.0
2495 	 * to normal world regardless of what version we query the SPM
2496 	 * with. However, if the SPMD thinks we are version 1.1 it will
2497 	 * forward queries from normal world to let us negotiate the
2498 	 * version. So by setting version 1.0 here we should be compatible.
2499 	 *
2500 	 * Note that disagreement on negotiated version means that we'll
2501 	 * have communication problems with normal world.
2502 	 */
2503 	my_rxtx.ffa_vers = FFA_VERSION_1_0;
2504 
2505 	return TEE_SUCCESS;
2506 }
2507 #else /* !defined(CFG_CORE_SEL1_SPMC) */
2508 static void spmc_rxtx_map(struct ffa_rxtx *rxtx)
2509 {
2510 	struct thread_smc_args args = {
2511 #ifdef ARM64
2512 		.a0 = FFA_RXTX_MAP_64,
2513 #else
2514 		.a0 = FFA_RXTX_MAP_32,
2515 #endif
2516 		.a1 = virt_to_phys(rxtx->tx),
2517 		.a2 = virt_to_phys(rxtx->rx),
2518 		.a3 = 1,
2519 	};
2520 
2521 	thread_smccc(&args);
2522 	if (!is_ffa_success(args.a0)) {
2523 		if (args.a0 == FFA_ERROR)
2524 			EMSG("rxtx map failed with error %ld", args.a2);
2525 		else
2526 			EMSG("rxtx map failed");
2527 		panic();
2528 	}
2529 }
2530 
2531 static uint32_t get_ffa_version(uint32_t my_version)
2532 {
2533 	struct thread_smc_args args = {
2534 		.a0 = FFA_VERSION,
2535 		.a1 = my_version,
2536 	};
2537 
2538 	thread_smccc(&args);
2539 	if (args.a0 & BIT(31)) {
2540 		EMSG("FF-A version failed with error %ld", args.a0);
2541 		panic();
2542 	}
2543 
2544 	return args.a0;
2545 }
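
/*
 * Illustrative note (encoding per the FF-A specification): an FF-A
 * version has the major version in bits [30:16] and the minor version
 * in bits [15:0], while bit 31 set in the return value indicates an
 * error, which is what the check above catches. For instance:
 *
 *	FFA_VERSION_1_1 == (1 << 16) | 1 == 0x10001
 *	FFA_GET_MAJOR_VERSION(0x10001) == 1
 *	FFA_GET_MINOR_VERSION(0x10001) == 1
 */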
2546 
2547 static void *spmc_retrieve_req(uint64_t cookie,
2548 			       struct ffa_mem_transaction_x *trans)
2549 {
2550 	struct ffa_mem_access *acc_descr_array = NULL;
2551 	struct ffa_mem_access_perm *perm_descr = NULL;
2552 	struct thread_smc_args args = {
2553 		.a0 = FFA_MEM_RETRIEVE_REQ_32,
2554 		.a3 =	0,	/* Address, Using TX -> MBZ */
2555 		.a4 =   0,	/* Using TX -> MBZ */
2556 	};
2557 	size_t size = 0;
2558 	int rc = 0;
2559 
2560 	if (my_rxtx.ffa_vers == FFA_VERSION_1_0) {
2561 		struct ffa_mem_transaction_1_0 *trans_descr = my_rxtx.tx;
2562 
2563 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
2564 		memset(trans_descr, 0, size);
2565 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
2566 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
2567 		trans_descr->global_handle = cookie;
2568 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
2569 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
2570 		trans_descr->mem_access_count = 1;
2571 		acc_descr_array = trans_descr->mem_access_array;
2572 	} else {
2573 		struct ffa_mem_transaction_1_1 *trans_descr = my_rxtx.tx;
2574 
2575 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
2576 		memset(trans_descr, 0, size);
2577 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
2578 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
2579 		trans_descr->global_handle = cookie;
2580 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
2581 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
2582 		trans_descr->mem_access_count = 1;
2583 		trans_descr->mem_access_offs = sizeof(*trans_descr);
2584 		trans_descr->mem_access_size = sizeof(struct ffa_mem_access);
2585 		acc_descr_array = (void *)((vaddr_t)my_rxtx.tx +
2586 					   sizeof(*trans_descr));
2587 	}
2588 	acc_descr_array->region_offs = 0;
2589 	acc_descr_array->reserved = 0;
2590 	perm_descr = &acc_descr_array->access_perm;
2591 	perm_descr->endpoint_id = optee_core_lsp.sp_id;
2592 	perm_descr->perm = FFA_MEM_ACC_RW;
2593 	perm_descr->flags = 0;
2594 
2595 	args.a1 = size; /* Total Length */
2596 	args.a2 = size; /* Frag Length == Total length */
2597 	thread_smccc(&args);
2598 	if (args.a0 != FFA_MEM_RETRIEVE_RESP) {
2599 		if (args.a0 == FFA_ERROR)
2600 			EMSG("Failed to fetch cookie %#"PRIx64" error code %d",
2601 			     cookie, (int)args.a2);
2602 		else
2603 			EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64,
2604 			     cookie, args.a0);
2605 		return NULL;
2606 	}
2607 	rc = spmc_read_mem_transaction(my_rxtx.ffa_vers, my_rxtx.rx,
2608 				       my_rxtx.size, trans);
2609 	if (rc) {
2610 		EMSG("Memory transaction failure for cookie %#"PRIx64" rc %d",
2611 		     cookie, rc);
2612 		return NULL;
2613 	}
2614 
2615 	return my_rxtx.rx;
2616 }
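
/*
 * Sketch (illustrative only) of the retrieve request that
 * spmc_retrieve_req() builds in the TX buffer when FF-A v1.1 or later
 * has been negotiated:
 *
 *	my_rxtx.tx  +--------------------------------------------+
 *	            | struct ffa_mem_transaction_1_1             |
 *	            |   global_handle    = cookie                |
 *	            |   mem_access_count = 1                     |
 *	            |   mem_access_offs  = sizeof(transaction)   |
 *	            +--------------------------------------------+
 *	            | struct ffa_mem_access                      |
 *	            |   access_perm.endpoint_id = our FF-A ID    |
 *	            |   access_perm.perm        = FFA_MEM_ACC_RW |
 *	            +--------------------------------------------+
 *
 * With FF-A v1.0 the layout is equivalent, except that the single
 * ffa_mem_access entry is embedded in struct ffa_mem_transaction_1_0.
 */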
2617 
2618 void thread_spmc_relinquish(uint64_t cookie)
2619 {
2620 	struct ffa_mem_relinquish *relinquish_desc = my_rxtx.tx;
2621 	struct thread_smc_args args = {
2622 		.a0 = FFA_MEM_RELINQUISH,
2623 	};
2624 
2625 	memset(relinquish_desc, 0, sizeof(*relinquish_desc));
2626 	relinquish_desc->handle = cookie;
2627 	relinquish_desc->flags = 0;
2628 	relinquish_desc->endpoint_count = 1;
2629 	relinquish_desc->endpoint_id_array[0] = optee_core_lsp.sp_id;
2630 	thread_smccc(&args);
2631 	if (!is_ffa_success(args.a0))
2632 		EMSG("Failed to relinquish cookie %#"PRIx64, cookie);
2633 }
2634 
2635 static int set_pages(struct ffa_address_range *regions,
2636 		     unsigned int num_regions, unsigned int num_pages,
2637 		     struct mobj_ffa *mf)
2638 {
2639 	unsigned int n = 0;
2640 	unsigned int idx = 0;
2641 
2642 	for (n = 0; n < num_regions; n++) {
2643 		unsigned int page_count = READ_ONCE(regions[n].page_count);
2644 		uint64_t addr = READ_ONCE(regions[n].address);
2645 
2646 		if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count))
2647 			return FFA_INVALID_PARAMETERS;
2648 	}
2649 
2650 	if (idx != num_pages)
2651 		return FFA_INVALID_PARAMETERS;
2652 
2653 	return 0;
2654 }
2655 
2656 struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie)
2657 {
2658 	struct mobj_ffa *ret = NULL;
2659 	struct ffa_mem_transaction_x retrieve_desc = { };
2660 	struct ffa_mem_access *descr_array = NULL;
2661 	struct ffa_mem_region *descr = NULL;
2662 	struct mobj_ffa *mf = NULL;
2663 	unsigned int num_pages = 0;
2664 	unsigned int offs = 0;
2665 	void *buf = NULL;
2666 	struct thread_smc_args ffa_rx_release_args = {
2667 		.a0 = FFA_RX_RELEASE
2668 	};
2669 
2670 	/*
2671 	 * OP-TEE only supports a single mem_region while the
2672 	 * specification allows for more than one.
2673 	 */
2674 	buf = spmc_retrieve_req(cookie, &retrieve_desc);
2675 	if (!buf) {
2676 		EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64,
2677 		     cookie);
2678 		return NULL;
2679 	}
2680 
2681 	descr_array = (void *)((vaddr_t)buf + retrieve_desc.mem_access_offs);
2682 	offs = READ_ONCE(descr_array->region_offs);
2683 	descr = (struct ffa_mem_region *)((vaddr_t)buf + offs);
2684 
2685 	num_pages = READ_ONCE(descr->total_page_count);
2686 	mf = mobj_ffa_spmc_new(cookie, num_pages);
2687 	if (!mf)
2688 		goto out;
2689 
2690 	if (set_pages(descr->address_range_array,
2691 		      READ_ONCE(descr->address_range_count), num_pages, mf)) {
2692 		mobj_ffa_spmc_delete(mf);
2693 		goto out;
2694 	}
2695 
2696 	ret = mf;
2697 
2698 out:
2699 	/* Release RX buffer after the mem retrieve request. */
2700 	thread_smccc(&ffa_rx_release_args);
2701 
2702 	return ret;
2703 }
2704 
2705 static uint32_t get_ffa_version_from_manifest(void *fdt)
2706 {
2707 	int ret = 0;
2708 	uint32_t vers = 0;
2709 
2710 	ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
2711 	if (ret < 0) {
2712 		EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
2713 		panic();
2714 	}
2715 
2716 	ret = fdt_read_uint32(fdt, 0, "ffa-version", &vers);
2717 	if (ret < 0) {
2718 		EMSG("Can't read \"ffa-version\" from FF-A manifest at %p: error %d",
2719 		     fdt, ret);
2720 		panic();
2721 	}
2722 
2723 	return vers;
2724 }
2725 
2726 static TEE_Result spmc_init(void)
2727 {
2728 	uint32_t my_vers = 0;
2729 	uint32_t vers = 0;
2730 
2731 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
2732 	    virt_add_guest_spec_data(&notif_vm_bitmap_id,
2733 				     sizeof(struct notif_vm_bitmap), NULL))
2734 		panic("virt_add_guest_spec_data");
2735 
2736 	my_vers = get_ffa_version_from_manifest(get_manifest_dt());
2737 	if (my_vers < FFA_VERSION_1_0 || my_vers > FFA_VERSION_1_2) {
2738 		EMSG("Unsupported version %"PRIu32".%"PRIu32" from manifest",
2739 		     FFA_GET_MAJOR_VERSION(my_vers),
2740 		     FFA_GET_MINOR_VERSION(my_vers));
2741 		panic();
2742 	}
2743 	vers = get_ffa_version(my_vers);
2744 	DMSG("SPMC reported version %"PRIu32".%"PRIu32,
2745 	     FFA_GET_MAJOR_VERSION(vers), FFA_GET_MINOR_VERSION(vers));
2746 	if (FFA_GET_MAJOR_VERSION(vers) != FFA_GET_MAJOR_VERSION(my_vers)) {
2747 		EMSG("Incompatible major version %"PRIu32", expected %"PRIu32"",
2748 		     FFA_GET_MAJOR_VERSION(vers),
2749 		     FFA_GET_MAJOR_VERSION(my_vers));
2750 		panic();
2751 	}
2752 	if (vers < my_vers)
2753 		my_vers = vers;
2754 	DMSG("Using version %"PRIu32".%"PRIu32"",
2755 	     FFA_GET_MAJOR_VERSION(my_vers), FFA_GET_MINOR_VERSION(my_vers));
2756 	my_rxtx.ffa_vers = my_vers;
2757 
2758 	spmc_rxtx_map(&my_rxtx);
2759 
2760 	spmc_id = ffa_spm_id_get();
2761 	DMSG("SPMC ID %#"PRIx16, spmc_id);
2762 
2763 	optee_core_lsp.sp_id = ffa_id_get();
2764 	DMSG("OP-TEE endpoint ID %#"PRIx16, optee_core_lsp.sp_id);
2765 	STAILQ_INSERT_HEAD(&lsp_head, &optee_core_lsp, link);
2766 
2767 	if (!ffa_features(FFA_NOTIFICATION_SET)) {
2768 		spmc_notif_is_ready = true;
2769 		DMSG("Asynchronous notifications are ready");
2770 	}
2771 
2772 	return TEE_SUCCESS;
2773 }
2774 #endif /* !defined(CFG_CORE_SEL1_SPMC) */
2775 
2776 nex_service_init(spmc_init);
2777