xref: /optee_os/core/arch/arm/kernel/thread_spmc.c (revision cb03400251f98aed22a2664509e3ed9e183800b0)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2020-2023, Linaro Limited.
4  * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
5  */
6 
7 #include <assert.h>
8 #include <ffa.h>
9 #include <initcall.h>
10 #include <io.h>
11 #include <kernel/interrupt.h>
12 #include <kernel/notif.h>
13 #include <kernel/panic.h>
14 #include <kernel/secure_partition.h>
15 #include <kernel/spinlock.h>
16 #include <kernel/spmc_sp_handler.h>
17 #include <kernel/tee_misc.h>
18 #include <kernel/thread.h>
19 #include <kernel/thread_private.h>
20 #include <kernel/thread_spmc.h>
21 #include <kernel/virtualization.h>
22 #include <mm/core_mmu.h>
23 #include <mm/mobj.h>
24 #include <optee_ffa.h>
25 #include <optee_msg.h>
26 #include <optee_rpc_cmd.h>
27 #include <sm/optee_smc.h>
28 #include <string.h>
29 #include <sys/queue.h>
30 #include <tee/entry_std.h>
31 #include <tee/uuid.h>
32 #include <util.h>
33 
34 #if defined(CFG_CORE_SEL1_SPMC)
35 struct mem_share_state {
36 	struct mobj_ffa *mf;
37 	unsigned int page_count;
38 	unsigned int region_count;
39 	unsigned int current_page_idx;
40 };
41 
42 struct mem_frag_state {
43 	struct mem_share_state share;
44 	tee_mm_entry_t *mm;
45 	unsigned int frag_offset;
46 	SLIST_ENTRY(mem_frag_state) link;
47 };
48 #endif
49 
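/*
 * Per-guest FF-A notification state, looked up with get_notif_vm_bitmap().
 * @initialized is set by FFA_NOTIFICATION_BITMAP_CREATE,
 * @do_bottom_half_value is the notification value used for "do bottom
 * half" events (-1 when unassigned), @pending holds notifications waiting
 * to be retrieved with FFA_NOTIFICATION_GET and @bound holds notifications
 * bound with FFA_NOTIFICATION_BIND.
 */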
50 struct notif_vm_bitmap {
51 	bool initialized;
52 	int do_bottom_half_value;
53 	uint64_t pending;
54 	uint64_t bound;
55 };
56 
57 static unsigned int spmc_notif_lock __nex_data = SPINLOCK_UNLOCK;
58 static bool spmc_notif_is_ready __nex_bss;
59 static int notif_intid __nex_data __maybe_unused = -1;
60 
61 /* ID used to look up the guest-specific struct notif_vm_bitmap */
62 static unsigned int notif_vm_bitmap_id __nex_bss;
63 /* Notification state when ns-virtualization isn't enabled */
64 static struct notif_vm_bitmap default_notif_vm_bitmap;
65 
66 /* Initialized in spmc_init() below */
67 uint16_t optee_endpoint_id __nex_bss;
68 uint16_t spmc_id __nex_bss;
69 #ifdef CFG_CORE_SEL1_SPMC
70 uint16_t spmd_id __nex_bss;
71 static const uint32_t my_part_props = FFA_PART_PROP_DIRECT_REQ_RECV |
72 				      FFA_PART_PROP_DIRECT_REQ_SEND |
73 #ifdef CFG_NS_VIRTUALIZATION
74 				      FFA_PART_PROP_NOTIF_CREATED |
75 				      FFA_PART_PROP_NOTIF_DESTROYED |
76 #endif
77 #ifdef ARM64
78 				      FFA_PART_PROP_AARCH64_STATE |
79 #endif
80 				      FFA_PART_PROP_IS_PE_ID;
81 
82 static uint32_t my_uuid_words[] = {
83 	/*
84 	 * - if the SPMC is in S-EL2 this UUID describes OP-TEE as a S-EL1
85 	 *   SP, or
86 	 * - if the SPMC is in S-EL1 then this UUID is for OP-TEE as a
87 	 *   logical partition, residing in the same exception level as the
88 	 *   SPMC
89 	 * UUID 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
90 	 */
91 	0xe0786148, 0xe311f8e7, 0x02005ebc, 0x1bc5d5a5,
92 };
93 
94 /*
95  * If struct ffa_rxtx::size is 0 the RX/TX buffers are not mapped or initialized.
96  *
97  * struct ffa_rxtx::spinlock protects the variables below from concurrent
98  * access; this includes the use of the content of struct ffa_rxtx::rx and
99  * @frag_state_head.
100  *
101  * struct ffa_rxtx::tx_is_mine is true when we may write to struct
102  * ffa_rxtx::tx and false when it is owned by normal world.
103  *
104  * Note that we can't prevent normal world from updating the content of
105  * these buffers, so we must always be careful when reading, even while
106  * we hold the lock.
107  */
108 
109 static struct ffa_rxtx my_rxtx __nex_bss;
110 
111 static bool is_nw_buf(struct ffa_rxtx *rxtx)
112 {
113 	return rxtx == &my_rxtx;
114 }
115 
116 static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
117 	SLIST_HEAD_INITIALIZER(&frag_state_head);
118 
119 #else
120 static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
121 static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
122 static struct ffa_rxtx my_rxtx = {
123 	.rx = __rx_buf,
124 	.tx = __tx_buf,
125 	.size = sizeof(__rx_buf),
126 };
127 #endif
128 
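/*
 * In FFA_MSG_SEND_DIRECT_REQ/RESP w1 carries the sender endpoint ID in
 * bits [31:16] and the receiver endpoint ID in bits [15:0], so a response
 * is formed by swapping the two halves.
 */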
129 static uint32_t swap_src_dst(uint32_t src_dst)
130 {
131 	return (src_dst >> 16) | (src_dst << 16);
132 }
133 
134 static uint16_t get_sender_id(uint32_t src_dst)
135 {
136 	return src_dst >> 16;
137 }
138 
139 void spmc_set_args(struct thread_smc_args *args, uint32_t fid, uint32_t src_dst,
140 		   uint32_t w2, uint32_t w3, uint32_t w4, uint32_t w5)
141 {
142 	*args = (struct thread_smc_args){ .a0 = fid,
143 					  .a1 = src_dst,
144 					  .a2 = w2,
145 					  .a3 = w3,
146 					  .a4 = w4,
147 					  .a5 = w5, };
148 }
149 
150 static void set_simple_ret_val(struct thread_smc_args *args, int ffa_ret)
151 {
152 	if (ffa_ret)
153 		spmc_set_args(args, FFA_ERROR, 0, ffa_ret, 0, 0, 0);
154 	else
155 		spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0);
156 }
157 
158 uint32_t spmc_exchange_version(uint32_t vers, struct ffa_rxtx *rxtx)
159 {
160 	/*
161 	 * No locking; if the caller makes concurrent calls to this it's
162 	 * only making a mess for itself. We must be able to renegotiate
163 	 * the FF-A version in order to support differing versions between
164 	 * the loader and the driver.
165 	 */
166 	if (vers < FFA_VERSION_1_1)
167 		rxtx->ffa_vers = FFA_VERSION_1_0;
168 	else
169 		rxtx->ffa_vers = FFA_VERSION_1_1;
170 
171 	return rxtx->ffa_vers;
172 }
173 
174 static bool is_ffa_success(uint32_t fid)
175 {
176 #ifdef ARM64
177 	if (fid == FFA_SUCCESS_64)
178 		return true;
179 #endif
180 	return fid == FFA_SUCCESS_32;
181 }
182 
183 static int32_t get_ffa_ret_code(const struct thread_smc_args *args)
184 {
185 	if (is_ffa_success(args->a0))
186 		return FFA_OK;
187 	if (args->a0 == FFA_ERROR && args->a2)
188 		return args->a2;
189 	return FFA_NOT_SUPPORTED;
190 }
191 
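/*
 * Issue an FF-A call via SMCCC and condense the response into an FF-A
 * return code: FFA_OK on FFA_SUCCESS, the error code from w2 on FFA_ERROR,
 * or FFA_NOT_SUPPORTED otherwise.
 */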
192 static int ffa_simple_call(uint32_t fid, unsigned long a1, unsigned long a2,
193 			   unsigned long a3, unsigned long a4)
194 {
195 	struct thread_smc_args args = {
196 		.a0 = fid,
197 		.a1 = a1,
198 		.a2 = a2,
199 		.a3 = a3,
200 		.a4 = a4,
201 	};
202 
203 	thread_smccc(&args);
204 
205 	return get_ffa_ret_code(&args);
206 }
207 
208 static int __maybe_unused ffa_features(uint32_t id)
209 {
210 	return ffa_simple_call(FFA_FEATURES, id, 0, 0, 0);
211 }
212 
213 static int __maybe_unused ffa_set_notification(uint16_t dst, uint16_t src,
214 					       uint32_t flags, uint64_t bitmap)
215 {
216 	return ffa_simple_call(FFA_NOTIFICATION_SET,
217 			       SHIFT_U32(src, 16) | dst, flags,
218 			       low32_from_64(bitmap), high32_from_64(bitmap));
219 }
220 
221 #if defined(CFG_CORE_SEL1_SPMC)
222 static void handle_features(struct thread_smc_args *args)
223 {
224 	uint32_t ret_fid = FFA_ERROR;
225 	uint32_t ret_w2 = FFA_NOT_SUPPORTED;
226 
227 	switch (args->a1) {
228 	case FFA_FEATURE_SCHEDULE_RECV_INTR:
229 		if (spmc_notif_is_ready) {
230 			ret_fid = FFA_SUCCESS_32;
231 			ret_w2 = notif_intid;
232 		}
233 		break;
234 
235 #ifdef ARM64
236 	case FFA_RXTX_MAP_64:
237 #endif
238 	case FFA_RXTX_MAP_32:
239 		ret_fid = FFA_SUCCESS_32;
240 		ret_w2 = 0; /* 4 KiB minimum buffer size and alignment boundary */
241 		break;
242 #ifdef ARM64
243 	case FFA_MEM_SHARE_64:
244 #endif
245 	case FFA_MEM_SHARE_32:
246 		ret_fid = FFA_SUCCESS_32;
247 		/*
248 		 * Partition manager supports transmission of a memory
249 		 * transaction descriptor in a buffer dynamically allocated
250 		 * by the endpoint.
251 		 */
252 		ret_w2 = BIT(0);
253 		break;
254 
255 	case FFA_ERROR:
256 	case FFA_VERSION:
257 	case FFA_SUCCESS_32:
258 #ifdef ARM64
259 	case FFA_SUCCESS_64:
260 #endif
261 	case FFA_FEATURES:
262 	case FFA_SPM_ID_GET:
263 	case FFA_MEM_FRAG_TX:
264 	case FFA_MEM_RECLAIM:
265 	case FFA_MSG_SEND_DIRECT_REQ_64:
266 	case FFA_MSG_SEND_DIRECT_REQ_32:
267 	case FFA_INTERRUPT:
268 	case FFA_PARTITION_INFO_GET:
269 	case FFA_RXTX_UNMAP:
270 	case FFA_RX_RELEASE:
271 	case FFA_FEATURE_MANAGED_EXIT_INTR:
272 	case FFA_NOTIFICATION_BITMAP_CREATE:
273 	case FFA_NOTIFICATION_BITMAP_DESTROY:
274 	case FFA_NOTIFICATION_BIND:
275 	case FFA_NOTIFICATION_UNBIND:
276 	case FFA_NOTIFICATION_SET:
277 	case FFA_NOTIFICATION_GET:
278 	case FFA_NOTIFICATION_INFO_GET_32:
279 #ifdef ARM64
280 	case FFA_NOTIFICATION_INFO_GET_64:
281 #endif
282 		ret_fid = FFA_SUCCESS_32;
283 		ret_w2 = FFA_PARAM_MBZ;
284 		break;
285 	default:
286 		break;
287 	}
288 
289 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
290 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
291 }
292 
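/*
 * Map a non-secure physical buffer of @sz bytes into the shared memory
 * pool and return the virtual address in @va_ret, or an FF-A error code
 * on failure.
 */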
293 static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
294 {
295 	tee_mm_entry_t *mm = NULL;
296 
297 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
298 		return FFA_INVALID_PARAMETERS;
299 
300 	mm = tee_mm_alloc(&core_virt_shm_pool, sz);
301 	if (!mm)
302 		return FFA_NO_MEMORY;
303 
304 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
305 					  sz / SMALL_PAGE_SIZE,
306 					  MEM_AREA_NSEC_SHM)) {
307 		tee_mm_free(mm);
308 		return FFA_INVALID_PARAMETERS;
309 	}
310 
311 	*va_ret = (void *)tee_mm_get_smem(mm);
312 	return 0;
313 }
314 
315 void spmc_handle_spm_id_get(struct thread_smc_args *args)
316 {
317 	spmc_set_args(args, FFA_SUCCESS_32, FFA_PARAM_MBZ, spmc_id,
318 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
319 }
320 
321 static void unmap_buf(void *va, size_t sz)
322 {
323 	tee_mm_entry_t *mm = tee_mm_find(&core_virt_shm_pool, (vaddr_t)va);
324 
325 	assert(mm);
326 	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
327 	tee_mm_free(mm);
328 }
329 
330 void spmc_handle_rxtx_map(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
331 {
332 	int rc = 0;
333 	unsigned int sz = 0;
334 	paddr_t rx_pa = 0;
335 	paddr_t tx_pa = 0;
336 	void *rx = NULL;
337 	void *tx = NULL;
338 
339 	cpu_spin_lock(&rxtx->spinlock);
340 
341 	if (args->a3 & GENMASK_64(63, 6)) {
342 		rc = FFA_INVALID_PARAMETERS;
343 		goto out;
344 	}
345 
346 	sz = args->a3 * SMALL_PAGE_SIZE;
347 	if (!sz) {
348 		rc = FFA_INVALID_PARAMETERS;
349 		goto out;
350 	}
351 	/* TX/RX are swapped compared to the caller */
352 	tx_pa = args->a2;
353 	rx_pa = args->a1;
354 
355 	if (rxtx->size) {
356 		rc = FFA_DENIED;
357 		goto out;
358 	}
359 
360 	/*
361 	 * If the buffer comes from an SP the address is virtual and already
362 	 * mapped.
363 	 */
364 	if (is_nw_buf(rxtx)) {
365 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
366 			enum teecore_memtypes mt = MEM_AREA_NEX_NSEC_SHM;
367 			bool tx_alloced = false;
368 
369 			/*
370 			 * With virtualization we establish this mapping in
371 			 * the nexus mapping which then is replicated to
372 			 * each partition.
373 			 *
374 			 * This means that this mapping must be done before
375 			 * any partition is created and then must not be
376 			 * changed.
377 			 */
378 
379 			/*
380 			 * core_mmu_add_mapping() may reuse previous
381 			 * mappings. First check if there are any mappings to
382 			 * reuse so we know how to clean up in case of
383 			 * failure.
384 			 */
385 			tx = phys_to_virt(tx_pa, mt, sz);
386 			rx = phys_to_virt(rx_pa, mt, sz);
387 			if (!tx) {
388 				tx = core_mmu_add_mapping(mt, tx_pa, sz);
389 				if (!tx) {
390 					rc = FFA_NO_MEMORY;
391 					goto out;
392 				}
393 				tx_alloced = true;
394 			}
395 			if (!rx)
396 				rx = core_mmu_add_mapping(mt, rx_pa, sz);
397 
398 			if (!rx) {
399 				if (tx_alloced && tx)
400 					core_mmu_remove_mapping(mt, tx, sz);
401 				rc = FFA_NO_MEMORY;
402 				goto out;
403 			}
404 		} else {
405 			rc = map_buf(tx_pa, sz, &tx);
406 			if (rc)
407 				goto out;
408 			rc = map_buf(rx_pa, sz, &rx);
409 			if (rc) {
410 				unmap_buf(tx, sz);
411 				goto out;
412 			}
413 		}
414 		rxtx->tx = tx;
415 		rxtx->rx = rx;
416 	} else {
417 		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
418 			rc = FFA_INVALID_PARAMETERS;
419 			goto out;
420 		}
421 
422 		if (!virt_to_phys((void *)tx_pa) ||
423 		    !virt_to_phys((void *)rx_pa)) {
424 			rc = FFA_INVALID_PARAMETERS;
425 			goto out;
426 		}
427 
428 		rxtx->tx = (void *)tx_pa;
429 		rxtx->rx = (void *)rx_pa;
430 	}
431 
432 	rxtx->size = sz;
433 	rxtx->tx_is_mine = true;
434 	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
435 	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
436 out:
437 	cpu_spin_unlock(&rxtx->spinlock);
438 	set_simple_ret_val(args, rc);
439 }
440 
441 void spmc_handle_rxtx_unmap(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
442 {
443 	int rc = FFA_INVALID_PARAMETERS;
444 
445 	cpu_spin_lock(&rxtx->spinlock);
446 
447 	if (!rxtx->size)
448 		goto out;
449 
450 	/*
451 	 * We don't unmap the SP memory as the SP might still use it.
452 	 * We avoid making changes to nexus mappings at this stage since
453 	 * there currently isn't a way to replicate those changes to all
454 	 * partitions.
455 	 */
456 	if (is_nw_buf(rxtx) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
457 		unmap_buf(rxtx->rx, rxtx->size);
458 		unmap_buf(rxtx->tx, rxtx->size);
459 	}
460 	rxtx->size = 0;
461 	rxtx->rx = NULL;
462 	rxtx->tx = NULL;
463 	rc = 0;
464 out:
465 	cpu_spin_unlock(&rxtx->spinlock);
466 	set_simple_ret_val(args, rc);
467 }
468 
469 void spmc_handle_rx_release(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
470 {
471 	int rc = 0;
472 
473 	cpu_spin_lock(&rxtx->spinlock);
474 	/* The sender's RX is our TX */
475 	if (!rxtx->size || rxtx->tx_is_mine) {
476 		rc = FFA_DENIED;
477 	} else {
478 		rc = 0;
479 		rxtx->tx_is_mine = true;
480 	}
481 	cpu_spin_unlock(&rxtx->spinlock);
482 
483 	set_simple_ret_val(args, rc);
484 }
485 
486 static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
487 {
488 	return !w0 && !w1 && !w2 && !w3;
489 }
490 
491 static bool is_my_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
492 {
493 	/*
494 	 * This depends on which UUID we have been assigned.
495 	 * TODO add a generic mechanism to obtain our UUID.
496 	 *
497 	 * The test below is for the hard-coded UUID
498 	 * 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
499 	 */
500 	return w0 == my_uuid_words[0] && w1 == my_uuid_words[1] &&
501 	       w2 == my_uuid_words[2] && w3 == my_uuid_words[3];
502 }
503 
504 TEE_Result spmc_fill_partition_entry(uint32_t ffa_vers, void *buf, size_t blen,
505 				     size_t idx, uint16_t endpoint_id,
506 				     uint16_t execution_context,
507 				     uint32_t part_props,
508 				     const uint32_t uuid_words[4])
509 {
510 	struct ffa_partition_info_x *fpi = NULL;
511 	size_t fpi_size = sizeof(*fpi);
512 
513 	if (ffa_vers >= FFA_VERSION_1_1)
514 		fpi_size += FFA_UUID_SIZE;
515 
516 	if ((idx + 1) * fpi_size > blen)
517 		return TEE_ERROR_OUT_OF_MEMORY;
518 
519 	fpi = (void *)((vaddr_t)buf + idx * fpi_size);
520 	fpi->id = endpoint_id;
521 	/* Number of execution contexts implemented by this partition */
522 	fpi->execution_context = execution_context;
523 
524 	fpi->partition_properties = part_props;
525 
526 	/* In FF-A 1.0 only bits [2:0] are defined, let's mask others */
527 	if (ffa_vers < FFA_VERSION_1_1)
528 		fpi->partition_properties &= FFA_PART_PROP_DIRECT_REQ_RECV |
529 					     FFA_PART_PROP_DIRECT_REQ_SEND |
530 					     FFA_PART_PROP_INDIRECT_MSGS;
531 
532 	if (ffa_vers >= FFA_VERSION_1_1) {
533 		if (uuid_words)
534 			memcpy(fpi->uuid, uuid_words, FFA_UUID_SIZE);
535 		else
536 			memset(fpi->uuid, 0, FFA_UUID_SIZE);
537 	}
538 
539 	return TEE_SUCCESS;
540 }
541 
542 static int handle_partition_info_get_all(size_t *elem_count,
543 					 struct ffa_rxtx *rxtx, bool count_only)
544 {
545 	if (!count_only) {
546 		/* Add OP-TEE SP */
547 		if (spmc_fill_partition_entry(rxtx->ffa_vers, rxtx->tx,
548 					      rxtx->size, 0, optee_endpoint_id,
549 					      CFG_TEE_CORE_NB_CORE,
550 					      my_part_props, my_uuid_words))
551 			return FFA_NO_MEMORY;
552 	}
553 	*elem_count = 1;
554 
555 	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
556 		if (sp_partition_info_get(rxtx->ffa_vers, rxtx->tx, rxtx->size,
557 					  NULL, elem_count, count_only))
558 			return FFA_NO_MEMORY;
559 	}
560 
561 	return FFA_OK;
562 }
563 
564 void spmc_handle_partition_info_get(struct thread_smc_args *args,
565 				    struct ffa_rxtx *rxtx)
566 {
567 	TEE_Result res = TEE_SUCCESS;
568 	uint32_t ret_fid = FFA_ERROR;
569 	uint32_t fpi_size = 0;
570 	uint32_t rc = 0;
571 	bool count_only = args->a5 & FFA_PARTITION_INFO_GET_COUNT_FLAG;
572 
573 	if (!count_only) {
574 		cpu_spin_lock(&rxtx->spinlock);
575 
576 		if (!rxtx->size || !rxtx->tx_is_mine) {
577 			rc = FFA_BUSY;
578 			goto out;
579 		}
580 	}
581 
582 	if (is_nil_uuid(args->a1, args->a2, args->a3, args->a4)) {
583 		size_t elem_count = 0;
584 
585 		ret_fid = handle_partition_info_get_all(&elem_count, rxtx,
586 							count_only);
587 
588 		if (ret_fid) {
589 			rc = ret_fid;
590 			ret_fid = FFA_ERROR;
591 		} else {
592 			ret_fid = FFA_SUCCESS_32;
593 			rc = elem_count;
594 		}
595 
596 		goto out;
597 	}
598 
599 	if (is_my_uuid(args->a1, args->a2, args->a3, args->a4)) {
600 		if (!count_only) {
601 			res = spmc_fill_partition_entry(rxtx->ffa_vers,
602 							rxtx->tx, rxtx->size, 0,
603 							optee_endpoint_id,
604 							CFG_TEE_CORE_NB_CORE,
605 							my_part_props,
606 							my_uuid_words);
607 			if (res) {
608 				ret_fid = FFA_ERROR;
609 				rc = FFA_INVALID_PARAMETERS;
610 				goto out;
611 			}
612 		}
613 		rc = 1;
614 	} else if (IS_ENABLED(CFG_SECURE_PARTITION)) {
615 		uint32_t uuid_array[4] = { 0 };
616 		TEE_UUID uuid = { };
617 		size_t count = 0;
618 
619 		uuid_array[0] = args->a1;
620 		uuid_array[1] = args->a2;
621 		uuid_array[2] = args->a3;
622 		uuid_array[3] = args->a4;
623 		tee_uuid_from_octets(&uuid, (uint8_t *)uuid_array);
624 
625 		res = sp_partition_info_get(rxtx->ffa_vers, rxtx->tx,
626 					    rxtx->size, &uuid, &count,
627 					    count_only);
628 		if (res != TEE_SUCCESS) {
629 			ret_fid = FFA_ERROR;
630 			rc = FFA_INVALID_PARAMETERS;
631 			goto out;
632 		}
633 		rc = count;
634 	} else {
635 		ret_fid = FFA_ERROR;
636 		rc = FFA_INVALID_PARAMETERS;
637 		goto out;
638 	}
639 
640 	ret_fid = FFA_SUCCESS_32;
641 
642 out:
643 	if (ret_fid == FFA_SUCCESS_32 && !count_only &&
644 	    rxtx->ffa_vers >= FFA_VERSION_1_1)
645 		fpi_size = sizeof(struct ffa_partition_info_x) + FFA_UUID_SIZE;
646 
647 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, fpi_size,
648 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
649 	if (!count_only) {
650 		rxtx->tx_is_mine = false;
651 		cpu_spin_unlock(&rxtx->spinlock);
652 	}
653 }
654 
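/*
 * FFA_RUN: w1 holds the target endpoint ID in bits [31:16] and the vCPU
 * ID in bits [15:0]. For OP-TEE the vCPU ID identifies a suspended thread
 * to resume; for an SP it resumes the SP from the preempted state.
 */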
655 static void spmc_handle_run(struct thread_smc_args *args)
656 {
657 	uint16_t endpoint = FFA_TARGET_INFO_GET_SP_ID(args->a1);
658 	uint16_t thread_id = FFA_TARGET_INFO_GET_VCPU_ID(args->a1);
659 	uint32_t rc = FFA_OK;
660 
661 	if (endpoint != optee_endpoint_id) {
662 		/*
663 		 * The endpoint should be an SP, try to resume the SP from
664 		 * preempted into busy state.
665 		 */
666 		rc = spmc_sp_resume_from_preempted(endpoint);
667 		if (rc)
668 			goto out;
669 	}
670 
671 	thread_resume_from_rpc(thread_id, 0, 0, 0, 0);
672 
673 	/* thread_resume_from_rpc() returns only if the thread_id is invalid */
674 	rc = FFA_INVALID_PARAMETERS;
675 
676 out:
677 	set_simple_ret_val(args, rc);
678 }
679 #endif /*CFG_CORE_SEL1_SPMC*/
680 
681 static struct notif_vm_bitmap *get_notif_vm_bitmap(struct guest_partition *prtn,
682 						   uint16_t vm_id)
683 {
684 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
685 		if (!prtn)
686 			return NULL;
687 		assert(vm_id == virt_get_guest_id(prtn));
688 		return virt_get_guest_spec_data(prtn, notif_vm_bitmap_id);
689 	}
690 	if (vm_id)
691 		return NULL;
692 	return &default_notif_vm_bitmap;
693 }
694 
695 static uint32_t spmc_enable_async_notif(uint32_t bottom_half_value,
696 					uint16_t vm_id)
697 {
698 	struct guest_partition *prtn = NULL;
699 	struct notif_vm_bitmap *nvb = NULL;
700 	uint32_t old_itr_status = 0;
701 	uint32_t res = 0;
702 
703 	if (!spmc_notif_is_ready) {
704 		/*
705 		 * This should never happen if the normal world respects the
706 		 * exchanged capabilities.
707 		 */
708 		EMSG("Asynchronous notifications are not ready");
709 		return TEE_ERROR_NOT_IMPLEMENTED;
710 	}
711 
712 	if (bottom_half_value >= OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE) {
713 		EMSG("Invalid bottom half value %"PRIu32, bottom_half_value);
714 		return TEE_ERROR_BAD_PARAMETERS;
715 	}
716 
717 	prtn = virt_get_guest(vm_id);
718 	nvb = get_notif_vm_bitmap(prtn, vm_id);
719 	if (!nvb) {
720 		res = TEE_ERROR_BAD_PARAMETERS;
721 		goto out;
722 	}
723 
724 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
725 	nvb->do_bottom_half_value = bottom_half_value;
726 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
727 
728 	notif_deliver_atomic_event(NOTIF_EVENT_STARTED, vm_id);
729 	res = TEE_SUCCESS;
730 out:
731 	virt_put_guest(prtn);
732 	return res;
733 }
734 
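/*
 * OP-TEE specific protocol carried in FF-A direct requests: if
 * OPTEE_FFA_YIELDING_CALL_BIT is set in w3 the request is a yielding call
 * dispatched to a thread, otherwise it's a blocking call handled in place.
 */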
735 static void handle_yielding_call(struct thread_smc_args *args,
736 				 uint32_t direct_resp_fid)
737 {
738 	TEE_Result res = 0;
739 
740 	thread_check_canaries();
741 
742 #ifdef ARM64
743 	/* Saving this for an eventual RPC */
744 	thread_get_core_local()->direct_resp_fid = direct_resp_fid;
745 #endif
746 
747 	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
748 		/* Note connection to struct thread_rpc_arg::ret */
749 		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
750 				       0);
751 		res = TEE_ERROR_BAD_PARAMETERS;
752 	} else {
753 		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
754 				     args->a6, args->a7);
755 		res = TEE_ERROR_BUSY;
756 	}
757 	spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1),
758 		      0, res, 0, 0);
759 }
760 
761 static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
762 {
763 	uint64_t cookie = reg_pair_to_64(a5, a4);
764 	uint32_t res = 0;
765 
766 	res = mobj_ffa_unregister_by_cookie(cookie);
767 	switch (res) {
768 	case TEE_SUCCESS:
769 	case TEE_ERROR_ITEM_NOT_FOUND:
770 		return 0;
771 	case TEE_ERROR_BUSY:
772 		EMSG("res %#"PRIx32, res);
773 		return FFA_BUSY;
774 	default:
775 		EMSG("res %#"PRIx32, res);
776 		return FFA_INVALID_PARAMETERS;
777 	}
778 }
779 
780 static void handle_blocking_call(struct thread_smc_args *args,
781 				 uint32_t direct_resp_fid)
782 {
783 	uint32_t sec_caps = 0;
784 
785 	switch (args->a3) {
786 	case OPTEE_FFA_GET_API_VERSION:
787 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
788 			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
789 			      0);
790 		break;
791 	case OPTEE_FFA_GET_OS_VERSION:
792 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
793 			      CFG_OPTEE_REVISION_MAJOR,
794 			      CFG_OPTEE_REVISION_MINOR,
795 			      TEE_IMPL_GIT_SHA1 >> 32);
796 		break;
797 	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
798 		sec_caps = OPTEE_FFA_SEC_CAP_ARG_OFFSET;
799 		if (spmc_notif_is_ready)
800 			sec_caps |= OPTEE_FFA_SEC_CAP_ASYNC_NOTIF;
801 		if (IS_ENABLED(CFG_RPMB_ANNOUNCE_PROBE_CAP))
802 			sec_caps |= OPTEE_FFA_SEC_CAP_RPMB_PROBE;
803 		spmc_set_args(args, direct_resp_fid,
804 			      swap_src_dst(args->a1), 0, 0,
805 			      THREAD_RPC_MAX_NUM_PARAMS, sec_caps);
806 		break;
807 	case OPTEE_FFA_UNREGISTER_SHM:
808 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
809 			      handle_unregister_shm(args->a4, args->a5), 0, 0);
810 		break;
811 	case OPTEE_FFA_ENABLE_ASYNC_NOTIF:
812 		spmc_set_args(args, direct_resp_fid,
813 			      swap_src_dst(args->a1), 0,
814 			      spmc_enable_async_notif(args->a4,
815 						      FFA_SRC(args->a1)),
816 			      0, 0);
817 		break;
818 	default:
819 		EMSG("Unhandled blocking service ID %#"PRIx32,
820 		     (uint32_t)args->a3);
821 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
822 			      TEE_ERROR_BAD_PARAMETERS, 0, 0);
823 	}
824 }
825 
826 static void handle_framework_direct_request(struct thread_smc_args *args,
827 					    struct ffa_rxtx *rxtx,
828 					    uint32_t direct_resp_fid)
829 {
830 	uint32_t w0 = FFA_ERROR;
831 	uint32_t w1 = FFA_PARAM_MBZ;
832 	uint32_t w2 = FFA_NOT_SUPPORTED;
833 	uint32_t w3 = FFA_PARAM_MBZ;
834 
835 	switch (args->a2 & FFA_MSG_TYPE_MASK) {
836 	case FFA_MSG_SEND_VM_CREATED:
837 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
838 			uint16_t guest_id = args->a5;
839 			TEE_Result res = virt_guest_created(guest_id);
840 
841 			w0 = direct_resp_fid;
842 			w1 = swap_src_dst(args->a1);
843 			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_CREATED;
844 			if (res == TEE_SUCCESS)
845 				w3 = FFA_OK;
846 			else if (res == TEE_ERROR_OUT_OF_MEMORY)
847 				w3 = FFA_DENIED;
848 			else
849 				w3 = FFA_INVALID_PARAMETERS;
850 		}
851 		break;
852 	case FFA_MSG_SEND_VM_DESTROYED:
853 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
854 			uint16_t guest_id = args->a5;
855 			TEE_Result res = virt_guest_destroyed(guest_id);
856 
857 			w0 = direct_resp_fid;
858 			w1 = swap_src_dst(args->a1);
859 			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_DESTROYED;
860 			if (res == TEE_SUCCESS)
861 				w3 = FFA_OK;
862 			else
863 				w3 = FFA_INVALID_PARAMETERS;
864 		}
865 		break;
866 	case FFA_MSG_VERSION_REQ:
867 		w0 = direct_resp_fid;
868 		w1 = swap_src_dst(args->a1);
869 		w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_VERSION_RESP;
870 		w3 = spmc_exchange_version(args->a3, rxtx);
871 		break;
872 	default:
873 		break;
874 	}
875 	spmc_set_args(args, w0, w1, w2, w3, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
876 }
877 
878 static void handle_direct_request(struct thread_smc_args *args,
879 				  struct ffa_rxtx *rxtx)
880 {
881 	uint32_t direct_resp_fid = 0;
882 
883 	if (IS_ENABLED(CFG_SECURE_PARTITION) &&
884 	    FFA_DST(args->a1) != spmc_id &&
885 	    FFA_DST(args->a1) != optee_endpoint_id) {
886 		spmc_sp_start_thread(args);
887 		return;
888 	}
889 
890 	if (OPTEE_SMC_IS_64(args->a0))
891 		direct_resp_fid = FFA_MSG_SEND_DIRECT_RESP_64;
892 	else
893 		direct_resp_fid = FFA_MSG_SEND_DIRECT_RESP_32;
894 
895 	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
896 		handle_framework_direct_request(args, rxtx, direct_resp_fid);
897 		return;
898 	}
899 
900 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
901 	    virt_set_guest(get_sender_id(args->a1))) {
902 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
903 			      TEE_ERROR_ITEM_NOT_FOUND, 0, 0);
904 		return;
905 	}
906 
907 	if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
908 		handle_yielding_call(args, direct_resp_fid);
909 	else
910 		handle_blocking_call(args, direct_resp_fid);
911 
912 	/*
913 	 * Note that handle_yielding_call() typically only returns if a
914 	 * thread cannot be allocated or found. virt_unset_guest() is also
915 	 * called from thread_state_suspend() and thread_state_free().
916 	 */
917 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
918 		virt_unset_guest();
919 }
920 
921 int spmc_read_mem_transaction(uint32_t ffa_vers, void *buf, size_t blen,
922 			      struct ffa_mem_transaction_x *trans)
923 {
924 	uint16_t mem_reg_attr = 0;
925 	uint32_t flags = 0;
926 	uint32_t count = 0;
927 	uint32_t offs = 0;
928 	uint32_t size = 0;
929 	size_t n = 0;
930 
931 	if (!IS_ALIGNED_WITH_TYPE(buf, uint64_t))
932 		return FFA_INVALID_PARAMETERS;
933 
934 	if (ffa_vers >= FFA_VERSION_1_1) {
935 		struct ffa_mem_transaction_1_1 *descr = NULL;
936 
937 		if (blen < sizeof(*descr))
938 			return FFA_INVALID_PARAMETERS;
939 
940 		descr = buf;
941 		trans->sender_id = READ_ONCE(descr->sender_id);
942 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
943 		flags = READ_ONCE(descr->flags);
944 		trans->global_handle = READ_ONCE(descr->global_handle);
945 		trans->tag = READ_ONCE(descr->tag);
946 
947 		count = READ_ONCE(descr->mem_access_count);
948 		size = READ_ONCE(descr->mem_access_size);
949 		offs = READ_ONCE(descr->mem_access_offs);
950 	} else {
951 		struct ffa_mem_transaction_1_0 *descr = NULL;
952 
953 		if (blen < sizeof(*descr))
954 			return FFA_INVALID_PARAMETERS;
955 
956 		descr = buf;
957 		trans->sender_id = READ_ONCE(descr->sender_id);
958 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
959 		flags = READ_ONCE(descr->flags);
960 		trans->global_handle = READ_ONCE(descr->global_handle);
961 		trans->tag = READ_ONCE(descr->tag);
962 
963 		count = READ_ONCE(descr->mem_access_count);
964 		size = sizeof(struct ffa_mem_access);
965 		offs = offsetof(struct ffa_mem_transaction_1_0,
966 				mem_access_array);
967 	}
968 
969 	if (mem_reg_attr > UINT8_MAX || flags > UINT8_MAX ||
970 	    size > UINT8_MAX || count > UINT8_MAX || offs > UINT16_MAX)
971 		return FFA_INVALID_PARAMETERS;
972 
973 	/* Check that the endpoint memory access descriptor array fits */
974 	if (MUL_OVERFLOW(size, count, &n) || ADD_OVERFLOW(offs, n, &n) ||
975 	    n > blen)
976 		return FFA_INVALID_PARAMETERS;
977 
978 	trans->mem_reg_attr = mem_reg_attr;
979 	trans->flags = flags;
980 	trans->mem_access_size = size;
981 	trans->mem_access_count = count;
982 	trans->mem_access_offs = offs;
983 	return 0;
984 }
985 
986 #if defined(CFG_CORE_SEL1_SPMC)
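/*
 * Find the endpoint memory access descriptor targeting OP-TEE and return
 * its access permissions together with the offset of the composite memory
 * region descriptor.
 */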
987 static int get_acc_perms(vaddr_t mem_acc_base, unsigned int mem_access_size,
988 			 unsigned int mem_access_count, uint8_t *acc_perms,
989 			 unsigned int *region_offs)
990 {
991 	struct ffa_mem_access_perm *descr = NULL;
992 	struct ffa_mem_access *mem_acc = NULL;
993 	unsigned int n = 0;
994 
995 	for (n = 0; n < mem_access_count; n++) {
996 		mem_acc = (void *)(mem_acc_base + mem_access_size * n);
997 		descr = &mem_acc->access_perm;
998 		if (READ_ONCE(descr->endpoint_id) == optee_endpoint_id) {
999 			*acc_perms = READ_ONCE(descr->perm);
1000 			*region_offs = READ_ONCE(mem_acc[n].region_offs);
1001 			return 0;
1002 		}
1003 	}
1004 
1005 	return FFA_INVALID_PARAMETERS;
1006 }
1007 
1008 static int mem_share_init(struct ffa_mem_transaction_x *mem_trans, void *buf,
1009 			  size_t blen, unsigned int *page_count,
1010 			  unsigned int *region_count, size_t *addr_range_offs)
1011 {
1012 	const uint16_t exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
1013 	const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
1014 	struct ffa_mem_region *region_descr = NULL;
1015 	unsigned int region_descr_offs = 0;
1016 	uint8_t mem_acc_perm = 0;
1017 	size_t n = 0;
1018 
1019 	if (mem_trans->mem_reg_attr != exp_mem_reg_attr)
1020 		return FFA_INVALID_PARAMETERS;
1021 
1022 	/* Check that the access permissions match what's expected */
1023 	if (get_acc_perms((vaddr_t)buf + mem_trans->mem_access_offs,
1024 			  mem_trans->mem_access_size,
1025 			  mem_trans->mem_access_count,
1026 			  &mem_acc_perm, &region_descr_offs) ||
1027 	    mem_acc_perm != exp_mem_acc_perm)
1028 		return FFA_INVALID_PARAMETERS;
1029 
1030 	/* Check that the Composite memory region descriptor fits */
1031 	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
1032 	    n > blen)
1033 		return FFA_INVALID_PARAMETERS;
1034 
1035 	if (!IS_ALIGNED_WITH_TYPE((vaddr_t)buf + region_descr_offs,
1036 				  struct ffa_mem_region))
1037 		return FFA_INVALID_PARAMETERS;
1038 
1039 	region_descr = (struct ffa_mem_region *)((vaddr_t)buf +
1040 						 region_descr_offs);
1041 	*page_count = READ_ONCE(region_descr->total_page_count);
1042 	*region_count = READ_ONCE(region_descr->address_range_count);
1043 	*addr_range_offs = n;
1044 	return 0;
1045 }
1046 
1047 static int add_mem_share_helper(struct mem_share_state *s, void *buf,
1048 				size_t flen)
1049 {
1050 	unsigned int region_count = flen / sizeof(struct ffa_address_range);
1051 	struct ffa_address_range *arange = NULL;
1052 	unsigned int n = 0;
1053 
1054 	if (region_count > s->region_count)
1055 		region_count = s->region_count;
1056 
1057 	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range))
1058 		return FFA_INVALID_PARAMETERS;
1059 	arange = buf;
1060 
1061 	for (n = 0; n < region_count; n++) {
1062 		unsigned int page_count = READ_ONCE(arange[n].page_count);
1063 		uint64_t addr = READ_ONCE(arange[n].address);
1064 
1065 		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
1066 					  addr, page_count))
1067 			return FFA_INVALID_PARAMETERS;
1068 	}
1069 
1070 	s->region_count -= region_count;
1071 	if (s->region_count)
1072 		return region_count * sizeof(*arange);
1073 
1074 	if (s->current_page_idx != s->page_count)
1075 		return FFA_INVALID_PARAMETERS;
1076 
1077 	return 0;
1078 }
1079 
1080 static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
1081 {
1082 	int rc = 0;
1083 
1084 	rc = add_mem_share_helper(&s->share, buf, flen);
1085 	if (rc >= 0) {
1086 		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
1087 			/* We're not at the end of the descriptor yet */
1088 			if (s->share.region_count)
1089 				return s->frag_offset;
1090 
1091 			/* We're done */
1092 			rc = 0;
1093 		} else {
1094 			rc = FFA_INVALID_PARAMETERS;
1095 		}
1096 	}
1097 
1098 	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
1099 	if (rc < 0)
1100 		mobj_ffa_sel1_spmc_delete(s->share.mf);
1101 	else
1102 		mobj_ffa_push_to_inactive(s->share.mf);
1103 	free(s);
1104 
1105 	return rc;
1106 }
1107 
1108 static bool is_sp_share(struct ffa_mem_transaction_x *mem_trans,
1109 			void *buf)
1110 {
1111 	struct ffa_mem_access_perm *perm = NULL;
1112 	struct ffa_mem_access *mem_acc = NULL;
1113 
1114 	if (!IS_ENABLED(CFG_SECURE_PARTITION))
1115 		return false;
1116 
1117 	if (mem_trans->mem_access_count < 1)
1118 		return false;
1119 
1120 	mem_acc = (void *)((vaddr_t)buf + mem_trans->mem_access_offs);
1121 	perm = &mem_acc->access_perm;
1122 
1123 	/*
1124 	 * perm->endpoint_id is read here only to check if the endpoint is
1125 	 * OP-TEE. We read it again later, but there are some additional
1126 	 * checks there to make sure that the data is correct.
1127 	 */
1128 	return READ_ONCE(perm->endpoint_id) != optee_endpoint_id;
1129 }
1130 
1131 static int add_mem_share(struct ffa_mem_transaction_x *mem_trans,
1132 			 tee_mm_entry_t *mm, void *buf, size_t blen,
1133 			 size_t flen, uint64_t *global_handle)
1134 {
1135 	int rc = 0;
1136 	struct mem_share_state share = { };
1137 	size_t addr_range_offs = 0;
1138 	uint64_t cookie = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
1139 	size_t n = 0;
1140 
1141 	rc = mem_share_init(mem_trans, buf, flen, &share.page_count,
1142 			    &share.region_count, &addr_range_offs);
1143 	if (rc)
1144 		return rc;
1145 
1146 	if (!share.page_count || !share.region_count)
1147 		return FFA_INVALID_PARAMETERS;
1148 
1149 	if (MUL_OVERFLOW(share.region_count,
1150 			 sizeof(struct ffa_address_range), &n) ||
1151 	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
1152 		return FFA_INVALID_PARAMETERS;
1153 
1154 	if (mem_trans->global_handle)
1155 		cookie = mem_trans->global_handle;
1156 	share.mf = mobj_ffa_sel1_spmc_new(cookie, share.page_count);
1157 	if (!share.mf)
1158 		return FFA_NO_MEMORY;
1159 
1160 	if (flen != blen) {
1161 		struct mem_frag_state *s = calloc(1, sizeof(*s));
1162 
1163 		if (!s) {
1164 			rc = FFA_NO_MEMORY;
1165 			goto err;
1166 		}
1167 		s->share = share;
1168 		s->mm = mm;
1169 		s->frag_offset = addr_range_offs;
1170 
1171 		SLIST_INSERT_HEAD(&frag_state_head, s, link);
1172 		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
1173 					flen - addr_range_offs);
1174 
1175 		if (rc >= 0)
1176 			*global_handle = mobj_ffa_get_cookie(share.mf);
1177 
1178 		return rc;
1179 	}
1180 
1181 	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
1182 				  flen - addr_range_offs);
1183 	if (rc) {
1184 		/*
1185 		 * A positive number of consumed bytes may be returned instead
1186 		 * of 0 meaning done.
1187 		 */
1188 		rc = FFA_INVALID_PARAMETERS;
1189 		goto err;
1190 	}
1191 
1192 	*global_handle = mobj_ffa_push_to_inactive(share.mf);
1193 
1194 	return 0;
1195 err:
1196 	mobj_ffa_sel1_spmc_delete(share.mf);
1197 	return rc;
1198 }
1199 
1200 static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
1201 				 unsigned int page_count,
1202 				 uint64_t *global_handle, struct ffa_rxtx *rxtx)
1203 {
1204 	struct ffa_mem_transaction_x mem_trans = { };
1205 	int rc = 0;
1206 	size_t len = 0;
1207 	void *buf = NULL;
1208 	tee_mm_entry_t *mm = NULL;
1209 	vaddr_t offs = pbuf & SMALL_PAGE_MASK;
1210 
1211 	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
1212 		return FFA_INVALID_PARAMETERS;
1213 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
1214 		return FFA_INVALID_PARAMETERS;
1215 
1216 	/*
1217 	 * Check that the length reported in flen is covered by len even
1218 	 * if the offset is taken into account.
1219 	 */
1220 	if (len < flen || len - offs < flen)
1221 		return FFA_INVALID_PARAMETERS;
1222 
1223 	mm = tee_mm_alloc(&core_virt_shm_pool, len);
1224 	if (!mm)
1225 		return FFA_NO_MEMORY;
1226 
1227 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
1228 					  page_count, MEM_AREA_NSEC_SHM)) {
1229 		rc = FFA_INVALID_PARAMETERS;
1230 		goto out;
1231 	}
1232 	buf = (void *)(tee_mm_get_smem(mm) + offs);
1233 
1234 	cpu_spin_lock(&rxtx->spinlock);
1235 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, buf, flen, &mem_trans);
1236 	if (rc)
1237 		goto unlock;
1238 
1239 	if (is_sp_share(&mem_trans, buf)) {
1240 		rc = spmc_sp_add_share(&mem_trans, buf, blen, flen,
1241 				       global_handle, NULL);
1242 		goto unlock;
1243 	}
1244 
1245 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1246 	    virt_set_guest(mem_trans.sender_id)) {
1247 		rc = FFA_DENIED;
1248 		goto unlock;
1249 	}
1250 
1251 	rc = add_mem_share(&mem_trans, mm, buf, blen, flen, global_handle);
1252 
1253 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1254 		virt_unset_guest();
1255 
1256 unlock:
1257 	cpu_spin_unlock(&rxtx->spinlock);
1258 	if (rc > 0)
1259 		return rc;
1260 
1261 	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1262 out:
1263 	tee_mm_free(mm);
1264 	return rc;
1265 }
1266 
1267 static int handle_mem_share_rxbuf(size_t blen, size_t flen,
1268 				  uint64_t *global_handle,
1269 				  struct ffa_rxtx *rxtx)
1270 {
1271 	struct ffa_mem_transaction_x mem_trans = { };
1272 	int rc = FFA_DENIED;
1273 
1274 	cpu_spin_lock(&rxtx->spinlock);
1275 
1276 	if (!rxtx->rx || flen > rxtx->size)
1277 		goto out;
1278 
1279 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, flen,
1280 				       &mem_trans);
1281 	if (rc)
1282 		goto out;
1283 	if (is_sp_share(&mem_trans, rxtx->rx)) {
1284 		rc = spmc_sp_add_share(&mem_trans, rxtx, blen, flen,
1285 				       global_handle, NULL);
1286 		goto out;
1287 	}
1288 
1289 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1290 	    virt_set_guest(mem_trans.sender_id))
1291 		goto out;
1292 
1293 	rc = add_mem_share(&mem_trans, NULL, rxtx->rx, blen, flen,
1294 			   global_handle);
1295 
1296 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1297 		virt_unset_guest();
1298 
1299 out:
1300 	cpu_spin_unlock(&rxtx->spinlock);
1301 
1302 	return rc;
1303 }
1304 
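/*
 * FFA_MEM_SHARE: w1 is the total length of the memory transaction
 * descriptor, w2 the length of the current fragment, and w3/w4 the address
 * and page count of a caller allocated buffer holding the descriptor (both
 * zero when the descriptor is passed in the RX buffer instead).
 */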
1305 static void handle_mem_share(struct thread_smc_args *args,
1306 			     struct ffa_rxtx *rxtx)
1307 {
1308 	uint32_t tot_len = args->a1;
1309 	uint32_t frag_len = args->a2;
1310 	uint64_t addr = args->a3;
1311 	uint32_t page_count = args->a4;
1312 	uint32_t ret_w1 = 0;
1313 	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
1314 	uint32_t ret_w3 = 0;
1315 	uint32_t ret_fid = FFA_ERROR;
1316 	uint64_t global_handle = 0;
1317 	int rc = 0;
1318 
1319 	/* Check that the MBZs are indeed 0 */
1320 	if (args->a5 || args->a6 || args->a7)
1321 		goto out;
1322 
1323 	/* Check that fragment length doesn't exceed total length */
1324 	if (frag_len > tot_len)
1325 		goto out;
1326 
1327 	/* Check for 32-bit calling convention */
1328 	if (args->a0 == FFA_MEM_SHARE_32)
1329 		addr &= UINT32_MAX;
1330 
1331 	if (!addr) {
1332 		/*
1333 		 * The memory transaction descriptor is passed via our rx
1334 		 * buffer.
1335 		 */
1336 		if (page_count)
1337 			goto out;
1338 		rc = handle_mem_share_rxbuf(tot_len, frag_len, &global_handle,
1339 					    rxtx);
1340 	} else {
1341 		rc = handle_mem_share_tmem(addr, tot_len, frag_len, page_count,
1342 					   &global_handle, rxtx);
1343 	}
1344 	if (rc < 0) {
1345 		ret_w2 = rc;
1346 	} else if (rc > 0) {
1347 		ret_fid = FFA_MEM_FRAG_RX;
1348 		ret_w3 = rc;
1349 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1350 	} else {
1351 		ret_fid = FFA_SUCCESS_32;
1352 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1353 	}
1354 out:
1355 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1356 }
1357 
1358 static struct mem_frag_state *get_frag_state(uint64_t global_handle)
1359 {
1360 	struct mem_frag_state *s = NULL;
1361 
1362 	SLIST_FOREACH(s, &frag_state_head, link)
1363 		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
1364 			return s;
1365 
1366 	return NULL;
1367 }
1368 
1369 static void handle_mem_frag_tx(struct thread_smc_args *args,
1370 			       struct ffa_rxtx *rxtx)
1371 {
1372 	uint64_t global_handle = reg_pair_to_64(args->a2, args->a1);
1373 	size_t flen = args->a3;
1374 	uint32_t endpoint_id = args->a4;
1375 	struct mem_frag_state *s = NULL;
1376 	tee_mm_entry_t *mm = NULL;
1377 	unsigned int page_count = 0;
1378 	void *buf = NULL;
1379 	uint32_t ret_w1 = 0;
1380 	uint32_t ret_w2 = 0;
1381 	uint32_t ret_w3 = 0;
1382 	uint32_t ret_fid = 0;
1383 	int rc = 0;
1384 
1385 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1386 		uint16_t guest_id = endpoint_id >> 16;
1387 
1388 		if (!guest_id || virt_set_guest(guest_id)) {
1389 			rc = FFA_INVALID_PARAMETERS;
1390 			goto out_set_rc;
1391 		}
1392 	}
1393 
1394 	/*
1395 	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
1396 	 * requests.
1397 	 */
1398 
1399 	cpu_spin_lock(&rxtx->spinlock);
1400 
1401 	s = get_frag_state(global_handle);
1402 	if (!s) {
1403 		rc = FFA_INVALID_PARAMETERS;
1404 		goto out;
1405 	}
1406 
1407 	mm = s->mm;
1408 	if (mm) {
1409 		if (flen > tee_mm_get_bytes(mm)) {
1410 			rc = FFA_INVALID_PARAMETERS;
1411 			goto out;
1412 		}
1413 		page_count = s->share.page_count;
1414 		buf = (void *)tee_mm_get_smem(mm);
1415 	} else {
1416 		if (flen > rxtx->size) {
1417 			rc = FFA_INVALID_PARAMETERS;
1418 			goto out;
1419 		}
1420 		buf = rxtx->rx;
1421 	}
1422 
1423 	rc = add_mem_share_frag(s, buf, flen);
1424 out:
1425 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1426 		virt_unset_guest();
1427 
1428 	cpu_spin_unlock(&rxtx->spinlock);
1429 
1430 	if (rc <= 0 && mm) {
1431 		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1432 		tee_mm_free(mm);
1433 	}
1434 
1435 out_set_rc:
1436 	if (rc < 0) {
1437 		ret_fid = FFA_ERROR;
1438 		ret_w2 = rc;
1439 	} else if (rc > 0) {
1440 		ret_fid = FFA_MEM_FRAG_RX;
1441 		ret_w3 = rc;
1442 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1443 	} else {
1444 		ret_fid = FFA_SUCCESS_32;
1445 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1446 	}
1447 
1448 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1449 }
1450 
1451 static void handle_mem_reclaim(struct thread_smc_args *args)
1452 {
1453 	int rc = FFA_INVALID_PARAMETERS;
1454 	uint64_t cookie = 0;
1455 
1456 	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
1457 		goto out;
1458 
1459 	cookie = reg_pair_to_64(args->a2, args->a1);
1460 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1461 		uint16_t guest_id = 0;
1462 
1463 		if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
1464 			guest_id = virt_find_guest_by_cookie(cookie);
1465 		} else {
1466 			guest_id = (cookie >> FFA_MEMORY_HANDLE_PRTN_SHIFT) &
1467 				   FFA_MEMORY_HANDLE_PRTN_MASK;
1468 		}
1469 		if (!guest_id)
1470 			goto out;
1471 		if (virt_set_guest(guest_id)) {
1472 			if (!virt_reclaim_cookie_from_destroyed_guest(guest_id,
1473 								      cookie))
1474 				rc = FFA_OK;
1475 			goto out;
1476 		}
1477 	}
1478 
1479 	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
1480 	case TEE_SUCCESS:
1481 		rc = FFA_OK;
1482 		break;
1483 	case TEE_ERROR_ITEM_NOT_FOUND:
1484 		DMSG("cookie %#"PRIx64" not found", cookie);
1485 		rc = FFA_INVALID_PARAMETERS;
1486 		break;
1487 	default:
1488 		DMSG("cookie %#"PRIx64" busy", cookie);
1489 		rc = FFA_DENIED;
1490 		break;
1491 	}
1492 
1493 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
1494 		virt_unset_guest();
1495 
1496 out:
1497 	set_simple_ret_val(args, rc);
1498 }
1499 
1500 static void handle_notification_bitmap_create(struct thread_smc_args *args)
1501 {
1502 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1503 	uint32_t ret_fid = FFA_ERROR;
1504 	uint32_t old_itr_status = 0;
1505 
1506 	if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
1507 	    !args->a5 && !args->a6 && !args->a7) {
1508 		struct guest_partition *prtn = NULL;
1509 		struct notif_vm_bitmap *nvb = NULL;
1510 		uint16_t vm_id = args->a1;
1511 
1512 		prtn = virt_get_guest(vm_id);
1513 		nvb = get_notif_vm_bitmap(prtn, vm_id);
1514 		if (!nvb) {
1515 			ret_val = FFA_INVALID_PARAMETERS;
1516 			goto out_virt_put;
1517 		}
1518 
1519 		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1520 
1521 		if (nvb->initialized) {
1522 			ret_val = FFA_DENIED;
1523 			goto out_unlock;
1524 		}
1525 
1526 		nvb->initialized = true;
1527 		nvb->do_bottom_half_value = -1;
1528 		ret_val = FFA_OK;
1529 		ret_fid = FFA_SUCCESS_32;
1530 out_unlock:
1531 		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1532 out_virt_put:
1533 		virt_put_guest(prtn);
1534 	}
1535 
1536 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1537 }
1538 
1539 static void handle_notification_bitmap_destroy(struct thread_smc_args *args)
1540 {
1541 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1542 	uint32_t ret_fid = FFA_ERROR;
1543 	uint32_t old_itr_status = 0;
1544 
1545 	if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
1546 	    !args->a5 && !args->a6 && !args->a7) {
1547 		struct guest_partition *prtn = NULL;
1548 		struct notif_vm_bitmap *nvb = NULL;
1549 		uint16_t vm_id = args->a1;
1550 
1551 		prtn = virt_get_guest(vm_id);
1552 		nvb = get_notif_vm_bitmap(prtn, vm_id);
1553 		if (!nvb) {
1554 			ret_val = FFA_INVALID_PARAMETERS;
1555 			goto out_virt_put;
1556 		}
1557 
1558 		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1559 
1560 		if (nvb->pending || nvb->bound) {
1561 			ret_val = FFA_DENIED;
1562 			goto out_unlock;
1563 		}
1564 
1565 		memset(nvb, 0, sizeof(*nvb));
1566 		ret_val = FFA_OK;
1567 		ret_fid = FFA_SUCCESS_32;
1568 out_unlock:
1569 		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1570 out_virt_put:
1571 		virt_put_guest(prtn);
1572 	}
1573 
1574 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1575 }
1576 
1577 static void handle_notification_bind(struct thread_smc_args *args)
1578 {
1579 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1580 	struct guest_partition *prtn = NULL;
1581 	struct notif_vm_bitmap *nvb = NULL;
1582 	uint32_t ret_fid = FFA_ERROR;
1583 	uint32_t old_itr_status = 0;
1584 	uint64_t bitmap = 0;
1585 	uint16_t vm_id = 0;
1586 
1587 	if (args->a5 || args->a6 || args->a7)
1588 		goto out;
1589 	if (args->a2) {
1590 		/* We only deal with global notifications */
1591 		ret_val = FFA_DENIED;
1592 		goto out;
1593 	}
1594 
1595 	/* The destination of the eventual notification */
1596 	vm_id = FFA_DST(args->a1);
1597 	bitmap = reg_pair_to_64(args->a4, args->a3);
1598 
1599 	prtn = virt_get_guest(vm_id);
1600 	nvb = get_notif_vm_bitmap(prtn, vm_id);
1601 	if (!nvb) {
1602 		ret_val = FFA_INVALID_PARAMETERS;
1603 		goto out_virt_put;
1604 	}
1605 
1606 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1607 
1608 	if ((bitmap & nvb->bound)) {
1609 		ret_val = FFA_DENIED;
1610 	} else {
1611 		nvb->bound |= bitmap;
1612 		ret_val = FFA_OK;
1613 		ret_fid = FFA_SUCCESS_32;
1614 	}
1615 
1616 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1617 out_virt_put:
1618 	virt_put_guest(prtn);
1619 out:
1620 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1621 }
1622 
1623 static void handle_notification_unbind(struct thread_smc_args *args)
1624 {
1625 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1626 	struct guest_partition *prtn = NULL;
1627 	struct notif_vm_bitmap *nvb = NULL;
1628 	uint32_t ret_fid = FFA_ERROR;
1629 	uint32_t old_itr_status = 0;
1630 	uint64_t bitmap = 0;
1631 	uint16_t vm_id = 0;
1632 
1633 	if (args->a2 || args->a5 || args->a6 || args->a7)
1634 		goto out;
1635 
1636 	/* The destination of the eventual notification */
1637 	vm_id = FFA_DST(args->a1);
1638 	bitmap = reg_pair_to_64(args->a4, args->a3);
1639 
1640 	prtn = virt_get_guest(vm_id);
1641 	nvb = get_notif_vm_bitmap(prtn, vm_id);
1642 	if (!nvb) {
1643 		ret_val = FFA_INVALID_PARAMETERS;
1644 		goto out_virt_put;
1645 	}
1646 
1647 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1648 
1649 	if (bitmap & nvb->pending) {
1650 		ret_val = FFA_DENIED;
1651 	} else {
1652 		nvb->bound &= ~bitmap;
1653 		ret_val = FFA_OK;
1654 		ret_fid = FFA_SUCCESS_32;
1655 	}
1656 
1657 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1658 out_virt_put:
1659 	virt_put_guest(prtn);
1660 out:
1661 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1662 }
1663 
1664 static void handle_notification_get(struct thread_smc_args *args)
1665 {
1666 	uint32_t w2 = FFA_INVALID_PARAMETERS;
1667 	struct guest_partition *prtn = NULL;
1668 	struct notif_vm_bitmap *nvb = NULL;
1669 	uint32_t ret_fid = FFA_ERROR;
1670 	uint32_t old_itr_status = 0;
1671 	uint16_t vm_id = 0;
1672 	uint32_t w3 = 0;
1673 
1674 	if (args->a5 || args->a6 || args->a7)
1675 		goto out;
1676 	if (!(args->a2 & 0x1)) {
1677 		ret_fid = FFA_SUCCESS_32;
1678 		w2 = 0;
1679 		goto out;
1680 	}
1681 	vm_id = FFA_DST(args->a1);
1682 
1683 	prtn = virt_get_guest(vm_id);
1684 	nvb = get_notif_vm_bitmap(prtn, vm_id);
1685 	if (!nvb)
1686 		goto out_virt_put;
1687 
1688 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1689 
1690 	reg_pair_from_64(nvb->pending, &w3, &w2);
1691 	nvb->pending = 0;
1692 	ret_fid = FFA_SUCCESS_32;
1693 
1694 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1695 out_virt_put:
1696 	virt_put_guest(prtn);
1697 out:
1698 	spmc_set_args(args, ret_fid, 0, w2, w3, 0, 0);
1699 }
1700 
1701 struct notif_info_get_state {
1702 	struct thread_smc_args *args;
1703 	unsigned int ids_per_reg;
1704 	unsigned int ids_count;
1705 	unsigned int id_pos;
1706 	unsigned int count;
1707 	unsigned int max_list_count;
1708 	unsigned int list_count;
1709 };
1710 
1711 static unsigned long get_smc_arg(struct thread_smc_args *args, unsigned int idx)
1712 {
1713 	switch (idx) {
1714 	case 0:
1715 		return args->a0;
1716 	case 1:
1717 		return args->a1;
1718 	case 2:
1719 		return args->a2;
1720 	case 3:
1721 		return args->a3;
1722 	case 4:
1723 		return args->a4;
1724 	case 5:
1725 		return args->a5;
1726 	case 6:
1727 		return args->a6;
1728 	case 7:
1729 		return args->a7;
1730 	default:
1731 		assert(0);
1732 		return 0;
1733 	}
1734 }
1735 
1736 static void set_smc_arg(struct thread_smc_args *args, unsigned int idx,
1737 			unsigned long val)
1738 {
1739 	switch (idx) {
1740 	case 0:
1741 		args->a0 = val;
1742 		break;
1743 	case 1:
1744 		args->a1 = val;
1745 		break;
1746 	case 2:
1747 		args->a2 = val;
1748 		break;
1749 	case 3:
1750 		args->a3 = val;
1751 		break;
1752 	case 4:
1753 		args->a4 = val;
1754 		break;
1755 	case 5:
1756 		args->a5 = val;
1757 		break;
1758 	case 6:
1759 		args->a6 = val;
1760 		break;
1761 	case 7:
1762 		args->a7 = val;
1763 		break;
1764 	default:
1765 		assert(0);
1766 	}
1767 }
1768 
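/*
 * Pack a 16-bit ID into the next free slot of the FFA_NOTIFICATION_INFO_GET
 * return registers, w3..w7 (32-bit ABI) or x3..x7 (64-bit ABI).
 */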
1769 static bool add_id_in_regs(struct notif_info_get_state *state,
1770 			   uint16_t id)
1771 {
1772 	unsigned int reg_idx = state->id_pos / state->ids_per_reg + 3;
1773 	unsigned int reg_shift = (state->id_pos % state->ids_per_reg) * 16;
1774 	unsigned long v = 0;
1775 
1776 	if (reg_idx > 7)
1777 		return false;
1778 
1779 	v = get_smc_arg(state->args, reg_idx);
1780 	v &= ~(0xffffUL << reg_shift);
1781 	v |= (unsigned long)id << reg_shift;
1782 	set_smc_arg(state->args, reg_idx, v);
1783 
1784 	state->id_pos++;
1785 	state->count++;
1786 	return true;
1787 }
1788 
1789 static bool add_id_count(struct notif_info_get_state *state)
1790 {
1791 	assert(state->list_count < state->max_list_count &&
1792 	       state->count >= 1 && state->count <= 4);
1793 
1794 	state->ids_count |= (state->count - 1) << (state->list_count * 2 + 12);
1795 	state->list_count++;
1796 	state->count = 0;
1797 
1798 	return state->list_count < state->max_list_count;
1799 }
1800 
1801 static bool add_nvb_to_state(struct notif_info_get_state *state,
1802 			     uint16_t guest_id, struct notif_vm_bitmap *nvb)
1803 {
1804 	if (!nvb->pending)
1805 		return true;
1806 	/*
1807 	 * Add only the guest_id, meaning a global notification for this
1808 	 * guest.
1809 	 *
1810 	 * If there were notifications for one or more specific vCPUs we'd
1811 	 * add those before calling add_id_count(), but that's not supported.
1812 	 */
1813 	return add_id_in_regs(state, guest_id) && add_id_count(state);
1814 }
1815 
1816 static void handle_notification_info_get(struct thread_smc_args *args)
1817 {
1818 	struct notif_info_get_state state = { .args = args };
1819 	uint32_t ffa_res = FFA_INVALID_PARAMETERS;
1820 	struct guest_partition *prtn = NULL;
1821 	struct notif_vm_bitmap *nvb = NULL;
1822 	uint32_t more_pending_flag = 0;
1823 	uint32_t itr_state = 0;
1824 	uint16_t guest_id = 0;
1825 
1826 	if (args->a1 || args->a2 || args->a3 || args->a4 || args->a5 ||
1827 	    args->a6 || args->a7)
1828 		goto err;
1829 
1830 	if (OPTEE_SMC_IS_64(args->a0)) {
1831 		spmc_set_args(args, FFA_SUCCESS_64, 0, 0, 0, 0, 0);
1832 		state.ids_per_reg = 4;
1833 		state.max_list_count = 31;
1834 	} else {
1835 		spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0);
1836 		state.ids_per_reg = 2;
1837 		state.max_list_count = 15;
1838 	}
1839 
1840 	while (true) {
1841 		/*
1842 		 * With NS-Virtualization we need to go through all
1843 		 * partitions to collect the notification bitmaps; without
1844 		 * it we just check the only notification bitmap we have.
1845 		 */
1846 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1847 			prtn = virt_next_guest(prtn);
1848 			if (!prtn)
1849 				break;
1850 			guest_id = virt_get_guest_id(prtn);
1851 		}
1852 		nvb = get_notif_vm_bitmap(prtn, guest_id);
1853 
1854 		itr_state = cpu_spin_lock_xsave(&spmc_notif_lock);
1855 		if (!add_nvb_to_state(&state, guest_id, nvb))
1856 			more_pending_flag = BIT(0);
1857 		cpu_spin_unlock_xrestore(&spmc_notif_lock, itr_state);
1858 
1859 		if (!IS_ENABLED(CFG_NS_VIRTUALIZATION) || more_pending_flag)
1860 			break;
1861 	}
1862 	virt_put_guest(prtn);
1863 
1864 	if (!state.id_pos) {
1865 		ffa_res = FFA_NO_DATA;
1866 		goto err;
1867 	}
1868 	args->a2 = (state.list_count << FFA_NOTIF_INFO_GET_ID_COUNT_SHIFT) |
1869 		   (state.ids_count << FFA_NOTIF_INFO_GET_ID_LIST_SHIFT) |
1870 		   more_pending_flag;
1871 	return;
1872 err:
1873 	spmc_set_args(args, FFA_ERROR, 0, ffa_res, 0, 0, 0);
1874 }
1875 
1876 void thread_spmc_set_async_notif_intid(int intid)
1877 {
1878 	assert(interrupt_can_raise_sgi(interrupt_get_main_chip()));
1879 	notif_intid = intid;
1880 	spmc_notif_is_ready = true;
1881 	DMSG("Asynchronous notifications are ready");
1882 }
1883 
1884 void notif_send_async(uint32_t value, uint16_t guest_id)
1885 {
1886 	struct guest_partition *prtn = NULL;
1887 	struct notif_vm_bitmap *nvb = NULL;
1888 	uint32_t old_itr_status = 0;
1889 
1890 	prtn = virt_get_guest(guest_id);
1891 	nvb = get_notif_vm_bitmap(prtn, guest_id);
1892 
1893 	if (nvb) {
1894 		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1895 		assert(value == NOTIF_VALUE_DO_BOTTOM_HALF &&
1896 		       spmc_notif_is_ready && nvb->do_bottom_half_value >= 0 &&
1897 		       notif_intid >= 0);
1898 		nvb->pending |= BIT64(nvb->do_bottom_half_value);
1899 		interrupt_raise_sgi(interrupt_get_main_chip(), notif_intid,
1900 				    ITR_CPU_MASK_TO_THIS_CPU);
1901 		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1902 	}
1903 
1904 	virt_put_guest(prtn);
1905 }
1906 #else
1907 void notif_send_async(uint32_t value, uint16_t guest_id)
1908 {
1909 	struct guest_partition *prtn = NULL;
1910 	struct notif_vm_bitmap *nvb = NULL;
1911 	/* global notification, delay notification interrupt */
1912 	uint32_t flags = BIT32(1);
1913 	int res = 0;
1914 
1915 	prtn = virt_get_guest(guest_id);
1916 	nvb = get_notif_vm_bitmap(prtn, guest_id);
1917 
1918 	if (nvb) {
1919 		assert(value == NOTIF_VALUE_DO_BOTTOM_HALF &&
1920 		       spmc_notif_is_ready && nvb->do_bottom_half_value >= 0);
1921 		res = ffa_set_notification(guest_id, optee_endpoint_id, flags,
1922 					   BIT64(nvb->do_bottom_half_value));
1923 		if (res) {
1924 			EMSG("notification set failed with error %d", res);
1925 			panic();
1926 		}
1927 	}
1928 
1929 	virt_put_guest(prtn);
1930 }
1931 #endif
1932 
1933 /* Only called from assembly */
1934 void thread_spmc_msg_recv(struct thread_smc_args *args);
1935 void thread_spmc_msg_recv(struct thread_smc_args *args)
1936 {
1937 	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
1938 	switch (args->a0) {
1939 #if defined(CFG_CORE_SEL1_SPMC)
1940 	case FFA_FEATURES:
1941 		handle_features(args);
1942 		break;
1943 	case FFA_SPM_ID_GET:
1944 		spmc_handle_spm_id_get(args);
1945 		break;
1946 #ifdef ARM64
1947 	case FFA_RXTX_MAP_64:
1948 #endif
1949 	case FFA_RXTX_MAP_32:
1950 		spmc_handle_rxtx_map(args, &my_rxtx);
1951 		break;
1952 	case FFA_RXTX_UNMAP:
1953 		spmc_handle_rxtx_unmap(args, &my_rxtx);
1954 		break;
1955 	case FFA_RX_RELEASE:
1956 		spmc_handle_rx_release(args, &my_rxtx);
1957 		break;
1958 	case FFA_PARTITION_INFO_GET:
1959 		spmc_handle_partition_info_get(args, &my_rxtx);
1960 		break;
1961 	case FFA_RUN:
1962 		spmc_handle_run(args);
1963 		break;
1964 #endif /*CFG_CORE_SEL1_SPMC*/
1965 	case FFA_INTERRUPT:
1966 		if (IS_ENABLED(CFG_CORE_SEL1_SPMC))
1967 			spmc_set_args(args, FFA_NORMAL_WORLD_RESUME, 0, 0, 0,
1968 				      0, 0);
1969 		else
1970 			spmc_set_args(args, FFA_MSG_WAIT, 0, 0, 0, 0, 0);
1971 		break;
1972 #ifdef ARM64
1973 	case FFA_MSG_SEND_DIRECT_REQ_64:
1974 #endif
1975 	case FFA_MSG_SEND_DIRECT_REQ_32:
1976 		handle_direct_request(args, &my_rxtx);
1977 		break;
1978 #if defined(CFG_CORE_SEL1_SPMC)
1979 #ifdef ARM64
1980 	case FFA_MEM_SHARE_64:
1981 #endif
1982 	case FFA_MEM_SHARE_32:
1983 		handle_mem_share(args, &my_rxtx);
1984 		break;
1985 	case FFA_MEM_RECLAIM:
1986 		if (!IS_ENABLED(CFG_SECURE_PARTITION) ||
1987 		    !ffa_mem_reclaim(args, NULL))
1988 			handle_mem_reclaim(args);
1989 		break;
1990 	case FFA_MEM_FRAG_TX:
1991 		handle_mem_frag_tx(args, &my_rxtx);
1992 		break;
1993 	case FFA_NOTIFICATION_BITMAP_CREATE:
1994 		handle_notification_bitmap_create(args);
1995 		break;
1996 	case FFA_NOTIFICATION_BITMAP_DESTROY:
1997 		handle_notification_bitmap_destroy(args);
1998 		break;
1999 	case FFA_NOTIFICATION_BIND:
2000 		handle_notification_bind(args);
2001 		break;
2002 	case FFA_NOTIFICATION_UNBIND:
2003 		handle_notification_unbind(args);
2004 		break;
2005 	case FFA_NOTIFICATION_GET:
2006 		handle_notification_get(args);
2007 		break;
2008 #ifdef ARM64
2009 	case FFA_NOTIFICATION_INFO_GET_64:
2010 #endif
2011 	case FFA_NOTIFICATION_INFO_GET_32:
2012 		handle_notification_info_get(args);
2013 		break;
2014 #endif /*CFG_CORE_SEL1_SPMC*/
2015 	case FFA_ERROR:
2016 		EMSG("Cannot handle FFA_ERROR(%d)", (int)args->a2);
2017 		if (!IS_ENABLED(CFG_CORE_SEL1_SPMC)) {
2018 			/*
2019 			 * The SPMC will return an FFA_ERROR back, so it is
2020 			 * better to panic() now than to flood the log.
2021 			 */
2022 			panic("FFA_ERROR from SPMC is fatal");
2023 		}
2024 		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
2025 			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
2026 		break;
2027 	default:
2028 		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
2029 		set_simple_ret_val(args, FFA_NOT_SUPPORTED);
2030 	}
2031 }
2032 
2033 static TEE_Result yielding_call_with_arg(uint64_t cookie, uint32_t offset)
2034 {
2035 	size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
2036 	struct thread_ctx *thr = threads + thread_get_id();
2037 	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
2038 	struct optee_msg_arg *arg = NULL;
2039 	struct mobj *mobj = NULL;
2040 	uint32_t num_params = 0;
2041 	size_t sz = 0;
2042 
2043 	mobj = mobj_ffa_get_by_cookie(cookie, 0);
2044 	if (!mobj) {
2045 		EMSG("Can't find cookie %#"PRIx64, cookie);
2046 		return TEE_ERROR_BAD_PARAMETERS;
2047 	}
2048 
2049 	res = mobj_inc_map(mobj);
2050 	if (res)
2051 		goto out_put_mobj;
2052 
2053 	res = TEE_ERROR_BAD_PARAMETERS;
2054 	arg = mobj_get_va(mobj, offset, sizeof(*arg));
2055 	if (!arg)
2056 		goto out_dec_map;
2057 
2058 	num_params = READ_ONCE(arg->num_params);
2059 	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
2060 		goto out_dec_map;
2061 
2062 	sz = OPTEE_MSG_GET_ARG_SIZE(num_params);
2063 
2064 	thr->rpc_arg = mobj_get_va(mobj, offset + sz, sz_rpc);
2065 	if (!thr->rpc_arg)
2066 		goto out_dec_map;
2067 
2068 	virt_on_stdcall();
2069 	res = tee_entry_std(arg, num_params);
2070 
2071 	thread_rpc_shm_cache_clear(&thr->shm_cache);
2072 	thr->rpc_arg = NULL;
2073 
2074 out_dec_map:
2075 	mobj_dec_map(mobj);
2076 out_put_mobj:
2077 	mobj_put(mobj);
2078 	return res;
2079 }
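
/*
 * Sketch of the shared buffer layout consumed above, offsets relative to
 * the start of the mobj identified by @cookie:
 *
 *	offset        struct optee_msg_arg for the call itself, sized
 *	              OPTEE_MSG_GET_ARG_SIZE(arg->num_params)
 *	offset + sz   struct optee_msg_arg reserved for RPC, sized
 *	              OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS)
 *
 * Both areas must fit within the shared mobj or mobj_get_va() fails.
 */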
2080 
2081 /*
2082  * Helper routine for the assembly function thread_std_smc_entry()
2083  *
2084  * Note: this function is weak just to make link_dummies_paged.c happy.
2085  */
2086 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
2087 				       uint32_t a2, uint32_t a3,
2088 				       uint32_t a4, uint32_t a5 __unused)
2089 {
2090 	/*
2091 	 * Arguments are supplied from handle_yielding_call() as:
2092 	 * a0 <- w1
2093 	 * a1 <- w3
2094 	 * a2 <- w4
2095 	 * a3 <- w5
2096 	 * a4 <- w6
2097 	 * a5 <- w7
2098 	 */
2099 	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
2100 	if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG)
2101 		return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4);
2102 	return FFA_DENIED;
2103 }
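
/*
 * Minimal sketch of the cookie reassembly above, assuming that
 * reg_pair_to_64() from <util.h> packs its first argument into the high
 * word:
 *
 *	uint64_t cookie = (uint64_t)a3 << 32 | a2;
 *
 * that is, normal world passes the shared-memory cookie split over w5/w4
 * and the offset of the argument struct in w6.
 */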
2104 
2105 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
2106 {
2107 	uint64_t offs = tpm->u.memref.offs;
2108 
2109 	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
2110 		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
2111 
2112 	param->u.fmem.offs_low = offs;
2113 	param->u.fmem.offs_high = offs >> 32;
2114 	if (param->u.fmem.offs_high != offs >> 32)
2115 		return false;
2116 
2117 	param->u.fmem.size = tpm->u.memref.size;
2118 	if (tpm->u.memref.mobj) {
2119 		uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj);
2120 
2121 		/* If a mobj is passed it must have a valid cookie. */
2122 		if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
2123 			return false;
2124 		param->u.fmem.global_id = cookie;
2125 	} else {
2126 		param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
2127 	}
2128 
2129 	return true;
2130 }
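
/*
 * Hedged sketch, not part of the message ABI headers: a consumer of the
 * fmem parameter filled in above would reassemble the offset as
 *
 *	uint64_t offs = param->u.fmem.offs_low |
 *			(uint64_t)param->u.fmem.offs_high << 32;
 *
 * The comparison against offs >> 32 above only rejects offsets that do
 * not fit in the narrower offs_high field.
 */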
2131 
2132 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
2133 			    struct thread_param *params,
2134 			    struct optee_msg_arg **arg_ret)
2135 {
2136 	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
2137 	struct thread_ctx *thr = threads + thread_get_id();
2138 	struct optee_msg_arg *arg = thr->rpc_arg;
2139 
2140 	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
2141 		return TEE_ERROR_BAD_PARAMETERS;
2142 
2143 	if (!arg) {
2144 		EMSG("rpc_arg not set");
2145 		return TEE_ERROR_GENERIC;
2146 	}
2147 
2148 	memset(arg, 0, sz);
2149 	arg->cmd = cmd;
2150 	arg->num_params = num_params;
2151 	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
2152 
2153 	for (size_t n = 0; n < num_params; n++) {
2154 		switch (params[n].attr) {
2155 		case THREAD_PARAM_ATTR_NONE:
2156 			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
2157 			break;
2158 		case THREAD_PARAM_ATTR_VALUE_IN:
2159 		case THREAD_PARAM_ATTR_VALUE_OUT:
2160 		case THREAD_PARAM_ATTR_VALUE_INOUT:
2161 			arg->params[n].attr = params[n].attr -
2162 					      THREAD_PARAM_ATTR_VALUE_IN +
2163 					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
2164 			arg->params[n].u.value.a = params[n].u.value.a;
2165 			arg->params[n].u.value.b = params[n].u.value.b;
2166 			arg->params[n].u.value.c = params[n].u.value.c;
2167 			break;
2168 		case THREAD_PARAM_ATTR_MEMREF_IN:
2169 		case THREAD_PARAM_ATTR_MEMREF_OUT:
2170 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
2171 			if (!set_fmem(arg->params + n, params + n))
2172 				return TEE_ERROR_BAD_PARAMETERS;
2173 			break;
2174 		default:
2175 			return TEE_ERROR_BAD_PARAMETERS;
2176 		}
2177 	}
2178 
2179 	if (arg_ret)
2180 		*arg_ret = arg;
2181 
2182 	return TEE_SUCCESS;
2183 }
2184 
2185 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
2186 				struct thread_param *params)
2187 {
2188 	for (size_t n = 0; n < num_params; n++) {
2189 		switch (params[n].attr) {
2190 		case THREAD_PARAM_ATTR_VALUE_OUT:
2191 		case THREAD_PARAM_ATTR_VALUE_INOUT:
2192 			params[n].u.value.a = arg->params[n].u.value.a;
2193 			params[n].u.value.b = arg->params[n].u.value.b;
2194 			params[n].u.value.c = arg->params[n].u.value.c;
2195 			break;
2196 		case THREAD_PARAM_ATTR_MEMREF_OUT:
2197 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
2198 			params[n].u.memref.size = arg->params[n].u.fmem.size;
2199 			break;
2200 		default:
2201 			break;
2202 		}
2203 	}
2204 
2205 	return arg->ret;
2206 }
2207 
2208 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
2209 			struct thread_param *params)
2210 {
2211 	struct thread_rpc_arg rpc_arg = { .call = {
2212 			.w1 = thread_get_tsd()->rpc_target_info,
2213 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
2214 		},
2215 	};
2216 	struct optee_msg_arg *arg = NULL;
2217 	uint32_t ret = 0;
2218 
2219 	ret = get_rpc_arg(cmd, num_params, params, &arg);
2220 	if (ret)
2221 		return ret;
2222 
2223 	thread_rpc(&rpc_arg);
2224 
2225 	return get_rpc_arg_res(arg, num_params, params);
2226 }
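
/*
 * Illustrative use of thread_rpc_cmd() (a sketch, real callers live
 * elsewhere in the kernel), using OPTEE_RPC_CMD_GET_TIME as the example
 * command:
 *
 *	struct thread_param p = THREAD_PARAM_VALUE(OUT, 0, 0, 0);
 *	uint32_t res = thread_rpc_cmd(OPTEE_RPC_CMD_GET_TIME, 1, &p);
 *
 * On TEE_SUCCESS, p.u.value.a and p.u.value.b hold what normal world
 * wrote back into the VALUE_OUT parameter, as documented in
 * optee_rpc_cmd.h.
 */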
2227 
2228 static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
2229 {
2230 	struct thread_rpc_arg rpc_arg = { .call = {
2231 			.w1 = thread_get_tsd()->rpc_target_info,
2232 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
2233 		},
2234 	};
2235 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
2236 	uint32_t res2 = 0;
2237 	uint32_t res = 0;
2238 
2239 	DMSG("freeing cookie %#"PRIx64, cookie);
2240 
2241 	res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);
2242 
2243 	mobj_put(mobj);
2244 	res2 = mobj_ffa_unregister_by_cookie(cookie);
2245 	if (res2)
2246 		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
2247 		     cookie, res2);
2248 	if (!res)
2249 		thread_rpc(&rpc_arg);
2250 }
2251 
2252 static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
2253 {
2254 	struct thread_rpc_arg rpc_arg = { .call = {
2255 			.w1 = thread_get_tsd()->rpc_target_info,
2256 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
2257 		},
2258 	};
2259 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
2260 	struct optee_msg_arg *arg = NULL;
2261 	unsigned int internal_offset = 0;
2262 	struct mobj *mobj = NULL;
2263 	uint64_t cookie = 0;
2264 
2265 	if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
2266 		return NULL;
2267 
2268 	thread_rpc(&rpc_arg);
2269 
2270 	if (arg->num_params != 1 ||
2271 	    arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
2272 		return NULL;
2273 
2274 	internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs);
2275 	cookie = READ_ONCE(arg->params->u.fmem.global_id);
2276 	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
2277 	if (!mobj) {
2278 		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
2279 		     cookie, internal_offset);
2280 		return NULL;
2281 	}
2282 
2283 	assert(mobj_is_nonsec(mobj));
2284 
2285 	if (mobj->size < size) {
2286 		DMSG("Mobj %#"PRIx64": wrong size", cookie);
2287 		mobj_put(mobj);
2288 		return NULL;
2289 	}
2290 
2291 	if (mobj_inc_map(mobj)) {
2292 		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
2293 		mobj_put(mobj);
2294 		return NULL;
2295 	}
2296 
2297 	return mobj;
2298 }
2299 
2300 struct mobj *thread_rpc_alloc_payload(size_t size)
2301 {
2302 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
2303 }
2304 
2305 struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
2306 {
2307 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
2308 }
2309 
2310 void thread_rpc_free_kernel_payload(struct mobj *mobj)
2311 {
2312 	if (mobj)
2313 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL,
2314 				mobj_get_cookie(mobj), mobj);
2315 }
2316 
2317 void thread_rpc_free_payload(struct mobj *mobj)
2318 {
2319 	if (mobj)
2320 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
2321 				mobj);
2322 }
2323 
2324 struct mobj *thread_rpc_alloc_global_payload(size_t size)
2325 {
2326 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
2327 }
2328 
2329 void thread_rpc_free_global_payload(struct mobj *mobj)
2330 {
2331 	if (mobj)
2332 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL,
2333 				mobj_get_cookie(mobj), mobj);
2334 }
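
/*
 * Illustrative pairing of the helpers above (sketch with a made-up size):
 * memory obtained from normal world should be released through the
 * matching free helper once no references to the cookie remain:
 *
 *	struct mobj *m = thread_rpc_alloc_payload(4096);
 *	void *va = NULL;
 *
 *	if (m)
 *		va = mobj_get_va(m, 0, 4096);
 *	if (va) {
 *		... use the buffer shared with normal world ...
 *	}
 *	thread_rpc_free_payload(m);
 */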
2335 
2336 void thread_spmc_register_secondary_ep(vaddr_t ep)
2337 {
2338 	unsigned long ret = 0;
2339 
2340 	/* Let the SPM know the entry point for secondary CPUs */
2341 	ret = thread_smc(FFA_SECONDARY_EP_REGISTER_64, ep, 0, 0);
2342 
2343 	if (ret != FFA_SUCCESS_32 && ret != FFA_SUCCESS_64)
2344 		EMSG("FFA_SECONDARY_EP_REGISTER_64 ret %#lx", ret);
2345 }
2346 
2347 static uint16_t ffa_id_get(void)
2348 {
2349 	/*
2350 	 * Ask the SPM component running at a higher EL to return our FF-A ID.
2351 	 * This can either be the SPMC ID (if the SPMC is enabled in OP-TEE) or
2352 	 * the partition ID (if not).
2353 	 */
2354 	struct thread_smc_args args = {
2355 		.a0 = FFA_ID_GET,
2356 	};
2357 
2358 	thread_smccc(&args);
2359 	if (!is_ffa_success(args.a0)) {
2360 		if (args.a0 == FFA_ERROR)
2361 			EMSG("Get id failed with error %ld", args.a2);
2362 		else
2363 			EMSG("Get id failed");
2364 		panic();
2365 	}
2366 
2367 	return args.a2;
2368 }
2369 
2370 static uint16_t ffa_spm_id_get(void)
2371 {
2372 	/*
2373 	 * Ask the SPM component running at a higher EL to return its ID.
2374 	 * If OP-TEE implements the S-EL1 SPMC, this will get the SPMD ID.
2375 	 * If not, the ID of the SPMC will be returned.
2376 	 */
2377 	struct thread_smc_args args = {
2378 		.a0 = FFA_SPM_ID_GET,
2379 	};
2380 
2381 	thread_smccc(&args);
2382 	if (!is_ffa_success(args.a0)) {
2383 		if (args.a0 == FFA_ERROR)
2384 			EMSG("Get spm id failed with error %ld", args.a2);
2385 		else
2386 			EMSG("Get spm id failed");
2387 		panic();
2388 	}
2389 
2390 	return args.a2;
2391 }
2392 
2393 #if defined(CFG_CORE_SEL1_SPMC)
2394 static TEE_Result spmc_init(void)
2395 {
2396 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
2397 	    virt_add_guest_spec_data(&notif_vm_bitmap_id,
2398 				     sizeof(struct notif_vm_bitmap), NULL))
2399 		panic("virt_add_guest_spec_data");
2400 	spmd_id = ffa_spm_id_get();
2401 	DMSG("SPMD ID %#"PRIx16, spmd_id);
2402 
2403 	spmc_id = ffa_id_get();
2404 	DMSG("SPMC ID %#"PRIx16, spmc_id);
2405 
2406 	optee_endpoint_id = FFA_SWD_ID_MIN;
2407 	while (optee_endpoint_id == spmd_id || optee_endpoint_id == spmc_id)
2408 		optee_endpoint_id++;
2409 
2410 	DMSG("OP-TEE endpoint ID %#"PRIx16, optee_endpoint_id);
2411 
2412 	/*
2413 	 * If the SPMD thinks we are version 1.0 it will report version
2414 	 * 1.0 to normal world regardless of which version we query the
2415 	 * SPM with. However, if the SPMD thinks we are version 1.1 it
2416 	 * will forward version queries from normal world and let us
2417 	 * negotiate the version. So by setting version 1.0 here we should
2418 	 * remain compatible.
2419 	 *
2420 	 * Note that disagreement on the negotiated version means that
2421 	 * we'll have communication problems with normal world.
2421 	 */
2422 	my_rxtx.ffa_vers = FFA_VERSION_1_0;
2423 
2424 	return TEE_SUCCESS;
2425 }
2426 #else /* !defined(CFG_CORE_SEL1_SPMC) */
2427 static void spmc_rxtx_map(struct ffa_rxtx *rxtx)
2428 {
2429 	struct thread_smc_args args = {
2430 #ifdef ARM64
2431 		.a0 = FFA_RXTX_MAP_64,
2432 #else
2433 		.a0 = FFA_RXTX_MAP_32,
2434 #endif
2435 		.a1 = virt_to_phys(rxtx->tx),
2436 		.a2 = virt_to_phys(rxtx->rx),
2437 		.a3 = 1,
2438 	};
2439 
2440 	thread_smccc(&args);
2441 	if (!is_ffa_success(args.a0)) {
2442 		if (args.a0 == FFA_ERROR)
2443 			EMSG("rxtx map failed with error %ld", args.a2);
2444 		else
2445 			EMSG("rxtx map failed");
2446 		panic();
2447 	}
2448 }
2449 
2450 static uint32_t get_ffa_version(uint32_t my_version)
2451 {
2452 	struct thread_smc_args args = {
2453 		.a0 = FFA_VERSION,
2454 		.a1 = my_version,
2455 	};
2456 
2457 	thread_smccc(&args);
2458 	if (args.a0 & BIT(31)) {
2459 		EMSG("FF-A version failed with error %ld", args.a0);
2460 		panic();
2461 	}
2462 
2463 	return args.a0;
2464 }
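
/*
 * Sketch of the FFA_VERSION word format assumed above and in spmc_init()
 * below (see ffa.h and the FF-A specification for the authoritative
 * values): bit 31 must be zero in a valid version, so a set bit 31
 * indicates an error return, and the rest encodes
 *
 *	version = major << FFA_VERSION_MAJOR_SHIFT |
 *		  minor << FFA_VERSION_MINOR_SHIFT;
 */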
2465 
2466 static void *spmc_retrieve_req(uint64_t cookie,
2467 			       struct ffa_mem_transaction_x *trans)
2468 {
2469 	struct ffa_mem_access *acc_descr_array = NULL;
2470 	struct ffa_mem_access_perm *perm_descr = NULL;
2471 	struct thread_smc_args args = {
2472 		.a0 = FFA_MEM_RETRIEVE_REQ_32,
2473 		.a3 =	0,	/* Address, Using TX -> MBZ */
2474 		.a4 =   0,	/* Using TX -> MBZ */
2475 	};
2476 	size_t size = 0;
2477 	int rc = 0;
2478 
2479 	if (my_rxtx.ffa_vers == FFA_VERSION_1_0) {
2480 		struct ffa_mem_transaction_1_0 *trans_descr = my_rxtx.tx;
2481 
2482 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
2483 		memset(trans_descr, 0, size);
2484 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
2485 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
2486 		trans_descr->global_handle = cookie;
2487 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
2488 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
2489 		trans_descr->mem_access_count = 1;
2490 		acc_descr_array = trans_descr->mem_access_array;
2491 	} else {
2492 		struct ffa_mem_transaction_1_1 *trans_descr = my_rxtx.tx;
2493 
2494 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
2495 		memset(trans_descr, 0, size);
2496 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
2497 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
2498 		trans_descr->global_handle = cookie;
2499 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
2500 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
2501 		trans_descr->mem_access_count = 1;
2502 		trans_descr->mem_access_offs = sizeof(*trans_descr);
2503 		trans_descr->mem_access_size = sizeof(struct ffa_mem_access);
2504 		acc_descr_array = (void *)((vaddr_t)my_rxtx.tx +
2505 					   sizeof(*trans_descr));
2506 	}
2507 	acc_descr_array->region_offs = 0;
2508 	acc_descr_array->reserved = 0;
2509 	perm_descr = &acc_descr_array->access_perm;
2510 	perm_descr->endpoint_id = optee_endpoint_id;
2511 	perm_descr->perm = FFA_MEM_ACC_RW;
2512 	perm_descr->flags = 0;
2513 
2514 	args.a1 = size; /* Total Length */
2515 	args.a2 = size; /* Frag Length == Total length */
2516 	thread_smccc(&args);
2517 	if (args.a0 != FFA_MEM_RETRIEVE_RESP) {
2518 		if (args.a0 == FFA_ERROR)
2519 			EMSG("Failed to fetch cookie %#"PRIx64" error code %d",
2520 			     cookie, (int)args.a2);
2521 		else
2522 			EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64,
2523 			     cookie, args.a0);
2524 		return NULL;
2525 	}
2526 	rc = spmc_read_mem_transaction(my_rxtx.ffa_vers, my_rxtx.rx,
2527 				       my_rxtx.size, trans);
2528 	if (rc) {
2529 		EMSG("Memory transaction failure for cookie %#"PRIx64" rc %d",
2530 		     cookie, rc);
2531 		return NULL;
2532 	}
2533 
2534 	return my_rxtx.rx;
2535 }
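
/*
 * Sketch of the retrieve request written to the TX buffer above when
 * FF-A v1.1 or later has been negotiated:
 *
 *	[0, sizeof(struct ffa_mem_transaction_1_1))   transaction header
 *	[mem_access_offs, +mem_access_size)           one ffa_mem_access
 *
 * With FF-A v1.0 the single ffa_mem_access entry is instead embedded in
 * struct ffa_mem_transaction_1_0::mem_access_array.
 */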
2536 
2537 void thread_spmc_relinquish(uint64_t cookie)
2538 {
2539 	struct ffa_mem_relinquish *relinquish_desc = my_rxtx.tx;
2540 	struct thread_smc_args args = {
2541 		.a0 = FFA_MEM_RELINQUISH,
2542 	};
2543 
2544 	memset(relinquish_desc, 0, sizeof(*relinquish_desc));
2545 	relinquish_desc->handle = cookie;
2546 	relinquish_desc->flags = 0;
2547 	relinquish_desc->endpoint_count = 1;
2548 	relinquish_desc->endpoint_id_array[0] = optee_endpoint_id;
2549 	thread_smccc(&args);
2550 	if (!is_ffa_success(args.a0))
2551 		EMSG("Failed to relinquish cookie %#"PRIx64, cookie);
2552 }
2553 
2554 static int set_pages(struct ffa_address_range *regions,
2555 		     unsigned int num_regions, unsigned int num_pages,
2556 		     struct mobj_ffa *mf)
2557 {
2558 	unsigned int n = 0;
2559 	unsigned int idx = 0;
2560 
2561 	for (n = 0; n < num_regions; n++) {
2562 		unsigned int page_count = READ_ONCE(regions[n].page_count);
2563 		uint64_t addr = READ_ONCE(regions[n].address);
2564 
2565 		if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count))
2566 			return FFA_INVALID_PARAMETERS;
2567 	}
2568 
2569 	if (idx != num_pages)
2570 		return FFA_INVALID_PARAMETERS;
2571 
2572 	return 0;
2573 }
2574 
2575 struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie)
2576 {
2577 	struct mobj_ffa *ret = NULL;
2578 	struct ffa_mem_transaction_x retrieve_desc = { };
2579 	struct ffa_mem_access *descr_array = NULL;
2580 	struct ffa_mem_region *descr = NULL;
2581 	struct mobj_ffa *mf = NULL;
2582 	unsigned int num_pages = 0;
2583 	unsigned int offs = 0;
2584 	void *buf = NULL;
2585 	struct thread_smc_args ffa_rx_release_args = {
2586 		.a0 = FFA_RX_RELEASE
2587 	};
2588 
2589 	/*
2590 	 * OP-TEE only supports a single mem_region while the
2591 	 * specification allows for more than one.
2592 	 */
2593 	buf = spmc_retrieve_req(cookie, &retrieve_desc);
2594 	if (!buf) {
2595 		EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64,
2596 		     cookie);
2597 		return NULL;
2598 	}
2599 
2600 	descr_array = (void *)((vaddr_t)buf + retrieve_desc.mem_access_offs);
2601 	offs = READ_ONCE(descr_array->region_offs);
2602 	descr = (struct ffa_mem_region *)((vaddr_t)buf + offs);
2603 
2604 	num_pages = READ_ONCE(descr->total_page_count);
2605 	mf = mobj_ffa_spmc_new(cookie, num_pages);
2606 	if (!mf)
2607 		goto out;
2608 
2609 	if (set_pages(descr->address_range_array,
2610 		      READ_ONCE(descr->address_range_count), num_pages, mf)) {
2611 		mobj_ffa_spmc_delete(mf);
2612 		goto out;
2613 	}
2614 
2615 	ret = mf;
2616 
2617 out:
2618 	/* Release RX buffer after the mem retrieve request. */
2619 	thread_smccc(&ffa_rx_release_args);
2620 
2621 	return ret;
2622 }
2623 
2624 static TEE_Result spmc_init(void)
2625 {
2626 	unsigned int major = 0;
2627 	unsigned int minor __maybe_unused = 0;
2628 	uint32_t my_vers = 0;
2629 	uint32_t vers = 0;
2630 
2631 	my_vers = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR);
2632 	vers = get_ffa_version(my_vers);
2633 	major = (vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK;
2634 	minor = (vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK;
2635 	DMSG("SPMC reported version %u.%u", major, minor);
2636 	if (major != FFA_VERSION_MAJOR) {
2637 		EMSG("Incompatible major version %u, expected %u",
2638 		     major, FFA_VERSION_MAJOR);
2639 		panic();
2640 	}
2641 	if (vers < my_vers)
2642 		my_vers = vers;
2643 	DMSG("Using version %u.%u",
2644 	     (my_vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK,
2645 	     (my_vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK);
2646 	my_rxtx.ffa_vers = my_vers;
2647 
2648 	spmc_rxtx_map(&my_rxtx);
2649 
2650 	spmc_id = ffa_spm_id_get();
2651 	DMSG("SPMC ID %#"PRIx16, spmc_id);
2652 
2653 	optee_endpoint_id = ffa_id_get();
2654 	DMSG("OP-TEE endpoint ID %#"PRIx16, optee_endpoint_id);
2655 
2656 	if (!ffa_features(FFA_NOTIFICATION_SET)) {
2657 		spmc_notif_is_ready = true;
2658 		DMSG("Asynchronous notifications are ready");
2659 	}
2660 
2661 	return TEE_SUCCESS;
2662 }
2663 #endif /* !defined(CFG_CORE_SEL1_SPMC) */
2664 
2665 nex_service_init(spmc_init);
2666