xref: /optee_os/core/arch/arm/kernel/thread_spmc.c (revision 070d197fa568917d4b32fa2b379098715016c52d)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2020-2023, Linaro Limited.
4  * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
5  */
6 
7 #include <assert.h>
8 #include <ffa.h>
9 #include <initcall.h>
10 #include <io.h>
11 #include <kernel/interrupt.h>
12 #include <kernel/notif.h>
13 #include <kernel/panic.h>
14 #include <kernel/secure_partition.h>
15 #include <kernel/spinlock.h>
16 #include <kernel/spmc_sp_handler.h>
17 #include <kernel/tee_misc.h>
18 #include <kernel/thread.h>
19 #include <kernel/thread_private.h>
20 #include <kernel/thread_spmc.h>
21 #include <kernel/virtualization.h>
22 #include <mm/core_mmu.h>
23 #include <mm/mobj.h>
24 #include <optee_ffa.h>
25 #include <optee_msg.h>
26 #include <optee_rpc_cmd.h>
27 #include <sm/optee_smc.h>
28 #include <string.h>
29 #include <sys/queue.h>
30 #include <tee/entry_std.h>
31 #include <tee/uuid.h>
32 #include <util.h>
33 
34 #if defined(CFG_CORE_SEL1_SPMC)
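/*
 * Bookkeeping for an FFA_MEM_SHARE transaction that is delivered in
 * several FFA_MEM_FRAG_TX fragments. struct mem_share_state records how
 * far into the composite memory region descriptor we have come, and each
 * pending fragmented share is kept as a struct mem_frag_state on
 * frag_state_head until the last fragment has been received.
 */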
35 struct mem_share_state {
36 	struct mobj_ffa *mf;
37 	unsigned int page_count;
38 	unsigned int region_count;
39 	unsigned int current_page_idx;
40 };
41 
42 struct mem_frag_state {
43 	struct mem_share_state share;
44 	tee_mm_entry_t *mm;
45 	unsigned int frag_offset;
46 	SLIST_ENTRY(mem_frag_state) link;
47 };
48 #endif
49 
50 static unsigned int spmc_notif_lock = SPINLOCK_UNLOCK;
51 static int do_bottom_half_value = -1;
52 static uint16_t notif_vm_id;
53 static bool spmc_notif_is_ready;
54 
55 /* Initialized in spmc_init() below */
56 static uint16_t my_endpoint_id __nex_bss;
57 #ifdef CFG_CORE_SEL1_SPMC
58 static const uint32_t my_part_props = FFA_PART_PROP_DIRECT_REQ_RECV |
59 				      FFA_PART_PROP_DIRECT_REQ_SEND |
60 #ifdef CFG_NS_VIRTUALIZATION
61 				      FFA_PART_PROP_NOTIF_CREATED |
62 				      FFA_PART_PROP_NOTIF_DESTROYED |
63 #endif
64 #ifdef ARM64
65 				      FFA_PART_PROP_AARCH64_STATE |
66 #endif
67 				      FFA_PART_PROP_IS_PE_ID;
68 
69 static uint32_t my_uuid_words[] = {
70 	/*
71 	 * - if the SPMC is in S-EL2 this UUID describes OP-TEE as a S-EL1
72 	 *   SP, or
73 	 * - if the SPMC is in S-EL1 then this UUID is for OP-TEE as a
74 	 *   logical partition, residing in the same exception level as the
75 	 *   SPMC
76 	 * UUID 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
77 	 */
78 	0xe0786148, 0xe311f8e7, 0x02005ebc, 0x1bc5d5a5,
79 };
80 
81 /*
82  * If struct ffa_rxtx::size is 0 the RX/TX buffers are not mapped or initialized.
83  *
84  * struct ffa_rxtx::spinlock protects the variables below from concurrent
85  * access. This includes the use of the content of struct ffa_rxtx::rx and
86  * @frag_state_head.
87  *
88  * struct ffa_rxtx::tx_is_mine is true when we may write to struct
89  * ffa_rxtx::tx and false when it is owned by normal world.
90  *
91  * Note that we can't prevent normal world from updating the content of
92  * these buffers so we must always be careful when reading, even while we
93  * hold the lock.
94  */
95 
96 static struct ffa_rxtx my_rxtx __nex_bss;
97 
98 static bool is_nw_buf(struct ffa_rxtx *rxtx)
99 {
100 	return rxtx == &my_rxtx;
101 }
102 
103 static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
104 	SLIST_HEAD_INITIALIZER(&frag_state_head);
105 
106 static uint64_t notif_pending_bitmap;
107 static uint64_t notif_bound_bitmap;
108 static bool notif_vm_id_valid;
109 static int notif_intid = -1;
110 #else
111 static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
112 static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
113 static struct ffa_rxtx my_rxtx = {
114 	.rx = __rx_buf,
115 	.tx = __tx_buf,
116 	.size = sizeof(__rx_buf),
117 };
118 #endif
119 
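/*
 * In FFA_MSG_SEND_DIRECT_REQ/RESP, w1 holds the sender endpoint ID in
 * bits [31:16] and the receiver endpoint ID in bits [15:0]. Swapping the
 * two halves of the received word gives the source/destination word to
 * use in the response.
 */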
120 static uint32_t swap_src_dst(uint32_t src_dst)
121 {
122 	return (src_dst >> 16) | (src_dst << 16);
123 }
124 
125 static uint16_t get_sender_id(uint32_t src_dst)
126 {
127 	return src_dst >> 16;
128 }
129 
130 void spmc_set_args(struct thread_smc_args *args, uint32_t fid, uint32_t src_dst,
131 		   uint32_t w2, uint32_t w3, uint32_t w4, uint32_t w5)
132 {
133 	*args = (struct thread_smc_args){ .a0 = fid,
134 					  .a1 = src_dst,
135 					  .a2 = w2,
136 					  .a3 = w3,
137 					  .a4 = w4,
138 					  .a5 = w5, };
139 }
140 
141 static void set_simple_ret_val(struct thread_smc_args *args, int ffa_ret)
142 {
143 	if (ffa_ret)
144 		spmc_set_args(args, FFA_ERROR, 0, ffa_ret, 0, 0, 0);
145 	else
146 		spmc_set_args(args, FFA_SUCCESS_32, 0, 0, 0, 0, 0);
147 }
148 
149 uint32_t spmc_exchange_version(uint32_t vers, struct ffa_rxtx *rxtx)
150 {
151 	/*
152 	 * No locking; if the caller makes concurrent calls to this it's
153 	 * only making a mess for itself. We must be able to renegotiate
154 	 * the FF-A version in order to support differing versions between
155 	 * the loader and the driver.
156 	 */
157 	if (vers < FFA_VERSION_1_1)
158 		rxtx->ffa_vers = FFA_VERSION_1_0;
159 	else
160 		rxtx->ffa_vers = FFA_VERSION_1_1;
161 
162 	return rxtx->ffa_vers;
163 }
164 
165 static bool is_ffa_success(uint32_t fid)
166 {
167 #ifdef ARM64
168 	if (fid == FFA_SUCCESS_64)
169 		return true;
170 #endif
171 	return fid == FFA_SUCCESS_32;
172 }
173 
174 static int32_t get_ffa_ret_code(const struct thread_smc_args *args)
175 {
176 	if (is_ffa_success(args->a0))
177 		return FFA_OK;
178 	if (args->a0 == FFA_ERROR && args->a2)
179 		return args->a2;
180 	return FFA_NOT_SUPPORTED;
181 }
182 
183 static int ffa_simple_call(uint32_t fid, unsigned long a1, unsigned long a2,
184 			   unsigned long a3, unsigned long a4)
185 {
186 	struct thread_smc_args args = {
187 		.a0 = fid,
188 		.a1 = a1,
189 		.a2 = a2,
190 		.a3 = a3,
191 		.a4 = a4,
192 	};
193 
194 	thread_smccc(&args);
195 
196 	return get_ffa_ret_code(&args);
197 }
198 
199 static int __maybe_unused ffa_features(uint32_t id)
200 {
201 	return ffa_simple_call(FFA_FEATURES, id, 0, 0, 0);
202 }
203 
204 static int __maybe_unused ffa_set_notification(uint16_t dst, uint16_t src,
205 					       uint32_t flags, uint64_t bitmap)
206 {
207 	return ffa_simple_call(FFA_NOTIFICATION_SET,
208 			       SHIFT_U32(src, 16) | dst, flags,
209 			       low32_from_64(bitmap), high32_from_64(bitmap));
210 }
211 
212 #if defined(CFG_CORE_SEL1_SPMC)
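/*
 * FFA_FEATURES reports whether an FF-A ABI is implemented by this SPMC
 * and, for some ABIs, interface properties: the notification interrupt ID
 * for FFA_FEATURE_SCHEDULE_RECV_INTR, the minimum buffer size/alignment
 * granule (0 = 4KiB) for FFA_RXTX_MAP, and support for dynamically
 * allocated descriptor buffers for FFA_MEM_SHARE.
 */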
213 static void handle_features(struct thread_smc_args *args)
214 {
215 	uint32_t ret_fid = FFA_ERROR;
216 	uint32_t ret_w2 = FFA_NOT_SUPPORTED;
217 
218 	switch (args->a1) {
219 	case FFA_FEATURE_SCHEDULE_RECV_INTR:
220 		if (spmc_notif_is_ready) {
221 			ret_fid = FFA_SUCCESS_32;
222 			ret_w2 = notif_intid;
223 		}
224 		break;
225 
226 #ifdef ARM64
227 	case FFA_RXTX_MAP_64:
228 #endif
229 	case FFA_RXTX_MAP_32:
230 		ret_fid = FFA_SUCCESS_32;
231 		ret_w2 = 0; /* 4kB minimum buffer size and alignment boundary */
232 		break;
233 #ifdef ARM64
234 	case FFA_MEM_SHARE_64:
235 #endif
236 	case FFA_MEM_SHARE_32:
237 		ret_fid = FFA_SUCCESS_32;
238 		/*
239 		 * Partition manager supports transmission of a memory
240 		 * transaction descriptor in a buffer dynamically allocated
241 		 * by the endpoint.
242 		 */
243 		ret_w2 = BIT(0);
244 		break;
245 
246 	case FFA_ERROR:
247 	case FFA_VERSION:
248 	case FFA_SUCCESS_32:
249 #ifdef ARM64
250 	case FFA_SUCCESS_64:
251 #endif
252 	case FFA_FEATURES:
253 	case FFA_SPM_ID_GET:
254 	case FFA_MEM_FRAG_TX:
255 	case FFA_MEM_RECLAIM:
256 	case FFA_MSG_SEND_DIRECT_REQ_64:
257 	case FFA_MSG_SEND_DIRECT_REQ_32:
258 	case FFA_INTERRUPT:
259 	case FFA_PARTITION_INFO_GET:
260 	case FFA_RXTX_UNMAP:
261 	case FFA_RX_RELEASE:
262 	case FFA_FEATURE_MANAGED_EXIT_INTR:
263 	case FFA_NOTIFICATION_BITMAP_CREATE:
264 	case FFA_NOTIFICATION_BITMAP_DESTROY:
265 	case FFA_NOTIFICATION_BIND:
266 	case FFA_NOTIFICATION_UNBIND:
267 	case FFA_NOTIFICATION_SET:
268 	case FFA_NOTIFICATION_GET:
269 	case FFA_NOTIFICATION_INFO_GET_32:
270 #ifdef ARM64
271 	case FFA_NOTIFICATION_INFO_GET_64:
272 #endif
273 		ret_fid = FFA_SUCCESS_32;
274 		ret_w2 = FFA_PARAM_MBZ;
275 		break;
276 	default:
277 		break;
278 	}
279 
280 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
281 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
282 }
283 
284 static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
285 {
286 	tee_mm_entry_t *mm = NULL;
287 
288 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
289 		return FFA_INVALID_PARAMETERS;
290 
291 	mm = tee_mm_alloc(&tee_mm_shm, sz);
292 	if (!mm)
293 		return FFA_NO_MEMORY;
294 
295 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
296 					  sz / SMALL_PAGE_SIZE,
297 					  MEM_AREA_NSEC_SHM)) {
298 		tee_mm_free(mm);
299 		return FFA_INVALID_PARAMETERS;
300 	}
301 
302 	*va_ret = (void *)tee_mm_get_smem(mm);
303 	return 0;
304 }
305 
306 static void handle_spm_id_get(struct thread_smc_args *args)
307 {
308 	spmc_set_args(args, FFA_SUCCESS_32, FFA_PARAM_MBZ, my_endpoint_id,
309 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
310 }
311 
312 static void unmap_buf(void *va, size_t sz)
313 {
314 	tee_mm_entry_t *mm = tee_mm_find(&tee_mm_shm, (vaddr_t)va);
315 
316 	assert(mm);
317 	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
318 	tee_mm_free(mm);
319 }
320 
321 void spmc_handle_rxtx_map(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
322 {
323 	int rc = 0;
324 	unsigned int sz = 0;
325 	paddr_t rx_pa = 0;
326 	paddr_t tx_pa = 0;
327 	void *rx = NULL;
328 	void *tx = NULL;
329 
330 	cpu_spin_lock(&rxtx->spinlock);
331 
332 	if (args->a3 & GENMASK_64(63, 6)) {
333 		rc = FFA_INVALID_PARAMETERS;
334 		goto out;
335 	}
336 
337 	sz = args->a3 * SMALL_PAGE_SIZE;
338 	if (!sz) {
339 		rc = FFA_INVALID_PARAMETERS;
340 		goto out;
341 	}
342 	/* TX/RX are swapped compared to the caller */
343 	tx_pa = args->a2;
344 	rx_pa = args->a1;
345 
346 	if (rxtx->size) {
347 		rc = FFA_DENIED;
348 		goto out;
349 	}
350 
351 	/*
352 	 * If the buffer comes from a SP the address is virtual and already
353 	 * mapped.
354 	 */
355 	if (is_nw_buf(rxtx)) {
356 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
357 			enum teecore_memtypes mt = MEM_AREA_NEX_NSEC_SHM;
358 			bool tx_alloced = false;
359 
360 			/*
361 			 * With virtualization we establish this mapping in
362 			 * the nexus mapping which then is replicated to
363 			 * each partition.
364 			 *
365 			 * This means that this mapping must be done before
366 			 * any partition is created and then must not be
367 			 * changed.
368 			 */
369 
370 			/*
371 			 * core_mmu_add_mapping() may reuse previous
372 			 * mappings. First check if there's any mappings to
373 			 * reuse so we know how to clean up in case of
374 			 * failure.
375 			 */
376 			tx = phys_to_virt(tx_pa, mt, sz);
377 			rx = phys_to_virt(rx_pa, mt, sz);
378 			if (!tx) {
379 				tx = core_mmu_add_mapping(mt, tx_pa, sz);
380 				if (!tx) {
381 					rc = FFA_NO_MEMORY;
382 					goto out;
383 				}
384 				tx_alloced = true;
385 			}
386 			if (!rx)
387 				rx = core_mmu_add_mapping(mt, rx_pa, sz);
388 
389 			if (!rx) {
390 				if (tx_alloced && tx)
391 					core_mmu_remove_mapping(mt, tx, sz);
392 				rc = FFA_NO_MEMORY;
393 				goto out;
394 			}
395 		} else {
396 			rc = map_buf(tx_pa, sz, &tx);
397 			if (rc)
398 				goto out;
399 			rc = map_buf(rx_pa, sz, &rx);
400 			if (rc) {
401 				unmap_buf(tx, sz);
402 				goto out;
403 			}
404 		}
405 		rxtx->tx = tx;
406 		rxtx->rx = rx;
407 	} else {
408 		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
409 			rc = FFA_INVALID_PARAMETERS;
410 			goto out;
411 		}
412 
413 		if (!virt_to_phys((void *)tx_pa) ||
414 		    !virt_to_phys((void *)rx_pa)) {
415 			rc = FFA_INVALID_PARAMETERS;
416 			goto out;
417 		}
418 
419 		rxtx->tx = (void *)tx_pa;
420 		rxtx->rx = (void *)rx_pa;
421 	}
422 
423 	rxtx->size = sz;
424 	rxtx->tx_is_mine = true;
425 	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
426 	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
427 out:
428 	cpu_spin_unlock(&rxtx->spinlock);
429 	set_simple_ret_val(args, rc);
430 }
431 
432 void spmc_handle_rxtx_unmap(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
433 {
434 	int rc = FFA_INVALID_PARAMETERS;
435 
436 	cpu_spin_lock(&rxtx->spinlock);
437 
438 	if (!rxtx->size)
439 		goto out;
440 
441 	/* We don't unmap the SP memory as the SP might still use it */
442 	if (is_nw_buf(rxtx)) {
443 		unmap_buf(rxtx->rx, rxtx->size);
444 		unmap_buf(rxtx->tx, rxtx->size);
445 	}
446 	rxtx->size = 0;
447 	rxtx->rx = NULL;
448 	rxtx->tx = NULL;
449 	rc = 0;
450 out:
451 	cpu_spin_unlock(&rxtx->spinlock);
452 	set_simple_ret_val(args, rc);
453 }
454 
455 void spmc_handle_rx_release(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
456 {
457 	int rc = 0;
458 
459 	cpu_spin_lock(&rxtx->spinlock);
460 	/* The sender's RX is our TX */
461 	if (!rxtx->size || rxtx->tx_is_mine) {
462 		rc = FFA_DENIED;
463 	} else {
464 		rc = 0;
465 		rxtx->tx_is_mine = true;
466 	}
467 	cpu_spin_unlock(&rxtx->spinlock);
468 
469 	set_simple_ret_val(args, rc);
470 }
471 
472 static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
473 {
474 	return !w0 && !w1 && !w2 && !w3;
475 }
476 
477 static bool is_my_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
478 {
479 	/*
480 	 * This depends on which UUID we have been assigned.
481 	 * TODO add a generic mechanism to obtain our UUID.
482 	 *
483 	 * The test below is for the hard coded UUID
484 	 * 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
485 	 */
486 	return w0 == my_uuid_words[0] && w1 == my_uuid_words[1] &&
487 	       w2 == my_uuid_words[2] && w3 == my_uuid_words[3];
488 }
489 
490 TEE_Result spmc_fill_partition_entry(uint32_t ffa_vers, void *buf, size_t blen,
491 				     size_t idx, uint16_t endpoint_id,
492 				     uint16_t execution_context,
493 				     uint32_t part_props,
494 				     const uint32_t uuid_words[4])
495 {
496 	struct ffa_partition_info_x *fpi = NULL;
497 	size_t fpi_size = sizeof(*fpi);
498 
499 	if (ffa_vers >= FFA_VERSION_1_1)
500 		fpi_size += FFA_UUID_SIZE;
501 
502 	if ((idx + 1) * fpi_size > blen)
503 		return TEE_ERROR_OUT_OF_MEMORY;
504 
505 	fpi = (void *)((vaddr_t)buf + idx * fpi_size);
506 	fpi->id = endpoint_id;
507 	/* Number of execution contexts implemented by this partition */
508 	fpi->execution_context = execution_context;
509 
510 	fpi->partition_properties = part_props;
511 
512 	if (ffa_vers >= FFA_VERSION_1_1) {
513 		if (uuid_words)
514 			memcpy(fpi->uuid, uuid_words, FFA_UUID_SIZE);
515 		else
516 			memset(fpi->uuid, 0, FFA_UUID_SIZE);
517 	}
518 
519 	return TEE_SUCCESS;
520 }
521 
522 static int handle_partition_info_get_all(size_t *elem_count,
523 					 struct ffa_rxtx *rxtx, bool count_only)
524 {
525 	if (!count_only) {
526 		/* Add OP-TEE SP */
527 		if (spmc_fill_partition_entry(rxtx->ffa_vers, rxtx->tx,
528 					      rxtx->size, 0, my_endpoint_id,
529 					      CFG_TEE_CORE_NB_CORE,
530 					      my_part_props, my_uuid_words))
531 			return FFA_NO_MEMORY;
532 	}
533 	*elem_count = 1;
534 
535 	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
536 		if (sp_partition_info_get(rxtx->ffa_vers, rxtx->tx, rxtx->size,
537 					  NULL, elem_count, count_only))
538 			return FFA_NO_MEMORY;
539 	}
540 
541 	return FFA_OK;
542 }
543 
544 void spmc_handle_partition_info_get(struct thread_smc_args *args,
545 				    struct ffa_rxtx *rxtx)
546 {
547 	TEE_Result res = TEE_SUCCESS;
548 	uint32_t ret_fid = FFA_ERROR;
549 	uint32_t fpi_size = 0;
550 	uint32_t rc = 0;
551 	bool count_only = args->a5 & FFA_PARTITION_INFO_GET_COUNT_FLAG;
552 
553 	if (!count_only) {
554 		cpu_spin_lock(&rxtx->spinlock);
555 
556 		if (!rxtx->size || !rxtx->tx_is_mine) {
557 			rc = FFA_BUSY;
558 			goto out;
559 		}
560 	}
561 
562 	if (is_nil_uuid(args->a1, args->a2, args->a3, args->a4)) {
563 		size_t elem_count = 0;
564 
565 		ret_fid = handle_partition_info_get_all(&elem_count, rxtx,
566 							count_only);
567 
568 		if (ret_fid) {
569 			rc = ret_fid;
570 			ret_fid = FFA_ERROR;
571 		} else {
572 			ret_fid = FFA_SUCCESS_32;
573 			rc = elem_count;
574 		}
575 
576 		goto out;
577 	}
578 
579 	if (is_my_uuid(args->a1, args->a2, args->a3, args->a4)) {
580 		if (!count_only) {
581 			res = spmc_fill_partition_entry(rxtx->ffa_vers,
582 							rxtx->tx, rxtx->size, 0,
583 							my_endpoint_id,
584 							CFG_TEE_CORE_NB_CORE,
585 							my_part_props,
586 							my_uuid_words);
587 			if (res) {
588 				ret_fid = FFA_ERROR;
589 				rc = FFA_INVALID_PARAMETERS;
590 				goto out;
591 			}
592 		}
593 		rc = 1;
594 	} else if (IS_ENABLED(CFG_SECURE_PARTITION)) {
595 		uint32_t uuid_array[4] = { 0 };
596 		TEE_UUID uuid = { };
597 		size_t count = 0;
598 
599 		uuid_array[0] = args->a1;
600 		uuid_array[1] = args->a2;
601 		uuid_array[2] = args->a3;
602 		uuid_array[3] = args->a4;
603 		tee_uuid_from_octets(&uuid, (uint8_t *)uuid_array);
604 
605 		res = sp_partition_info_get(rxtx->ffa_vers, rxtx->tx,
606 					    rxtx->size, &uuid, &count,
607 					    count_only);
608 		if (res != TEE_SUCCESS) {
609 			ret_fid = FFA_ERROR;
610 			rc = FFA_INVALID_PARAMETERS;
611 			goto out;
612 		}
613 		rc = count;
614 	} else {
615 		ret_fid = FFA_ERROR;
616 		rc = FFA_INVALID_PARAMETERS;
617 		goto out;
618 	}
619 
620 	ret_fid = FFA_SUCCESS_32;
621 
622 out:
623 	if (ret_fid == FFA_SUCCESS_32 && !count_only &&
624 	    rxtx->ffa_vers >= FFA_VERSION_1_1)
625 		fpi_size = sizeof(struct ffa_partition_info_x) + FFA_UUID_SIZE;
626 
627 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, fpi_size,
628 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
629 	if (!count_only) {
630 		rxtx->tx_is_mine = false;
631 		cpu_spin_unlock(&rxtx->spinlock);
632 	}
633 }
634 
635 static void spmc_handle_run(struct thread_smc_args *args)
636 {
637 	uint16_t endpoint = FFA_TARGET_INFO_GET_SP_ID(args->a1);
638 	uint16_t thread_id = FFA_TARGET_INFO_GET_VCPU_ID(args->a1);
639 	uint32_t rc = FFA_OK;
640 
641 	if (endpoint != my_endpoint_id) {
642 		/*
643 		 * The endpoint should be an SP, try to resume the SP from
644 		 * preempted into busy state.
645 		 */
646 		rc = spmc_sp_resume_from_preempted(endpoint);
647 		if (rc)
648 			goto out;
649 	}
650 
651 	thread_resume_from_rpc(thread_id, 0, 0, 0, 0);
652 
653 	/* thread_resume_from_rpc() returns only if the thread_id is invalid */
654 	rc = FFA_INVALID_PARAMETERS;
655 
656 out:
657 	set_simple_ret_val(args, rc);
658 }
659 #endif /*CFG_CORE_SEL1_SPMC*/
660 
661 static uint32_t spmc_enable_async_notif(uint32_t bottom_half_value,
662 					uint16_t vm_id)
663 {
664 	uint32_t old_itr_status = 0;
665 
666 	if (!spmc_notif_is_ready) {
667 		/*
668 		 * This should never happen if normal world respects the
669 		 * exchanged capabilities.
670 		 */
671 		EMSG("Asynchronous notifications are not ready");
672 		return TEE_ERROR_NOT_IMPLEMENTED;
673 	}
674 
675 	if (bottom_half_value >= OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE) {
676 		EMSG("Invalid bottom half value %"PRIu32, bottom_half_value);
677 		return TEE_ERROR_BAD_PARAMETERS;
678 	}
679 
680 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
681 	do_bottom_half_value = bottom_half_value;
682 	if (!IS_ENABLED(CFG_CORE_SEL1_SPMC))
683 		notif_vm_id = vm_id;
684 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
685 
686 	notif_deliver_atomic_event(NOTIF_EVENT_STARTED);
687 	return TEE_SUCCESS;
688 }
689 
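/*
 * A yielding call either resumes a thread that is suspended in an RPC
 * (OPTEE_FFA_YIELDING_CALL_RESUME) or allocates a new thread for the
 * request. Neither thread_resume_from_rpc() nor thread_alloc_and_run()
 * returns on success, so reaching the spmc_set_args() below means the
 * call failed and the error is reported in the direct response.
 */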
690 static void handle_yielding_call(struct thread_smc_args *args,
691 				 uint32_t direct_resp_fid)
692 {
693 	TEE_Result res = 0;
694 
695 	thread_check_canaries();
696 
697 #ifdef ARM64
698 	/* Saving this for an eventual RPC */
699 	thread_get_core_local()->direct_resp_fid = direct_resp_fid;
700 #endif
701 
702 	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
703 		/* Note connection to struct thread_rpc_arg::ret */
704 		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
705 				       0);
706 		res = TEE_ERROR_BAD_PARAMETERS;
707 	} else {
708 		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
709 				     args->a6, args->a7);
710 		res = TEE_ERROR_BUSY;
711 	}
712 	spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1),
713 		      0, res, 0, 0);
714 }
715 
716 static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
717 {
718 	uint64_t cookie = reg_pair_to_64(a5, a4);
719 	uint32_t res = 0;
720 
721 	res = mobj_ffa_unregister_by_cookie(cookie);
722 	switch (res) {
723 	case TEE_SUCCESS:
724 	case TEE_ERROR_ITEM_NOT_FOUND:
725 		return 0;
726 	case TEE_ERROR_BUSY:
727 		EMSG("res %#"PRIx32, res);
728 		return FFA_BUSY;
729 	default:
730 		EMSG("res %#"PRIx32, res);
731 		return FFA_INVALID_PARAMETERS;
732 	}
733 }
734 
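/*
 * Blocking services are handled in place on the calling CPU without
 * allocating a thread. For example, an OPTEE_FFA_GET_API_VERSION request
 * is answered with w3 = OPTEE_FFA_VERSION_MAJOR and
 * w4 = OPTEE_FFA_VERSION_MINOR in the direct response.
 */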
735 static void handle_blocking_call(struct thread_smc_args *args,
736 				 uint32_t direct_resp_fid)
737 {
738 	uint32_t sec_caps = 0;
739 
740 	switch (args->a3) {
741 	case OPTEE_FFA_GET_API_VERSION:
742 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
743 			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
744 			      0);
745 		break;
746 	case OPTEE_FFA_GET_OS_VERSION:
747 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
748 			      CFG_OPTEE_REVISION_MAJOR,
749 			      CFG_OPTEE_REVISION_MINOR, TEE_IMPL_GIT_SHA1);
750 		break;
751 	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
752 		sec_caps = OPTEE_FFA_SEC_CAP_ARG_OFFSET;
753 		if (spmc_notif_is_ready)
754 			sec_caps |= OPTEE_FFA_SEC_CAP_ASYNC_NOTIF;
755 		spmc_set_args(args, direct_resp_fid,
756 			      swap_src_dst(args->a1), 0, 0,
757 			      THREAD_RPC_MAX_NUM_PARAMS, sec_caps);
758 		break;
759 	case OPTEE_FFA_UNREGISTER_SHM:
760 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
761 			      handle_unregister_shm(args->a4, args->a5), 0, 0);
762 		break;
763 	case OPTEE_FFA_ENABLE_ASYNC_NOTIF:
764 		spmc_set_args(args, direct_resp_fid,
765 			      swap_src_dst(args->a1), 0,
766 			      spmc_enable_async_notif(args->a4,
767 						      FFA_SRC(args->a1)),
768 			      0, 0);
769 		break;
770 	default:
771 		EMSG("Unhandled blocking service ID %#"PRIx32,
772 		     (uint32_t)args->a3);
773 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
774 			      TEE_ERROR_BAD_PARAMETERS, 0, 0);
775 	}
776 }
777 
778 static void handle_framework_direct_request(struct thread_smc_args *args,
779 					    struct ffa_rxtx *rxtx,
780 					    uint32_t direct_resp_fid)
781 {
782 	uint32_t w0 = FFA_ERROR;
783 	uint32_t w1 = FFA_PARAM_MBZ;
784 	uint32_t w2 = FFA_NOT_SUPPORTED;
785 	uint32_t w3 = FFA_PARAM_MBZ;
786 
787 	switch (args->a2 & FFA_MSG_TYPE_MASK) {
788 	case FFA_MSG_SEND_VM_CREATED:
789 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
790 			uint16_t guest_id = args->a5;
791 			TEE_Result res = virt_guest_created(guest_id);
792 
793 			w0 = direct_resp_fid;
794 			w1 = swap_src_dst(args->a1);
795 			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_CREATED;
796 			if (res == TEE_SUCCESS)
797 				w3 = FFA_OK;
798 			else if (res == TEE_ERROR_OUT_OF_MEMORY)
799 				w3 = FFA_DENIED;
800 			else
801 				w3 = FFA_INVALID_PARAMETERS;
802 		}
803 		break;
804 	case FFA_MSG_SEND_VM_DESTROYED:
805 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
806 			uint16_t guest_id = args->a5;
807 			TEE_Result res = virt_guest_destroyed(guest_id);
808 
809 			w0 = direct_resp_fid;
810 			w1 = swap_src_dst(args->a1);
811 			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_DESTROYED;
812 			if (res == TEE_SUCCESS)
813 				w3 = FFA_OK;
814 			else
815 				w3 = FFA_INVALID_PARAMETERS;
816 		}
817 		break;
818 	case FFA_MSG_VERSION_REQ:
819 		w0 = direct_resp_fid;
820 		w1 = swap_src_dst(args->a1);
821 		w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_VERSION_RESP;
822 		w3 = spmc_exchange_version(args->a3, rxtx);
823 		break;
824 	default:
825 		break;
826 	}
827 	spmc_set_args(args, w0, w1, w2, w3, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
828 }
829 
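/*
 * Direct requests destined for an endpoint other than OP-TEE are handed
 * over to the SP handler. Framework messages (VM created/destroyed and
 * version negotiation) go to handle_framework_direct_request(),
 * everything else is an OP-TEE yielding or blocking call from the normal
 * world driver.
 */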
830 static void handle_direct_request(struct thread_smc_args *args,
831 				  struct ffa_rxtx *rxtx)
832 {
833 	uint32_t direct_resp_fid = 0;
834 
835 	if (IS_ENABLED(CFG_SECURE_PARTITION) &&
836 	    FFA_DST(args->a1) != my_endpoint_id) {
837 		spmc_sp_start_thread(args);
838 		return;
839 	}
840 
841 	if (OPTEE_SMC_IS_64(args->a0))
842 		direct_resp_fid = FFA_MSG_SEND_DIRECT_RESP_64;
843 	else
844 		direct_resp_fid = FFA_MSG_SEND_DIRECT_RESP_32;
845 
846 	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
847 		handle_framework_direct_request(args, rxtx, direct_resp_fid);
848 		return;
849 	}
850 
851 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
852 	    virt_set_guest(get_sender_id(args->a1))) {
853 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
854 			      TEE_ERROR_ITEM_NOT_FOUND, 0, 0);
855 		return;
856 	}
857 
858 	if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
859 		handle_yielding_call(args, direct_resp_fid);
860 	else
861 		handle_blocking_call(args, direct_resp_fid);
862 
863 	/*
864 	 * Note that handle_yielding_call() typically only returns if a
865 	 * thread cannot be allocated or found. virt_unset_guest() is also
866 	 * called from thread_state_suspend() and thread_state_free().
867 	 */
868 	virt_unset_guest();
869 }
870 
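/*
 * Parse a memory transaction descriptor, in the FF-A 1.0 or 1.1 layout
 * depending on the negotiated version, from an untrusted buffer into
 * @trans. Fields are read with READ_ONCE() and bounds checked since
 * normal world can change the buffer contents at any time.
 */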
871 int spmc_read_mem_transaction(uint32_t ffa_vers, void *buf, size_t blen,
872 			      struct ffa_mem_transaction_x *trans)
873 {
874 	uint16_t mem_reg_attr = 0;
875 	uint32_t flags = 0;
876 	uint32_t count = 0;
877 	uint32_t offs = 0;
878 	uint32_t size = 0;
879 	size_t n = 0;
880 
881 	if (!IS_ALIGNED_WITH_TYPE(buf, uint64_t))
882 		return FFA_INVALID_PARAMETERS;
883 
884 	if (ffa_vers >= FFA_VERSION_1_1) {
885 		struct ffa_mem_transaction_1_1 *descr = NULL;
886 
887 		if (blen < sizeof(*descr))
888 			return FFA_INVALID_PARAMETERS;
889 
890 		descr = buf;
891 		trans->sender_id = READ_ONCE(descr->sender_id);
892 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
893 		flags = READ_ONCE(descr->flags);
894 		trans->global_handle = READ_ONCE(descr->global_handle);
895 		trans->tag = READ_ONCE(descr->tag);
896 
897 		count = READ_ONCE(descr->mem_access_count);
898 		size = READ_ONCE(descr->mem_access_size);
899 		offs = READ_ONCE(descr->mem_access_offs);
900 	} else {
901 		struct ffa_mem_transaction_1_0 *descr = NULL;
902 
903 		if (blen < sizeof(*descr))
904 			return FFA_INVALID_PARAMETERS;
905 
906 		descr = buf;
907 		trans->sender_id = READ_ONCE(descr->sender_id);
908 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
909 		flags = READ_ONCE(descr->flags);
910 		trans->global_handle = READ_ONCE(descr->global_handle);
911 		trans->tag = READ_ONCE(descr->tag);
912 
913 		count = READ_ONCE(descr->mem_access_count);
914 		size = sizeof(struct ffa_mem_access);
915 		offs = offsetof(struct ffa_mem_transaction_1_0,
916 				mem_access_array);
917 	}
918 
919 	if (mem_reg_attr > UINT8_MAX || flags > UINT8_MAX ||
920 	    size > UINT8_MAX || count > UINT8_MAX || offs > UINT16_MAX)
921 		return FFA_INVALID_PARAMETERS;
922 
923 	/* Check that the endpoint memory access descriptor array fits */
924 	if (MUL_OVERFLOW(size, count, &n) || ADD_OVERFLOW(offs, n, &n) ||
925 	    n > blen)
926 		return FFA_INVALID_PARAMETERS;
927 
928 	trans->mem_reg_attr = mem_reg_attr;
929 	trans->flags = flags;
930 	trans->mem_access_size = size;
931 	trans->mem_access_count = count;
932 	trans->mem_access_offs = offs;
933 	return 0;
934 }
935 
936 #if defined(CFG_CORE_SEL1_SPMC)
937 static int get_acc_perms(vaddr_t mem_acc_base, unsigned int mem_access_size,
938 			 unsigned int mem_access_count, uint8_t *acc_perms,
939 			 unsigned int *region_offs)
940 {
941 	struct ffa_mem_access_perm *descr = NULL;
942 	struct ffa_mem_access *mem_acc = NULL;
943 	unsigned int n = 0;
944 
945 	for (n = 0; n < mem_access_count; n++) {
946 		mem_acc = (void *)(mem_acc_base + mem_access_size * n);
947 		descr = &mem_acc->access_perm;
948 		if (READ_ONCE(descr->endpoint_id) == my_endpoint_id) {
949 			*acc_perms = READ_ONCE(descr->perm);
950 			*region_offs = READ_ONCE(mem_acc->region_offs);
951 			return 0;
952 		}
953 	}
954 
955 	return FFA_INVALID_PARAMETERS;
956 }
957 
958 static int mem_share_init(struct ffa_mem_transaction_x *mem_trans, void *buf,
959 			  size_t blen, unsigned int *page_count,
960 			  unsigned int *region_count, size_t *addr_range_offs)
961 {
962 	const uint16_t exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
963 	const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
964 	struct ffa_mem_region *region_descr = NULL;
965 	unsigned int region_descr_offs = 0;
966 	uint8_t mem_acc_perm = 0;
967 	size_t n = 0;
968 
969 	if (mem_trans->mem_reg_attr != exp_mem_reg_attr)
970 		return FFA_INVALID_PARAMETERS;
971 
972 	/* Check that the access permissions matches what's expected */
973 	if (get_acc_perms((vaddr_t)buf + mem_trans->mem_access_offs,
974 			  mem_trans->mem_access_size,
975 			  mem_trans->mem_access_count,
976 			  &mem_acc_perm, &region_descr_offs) ||
977 	    mem_acc_perm != exp_mem_acc_perm)
978 		return FFA_INVALID_PARAMETERS;
979 
980 	/* Check that the Composite memory region descriptor fits */
981 	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
982 	    n > blen)
983 		return FFA_INVALID_PARAMETERS;
984 
985 	if (!IS_ALIGNED_WITH_TYPE((vaddr_t)buf + region_descr_offs,
986 				  struct ffa_mem_region))
987 		return FFA_INVALID_PARAMETERS;
988 
989 	region_descr = (struct ffa_mem_region *)((vaddr_t)buf +
990 						 region_descr_offs);
991 	*page_count = READ_ONCE(region_descr->total_page_count);
992 	*region_count = READ_ONCE(region_descr->address_range_count);
993 	*addr_range_offs = n;
994 	return 0;
995 }
996 
997 static int add_mem_share_helper(struct mem_share_state *s, void *buf,
998 				size_t flen)
999 {
1000 	unsigned int region_count = flen / sizeof(struct ffa_address_range);
1001 	struct ffa_address_range *arange = NULL;
1002 	unsigned int n = 0;
1003 
1004 	if (region_count > s->region_count)
1005 		region_count = s->region_count;
1006 
1007 	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range))
1008 		return FFA_INVALID_PARAMETERS;
1009 	arange = buf;
1010 
1011 	for (n = 0; n < region_count; n++) {
1012 		unsigned int page_count = READ_ONCE(arange[n].page_count);
1013 		uint64_t addr = READ_ONCE(arange[n].address);
1014 
1015 		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
1016 					  addr, page_count))
1017 			return FFA_INVALID_PARAMETERS;
1018 	}
1019 
1020 	s->region_count -= region_count;
1021 	if (s->region_count)
1022 		return region_count * sizeof(*arange);
1023 
1024 	if (s->current_page_idx != s->page_count)
1025 		return FFA_INVALID_PARAMETERS;
1026 
1027 	return 0;
1028 }
1029 
1030 static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
1031 {
1032 	int rc = 0;
1033 
1034 	rc = add_mem_share_helper(&s->share, buf, flen);
1035 	if (rc >= 0) {
1036 		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
1037 			/* We're not at the end of the descriptor yet */
1038 			if (s->share.region_count)
1039 				return s->frag_offset;
1040 
1041 			/* We're done */
1042 			rc = 0;
1043 		} else {
1044 			rc = FFA_INVALID_PARAMETERS;
1045 		}
1046 	}
1047 
1048 	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
1049 	if (rc < 0)
1050 		mobj_ffa_sel1_spmc_delete(s->share.mf);
1051 	else
1052 		mobj_ffa_push_to_inactive(s->share.mf);
1053 	free(s);
1054 
1055 	return rc;
1056 }
1057 
1058 static bool is_sp_share(struct ffa_mem_transaction_x *mem_trans,
1059 			void *buf)
1060 {
1061 	struct ffa_mem_access_perm *perm = NULL;
1062 	struct ffa_mem_access *mem_acc = NULL;
1063 
1064 	if (!IS_ENABLED(CFG_SECURE_PARTITION))
1065 		return false;
1066 
1067 	if (mem_trans->mem_access_count < 1)
1068 		return false;
1069 
1070 	mem_acc = (void *)((vaddr_t)buf + mem_trans->mem_access_offs);
1071 	perm = &mem_acc->access_perm;
1072 
1073 	/*
1074 	 * perm->endpoint_id is read here only to check if the endpoint is
1075 	 * OP-TEE. We read it again later, but with additional checks there
1076 	 * to make sure that the data is correct.
1077 	 */
1078 	return READ_ONCE(perm->endpoint_id) != my_endpoint_id;
1079 }
1080 
1081 static int add_mem_share(struct ffa_mem_transaction_x *mem_trans,
1082 			 tee_mm_entry_t *mm, void *buf, size_t blen,
1083 			 size_t flen, uint64_t *global_handle)
1084 {
1085 	int rc = 0;
1086 	struct mem_share_state share = { };
1087 	size_t addr_range_offs = 0;
1088 	uint64_t cookie = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
1089 	size_t n = 0;
1090 
1091 	rc = mem_share_init(mem_trans, buf, flen, &share.page_count,
1092 			    &share.region_count, &addr_range_offs);
1093 	if (rc)
1094 		return rc;
1095 
1096 	if (MUL_OVERFLOW(share.region_count,
1097 			 sizeof(struct ffa_address_range), &n) ||
1098 	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
1099 		return FFA_INVALID_PARAMETERS;
1100 
1101 	if (mem_trans->global_handle)
1102 		cookie = mem_trans->global_handle;
1103 	share.mf = mobj_ffa_sel1_spmc_new(cookie, share.page_count);
1104 	if (!share.mf)
1105 		return FFA_NO_MEMORY;
1106 
1107 	if (flen != blen) {
1108 		struct mem_frag_state *s = calloc(1, sizeof(*s));
1109 
1110 		if (!s) {
1111 			rc = FFA_NO_MEMORY;
1112 			goto err;
1113 		}
1114 		s->share = share;
1115 		s->mm = mm;
1116 		s->frag_offset = addr_range_offs;
1117 
1118 		SLIST_INSERT_HEAD(&frag_state_head, s, link);
1119 		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
1120 					flen - addr_range_offs);
1121 
1122 		if (rc >= 0)
1123 			*global_handle = mobj_ffa_get_cookie(share.mf);
1124 
1125 		return rc;
1126 	}
1127 
1128 	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
1129 				  flen - addr_range_offs);
1130 	if (rc) {
1131 		/*
1132 		 * A positive value (bytes consumed) instead of 0 means the
1133 		 * descriptor wasn't complete, which is an error here.
1134 		 */
1135 		rc = FFA_INVALID_PARAMETERS;
1136 		goto err;
1137 	}
1138 
1139 	*global_handle = mobj_ffa_push_to_inactive(share.mf);
1140 
1141 	return 0;
1142 err:
1143 	mobj_ffa_sel1_spmc_delete(share.mf);
1144 	return rc;
1145 }
1146 
1147 static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
1148 				 unsigned int page_count,
1149 				 uint64_t *global_handle, struct ffa_rxtx *rxtx)
1150 {
1151 	struct ffa_mem_transaction_x mem_trans = { };
1152 	int rc = 0;
1153 	size_t len = 0;
1154 	void *buf = NULL;
1155 	tee_mm_entry_t *mm = NULL;
1156 	vaddr_t offs = pbuf & SMALL_PAGE_MASK;
1157 
1158 	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
1159 		return FFA_INVALID_PARAMETERS;
1160 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
1161 		return FFA_INVALID_PARAMETERS;
1162 
1163 	/*
1164 	 * Check that the length reported in flen is covered by len even
1165 	 * if the offset is taken into account.
1166 	 */
1167 	if (len < flen || len - offs < flen)
1168 		return FFA_INVALID_PARAMETERS;
1169 
1170 	mm = tee_mm_alloc(&tee_mm_shm, len);
1171 	if (!mm)
1172 		return FFA_NO_MEMORY;
1173 
1174 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
1175 					  page_count, MEM_AREA_NSEC_SHM)) {
1176 		rc = FFA_INVALID_PARAMETERS;
1177 		goto out;
1178 	}
1179 	buf = (void *)(tee_mm_get_smem(mm) + offs);
1180 
1181 	cpu_spin_lock(&rxtx->spinlock);
1182 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, buf, flen, &mem_trans);
1183 	if (!rc && IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1184 	    virt_set_guest(mem_trans.sender_id))
1185 		rc = FFA_DENIED;
1186 	if (!rc)
1187 		rc = add_mem_share(&mem_trans, mm, buf, blen, flen,
1188 				   global_handle);
1189 	virt_unset_guest();
1190 	cpu_spin_unlock(&rxtx->spinlock);
1191 	if (rc > 0)
1192 		return rc;
1193 
1194 	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1195 out:
1196 	tee_mm_free(mm);
1197 	return rc;
1198 }
1199 
1200 static int handle_mem_share_rxbuf(size_t blen, size_t flen,
1201 				  uint64_t *global_handle,
1202 				  struct ffa_rxtx *rxtx)
1203 {
1204 	struct ffa_mem_transaction_x mem_trans = { };
1205 	int rc = FFA_DENIED;
1206 
1207 	cpu_spin_lock(&rxtx->spinlock);
1208 
1209 	if (!rxtx->rx || flen > rxtx->size)
1210 		goto out;
1211 
1212 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, flen,
1213 				       &mem_trans);
1214 	if (rc)
1215 		goto out;
1216 	if (is_sp_share(&mem_trans, rxtx->rx)) {
1217 		rc = spmc_sp_add_share(&mem_trans, rxtx, blen,
1218 				       global_handle, NULL);
1219 		goto out;
1220 	}
1221 
1222 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1223 	    virt_set_guest(mem_trans.sender_id))
1224 		goto out;
1225 
1226 	rc = add_mem_share(&mem_trans, NULL, rxtx->rx, blen, flen,
1227 			   global_handle);
1228 
1229 	virt_unset_guest();
1230 
1231 out:
1232 	cpu_spin_unlock(&rxtx->spinlock);
1233 
1234 	return rc;
1235 }
1236 
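/*
 * FFA_MEM_SHARE register usage: w1 total length of the transaction
 * descriptor, w2 length of this fragment, w3 address of a buffer holding
 * the descriptor (0 means it's passed in our RX buffer) and w4 its size
 * in pages. A positive return value from the helpers means more
 * fragments are expected and is reported back with FFA_MEM_FRAG_RX.
 */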
1237 static void handle_mem_share(struct thread_smc_args *args,
1238 			     struct ffa_rxtx *rxtx)
1239 {
1240 	uint32_t tot_len = args->a1;
1241 	uint32_t frag_len = args->a2;
1242 	uint64_t addr = args->a3;
1243 	uint32_t page_count = args->a4;
1244 	uint32_t ret_w1 = 0;
1245 	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
1246 	uint32_t ret_w3 = 0;
1247 	uint32_t ret_fid = FFA_ERROR;
1248 	uint64_t global_handle = 0;
1249 	int rc = 0;
1250 
1251 	/* Check that the MBZs are indeed 0 */
1252 	if (args->a5 || args->a6 || args->a7)
1253 		goto out;
1254 
1255 	/* Check that fragment length doesn't exceed total length */
1256 	if (frag_len > tot_len)
1257 		goto out;
1258 
1259 	/* Check for 32-bit calling convention */
1260 	if (args->a0 == FFA_MEM_SHARE_32)
1261 		addr &= UINT32_MAX;
1262 
1263 	if (!addr) {
1264 		/*
1265 		 * The memory transaction descriptor is passed via our rx
1266 		 * buffer.
1267 		 */
1268 		if (page_count)
1269 			goto out;
1270 		rc = handle_mem_share_rxbuf(tot_len, frag_len, &global_handle,
1271 					    rxtx);
1272 	} else {
1273 		rc = handle_mem_share_tmem(addr, tot_len, frag_len, page_count,
1274 					   &global_handle, rxtx);
1275 	}
1276 	if (rc < 0) {
1277 		ret_w2 = rc;
1278 	} else if (rc > 0) {
1279 		ret_fid = FFA_MEM_FRAG_RX;
1280 		ret_w3 = rc;
1281 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1282 	} else {
1283 		ret_fid = FFA_SUCCESS_32;
1284 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1285 	}
1286 out:
1287 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1288 }
1289 
1290 static struct mem_frag_state *get_frag_state(uint64_t global_handle)
1291 {
1292 	struct mem_frag_state *s = NULL;
1293 
1294 	SLIST_FOREACH(s, &frag_state_head, link)
1295 		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
1296 			return s;
1297 
1298 	return NULL;
1299 }
1300 
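/*
 * FFA_MEM_FRAG_TX carries the handle of the pending share in w1/w2, the
 * fragment length in w3 and the sender ID in w4. The fragment data is
 * either in the buffer registered when the share was set up or in the
 * caller's RX buffer.
 */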
1301 static void handle_mem_frag_tx(struct thread_smc_args *args,
1302 			       struct ffa_rxtx *rxtx)
1303 {
1304 	uint64_t global_handle = reg_pair_to_64(args->a2, args->a1);
1305 	size_t flen = args->a3;
1306 	uint32_t endpoint_id = args->a4;
1307 	struct mem_frag_state *s = NULL;
1308 	tee_mm_entry_t *mm = NULL;
1309 	unsigned int page_count = 0;
1310 	void *buf = NULL;
1311 	uint32_t ret_w1 = 0;
1312 	uint32_t ret_w2 = 0;
1313 	uint32_t ret_w3 = 0;
1314 	uint32_t ret_fid = 0;
1315 	int rc = 0;
1316 
1317 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1318 		uint16_t guest_id = endpoint_id >> 16;
1319 
1320 		if (!guest_id || virt_set_guest(guest_id)) {
1321 			rc = FFA_INVALID_PARAMETERS;
1322 			goto out_set_rc;
1323 		}
1324 	}
1325 
1326 	/*
1327 	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
1328 	 * requests.
1329 	 */
1330 
1331 	cpu_spin_lock(&rxtx->spinlock);
1332 
1333 	s = get_frag_state(global_handle);
1334 	if (!s) {
1335 		rc = FFA_INVALID_PARAMETERS;
1336 		goto out;
1337 	}
1338 
1339 	mm = s->mm;
1340 	if (mm) {
1341 		if (flen > tee_mm_get_bytes(mm)) {
1342 			rc = FFA_INVALID_PARAMETERS;
1343 			goto out;
1344 		}
1345 		page_count = s->share.page_count;
1346 		buf = (void *)tee_mm_get_smem(mm);
1347 	} else {
1348 		if (flen > rxtx->size) {
1349 			rc = FFA_INVALID_PARAMETERS;
1350 			goto out;
1351 		}
1352 		buf = rxtx->rx;
1353 	}
1354 
1355 	rc = add_mem_share_frag(s, buf, flen);
1356 out:
1357 	virt_unset_guest();
1358 	cpu_spin_unlock(&rxtx->spinlock);
1359 
1360 	if (rc <= 0 && mm) {
1361 		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1362 		tee_mm_free(mm);
1363 	}
1364 
1365 out_set_rc:
1366 	if (rc < 0) {
1367 		ret_fid = FFA_ERROR;
1368 		ret_w2 = rc;
1369 	} else if (rc > 0) {
1370 		ret_fid = FFA_MEM_FRAG_RX;
1371 		ret_w3 = rc;
1372 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1373 	} else {
1374 		ret_fid = FFA_SUCCESS_32;
1375 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1376 	}
1377 
1378 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1379 }
1380 
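/*
 * FFA_MEM_RECLAIM carries the cookie (handle) in w1/w2. With NS
 * virtualization the owning guest is taken from the partition bits of
 * the cookie, or looked up with virt_find_guest_by_cookie() when the
 * hypervisor bit is set.
 */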
1381 static void handle_mem_reclaim(struct thread_smc_args *args)
1382 {
1383 	int rc = FFA_INVALID_PARAMETERS;
1384 	uint64_t cookie = 0;
1385 
1386 	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
1387 		goto out;
1388 
1389 	cookie = reg_pair_to_64(args->a2, args->a1);
1390 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1391 		uint16_t guest_id = 0;
1392 
1393 		if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
1394 			guest_id = virt_find_guest_by_cookie(cookie);
1395 		} else {
1396 			guest_id = (cookie >> FFA_MEMORY_HANDLE_PRTN_SHIFT) &
1397 				   FFA_MEMORY_HANDLE_PRTN_MASK;
1398 		}
1399 		if (!guest_id || virt_set_guest(guest_id))
1400 			goto out;
1401 	}
1402 
1403 	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
1404 	case TEE_SUCCESS:
1405 		rc = FFA_OK;
1406 		break;
1407 	case TEE_ERROR_ITEM_NOT_FOUND:
1408 		DMSG("cookie %#"PRIx64" not found", cookie);
1409 		rc = FFA_INVALID_PARAMETERS;
1410 		break;
1411 	default:
1412 		DMSG("cookie %#"PRIx64" busy", cookie);
1413 		rc = FFA_DENIED;
1414 		break;
1415 	}
1416 
1417 	virt_unset_guest();
1418 
1419 out:
1420 	set_simple_ret_val(args, rc);
1421 }
1422 
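/*
 * The FFA_NOTIFICATION_* handlers below implement a single global
 * notification bitmap for one normal world endpoint (notif_vm_id). The
 * bound and pending bitmaps and the VM ID itself are protected by
 * spmc_notif_lock.
 */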
1423 static void handle_notification_bitmap_create(struct thread_smc_args *args)
1424 {
1425 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1426 	uint32_t ret_fid = FFA_ERROR;
1427 	uint32_t old_itr_status = 0;
1428 
1429 	if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
1430 	    !args->a5 && !args->a6 && !args->a7) {
1431 		uint16_t vm_id = args->a1;
1432 
1433 		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1434 
1435 		if (notif_vm_id_valid) {
1436 			if (vm_id == notif_vm_id)
1437 				ret_val = FFA_DENIED;
1438 			else
1439 				ret_val = FFA_NO_MEMORY;
1440 		} else {
1441 			notif_vm_id = vm_id;
1442 			notif_vm_id_valid = true;
1443 			ret_val = FFA_OK;
1444 			ret_fid = FFA_SUCCESS_32;
1445 		}
1446 
1447 		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1448 	}
1449 
1450 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1451 }
1452 
1453 static void handle_notification_bitmap_destroy(struct thread_smc_args *args)
1454 {
1455 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1456 	uint32_t ret_fid = FFA_ERROR;
1457 	uint32_t old_itr_status = 0;
1458 
1459 	if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
1460 	    !args->a5 && !args->a6 && !args->a7) {
1461 		uint16_t vm_id = args->a1;
1462 
1463 		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1464 
1465 		if (notif_vm_id_valid && vm_id == notif_vm_id) {
1466 			if (notif_pending_bitmap || notif_bound_bitmap) {
1467 				ret_val = FFA_DENIED;
1468 			} else {
1469 				notif_vm_id_valid = false;
1470 				ret_val = FFA_OK;
1471 				ret_fid = FFA_SUCCESS_32;
1472 			}
1473 		}
1474 
1475 		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1476 	}
1477 
1478 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1479 }
1480 
1481 static void handle_notification_bind(struct thread_smc_args *args)
1482 {
1483 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1484 	uint32_t ret_fid = FFA_ERROR;
1485 	uint32_t old_itr_status = 0;
1486 	uint64_t bitmap = 0;
1487 	uint16_t vm_id = 0;
1488 
1489 	if (args->a5 || args->a6 || args->a7)
1490 		goto out;
1491 	if (args->a2) {
1492 		/* We only deal with global notifications for now */
1493 		ret_val = FFA_NOT_SUPPORTED;
1494 		goto out;
1495 	}
1496 
1497 	/* The destination of the eventual notification */
1498 	vm_id = FFA_DST(args->a1);
1499 	bitmap = reg_pair_to_64(args->a4, args->a3);
1500 
1501 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1502 
1503 	if (notif_vm_id_valid && vm_id == notif_vm_id) {
1504 		if (bitmap & notif_bound_bitmap) {
1505 			ret_val = FFA_DENIED;
1506 		} else {
1507 			notif_bound_bitmap |= bitmap;
1508 			ret_val = FFA_OK;
1509 			ret_fid = FFA_SUCCESS_32;
1510 		}
1511 	}
1512 
1513 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1514 out:
1515 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1516 }
1517 
1518 static void handle_notification_unbind(struct thread_smc_args *args)
1519 {
1520 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1521 	uint32_t ret_fid = FFA_ERROR;
1522 	uint32_t old_itr_status = 0;
1523 	uint64_t bitmap = 0;
1524 	uint16_t vm_id = 0;
1525 
1526 	if (args->a2 || args->a5 || args->a6 || args->a7)
1527 		goto out;
1528 
1529 	/* The destination of the eventual notification */
1530 	vm_id = FFA_DST(args->a1);
1531 	bitmap = reg_pair_to_64(args->a4, args->a3);
1532 
1533 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1534 
1535 	if (notif_vm_id_valid && vm_id == notif_vm_id) {
1536 		/*
1537 		 * Spec says:
1538 		 * At least one notification is bound to another Sender or
1539 		 * is currently pending.
1540 		 *
1541 		 * Not sure what the intention is.
1542 		 */
1543 		if (bitmap & notif_pending_bitmap) {
1544 			ret_val = FFA_DENIED;
1545 		} else {
1546 			notif_bound_bitmap &= ~bitmap;
1547 			ret_val = FFA_OK;
1548 			ret_fid = FFA_SUCCESS_32;
1549 		}
1550 	}
1551 
1552 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1553 out:
1554 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1555 }
1556 
1557 static void handle_notification_get(struct thread_smc_args *args)
1558 {
1559 	uint32_t w2 = FFA_INVALID_PARAMETERS;
1560 	uint32_t ret_fid = FFA_ERROR;
1561 	uint32_t old_itr_status = 0;
1562 	uint16_t vm_id = 0;
1563 	uint32_t w3 = 0;
1564 
1565 	if (args->a5 || args->a6 || args->a7)
1566 		goto out;
1567 	if (!(args->a2 & 0x1)) {
1568 		ret_fid = FFA_SUCCESS_32;
1569 		w2 = 0;
1570 		goto out;
1571 	}
1572 	vm_id = FFA_DST(args->a1);
1573 
1574 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1575 
1576 	if (notif_vm_id_valid && vm_id == notif_vm_id) {
1577 		reg_pair_from_64(notif_pending_bitmap, &w3, &w2);
1578 		notif_pending_bitmap = 0;
1579 		ret_fid = FFA_SUCCESS_32;
1580 	}
1581 
1582 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1583 out:
1584 	spmc_set_args(args, ret_fid, 0, w2, w3, 0, 0);
1585 }
1586 
1587 static void handle_notification_info_get(struct thread_smc_args *args)
1588 {
1589 	uint32_t w2 = FFA_INVALID_PARAMETERS;
1590 	uint32_t ret_fid = FFA_ERROR;
1591 
1592 	if (args->a1 || args->a2 || args->a3 || args->a4 || args->a5 ||
1593 	    args->a6 || args->a7)
1594 		goto out;
1595 
1596 	if (OPTEE_SMC_IS_64(args->a0))
1597 		ret_fid = FFA_SUCCESS_64;
1598 	else
1599 		ret_fid = FFA_SUCCESS_32;
1600 
1601 	/*
1602 	 * Note that we only support a physical OS kernel in normal world
1603 	 * with global notifications.
1604 	 * So there is one ID list (count of lists in BIT[11:7]),
1605 	 * the list holds BIT[13:12] + 1 IDs,
1606 	 * and the VM ID is always 0.
1607 	 */
1608 	w2 = SHIFT_U32(1, 7);
1609 out:
1610 	spmc_set_args(args, ret_fid, 0, w2, 0, 0, 0);
1611 }
1612 
1613 void thread_spmc_set_async_notif_intid(int intid)
1614 {
1615 	assert(interrupt_can_raise_sgi(interrupt_get_main_chip()));
1616 	notif_intid = intid;
1617 	spmc_notif_is_ready = true;
1618 	DMSG("Asynchronous notifications are ready");
1619 }
1620 
1621 void notif_send_async(uint32_t value)
1622 {
1623 	uint32_t old_itr_status = 0;
1624 
1625 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1626 	assert(value == NOTIF_VALUE_DO_BOTTOM_HALF && spmc_notif_is_ready &&
1627 	       do_bottom_half_value >= 0 && notif_intid >= 0);
1628 	notif_pending_bitmap |= BIT64(do_bottom_half_value);
1629 	interrupt_raise_sgi(interrupt_get_main_chip(), notif_intid,
1630 			    ITR_CPU_MASK_TO_THIS_CPU);
1631 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1632 }
1633 #else
1634 void notif_send_async(uint32_t value)
1635 {
1636 	/* Global notification, delay the schedule receiver interrupt */
1637 	uint32_t flags = BIT32(1);
1638 	int res = 0;
1639 
1640 	assert(value == NOTIF_VALUE_DO_BOTTOM_HALF && spmc_notif_is_ready &&
1641 	       do_bottom_half_value >= 0);
1642 	res = ffa_set_notification(notif_vm_id, my_endpoint_id, flags,
1643 				   BIT64(do_bottom_half_value));
1644 	if (res) {
1645 		EMSG("notification set failed with error %d", res);
1646 		panic();
1647 	}
1648 }
1649 #endif
1650 
1651 /* Only called from assembly */
1652 void thread_spmc_msg_recv(struct thread_smc_args *args);
1653 void thread_spmc_msg_recv(struct thread_smc_args *args)
1654 {
1655 	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
1656 	switch (args->a0) {
1657 #if defined(CFG_CORE_SEL1_SPMC)
1658 	case FFA_FEATURES:
1659 		handle_features(args);
1660 		break;
1661 	case FFA_SPM_ID_GET:
1662 		handle_spm_id_get(args);
1663 		break;
1664 #ifdef ARM64
1665 	case FFA_RXTX_MAP_64:
1666 #endif
1667 	case FFA_RXTX_MAP_32:
1668 		spmc_handle_rxtx_map(args, &my_rxtx);
1669 		break;
1670 	case FFA_RXTX_UNMAP:
1671 		spmc_handle_rxtx_unmap(args, &my_rxtx);
1672 		break;
1673 	case FFA_RX_RELEASE:
1674 		spmc_handle_rx_release(args, &my_rxtx);
1675 		break;
1676 	case FFA_PARTITION_INFO_GET:
1677 		spmc_handle_partition_info_get(args, &my_rxtx);
1678 		break;
1679 	case FFA_RUN:
1680 		spmc_handle_run(args);
1681 		break;
1682 #endif /*CFG_CORE_SEL1_SPMC*/
1683 	case FFA_INTERRUPT:
1684 		if (IS_ENABLED(CFG_CORE_SEL1_SPMC))
1685 			spmc_set_args(args, FFA_NORMAL_WORLD_RESUME, 0, 0, 0,
1686 				      0, 0);
1687 		else
1688 			spmc_set_args(args, FFA_MSG_WAIT, 0, 0, 0, 0, 0);
1689 		break;
1690 #ifdef ARM64
1691 	case FFA_MSG_SEND_DIRECT_REQ_64:
1692 #endif
1693 	case FFA_MSG_SEND_DIRECT_REQ_32:
1694 		handle_direct_request(args, &my_rxtx);
1695 		break;
1696 #if defined(CFG_CORE_SEL1_SPMC)
1697 #ifdef ARM64
1698 	case FFA_MEM_SHARE_64:
1699 #endif
1700 	case FFA_MEM_SHARE_32:
1701 		handle_mem_share(args, &my_rxtx);
1702 		break;
1703 	case FFA_MEM_RECLAIM:
1704 		if (!IS_ENABLED(CFG_SECURE_PARTITION) ||
1705 		    !ffa_mem_reclaim(args, NULL))
1706 			handle_mem_reclaim(args);
1707 		break;
1708 	case FFA_MEM_FRAG_TX:
1709 		handle_mem_frag_tx(args, &my_rxtx);
1710 		break;
1711 	case FFA_NOTIFICATION_BITMAP_CREATE:
1712 		handle_notification_bitmap_create(args);
1713 		break;
1714 	case FFA_NOTIFICATION_BITMAP_DESTROY:
1715 		handle_notification_bitmap_destroy(args);
1716 		break;
1717 	case FFA_NOTIFICATION_BIND:
1718 		handle_notification_bind(args);
1719 		break;
1720 	case FFA_NOTIFICATION_UNBIND:
1721 		handle_notification_unbind(args);
1722 		break;
1723 	case FFA_NOTIFICATION_GET:
1724 		handle_notification_get(args);
1725 		break;
1726 #ifdef ARM64
1727 	case FFA_NOTIFICATION_INFO_GET_64:
1728 #endif
1729 	case FFA_NOTIFICATION_INFO_GET_32:
1730 		handle_notification_info_get(args);
1731 		break;
1732 #endif /*CFG_CORE_SEL1_SPMC*/
1733 	case FFA_ERROR:
1734 		EMSG("Cannot handle FFA_ERROR(%d)", (int)args->a2);
1735 		if (!IS_ENABLED(CFG_CORE_SEL1_SPMC)) {
1736 			/*
1737 			 * The SPMC will return an FFA_ERROR back so better
1738 			 * panic() now than flooding the log.
1739 			 */
1740 			panic("FFA_ERROR from SPMC is fatal");
1741 		}
1742 		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
1743 			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1744 		break;
1745 	default:
1746 		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
1747 		set_simple_ret_val(args, FFA_NOT_SUPPORTED);
1748 	}
1749 }
1750 
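/*
 * The shared memory carrying the OP-TEE message arguments is identified
 * by an FF-A cookie and an offset into that memory object. The RPC
 * argument struct used for later RPCs from this thread follows directly
 * after the call argument struct in the same buffer.
 */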
1751 static TEE_Result yielding_call_with_arg(uint64_t cookie, uint32_t offset)
1752 {
1753 	size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1754 	struct thread_ctx *thr = threads + thread_get_id();
1755 	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
1756 	struct optee_msg_arg *arg = NULL;
1757 	struct mobj *mobj = NULL;
1758 	uint32_t num_params = 0;
1759 	size_t sz = 0;
1760 
1761 	mobj = mobj_ffa_get_by_cookie(cookie, 0);
1762 	if (!mobj) {
1763 		EMSG("Can't find cookie %#"PRIx64, cookie);
1764 		return TEE_ERROR_BAD_PARAMETERS;
1765 	}
1766 
1767 	res = mobj_inc_map(mobj);
1768 	if (res)
1769 		goto out_put_mobj;
1770 
1771 	res = TEE_ERROR_BAD_PARAMETERS;
1772 	arg = mobj_get_va(mobj, offset, sizeof(*arg));
1773 	if (!arg)
1774 		goto out_dec_map;
1775 
1776 	num_params = READ_ONCE(arg->num_params);
1777 	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
1778 		goto out_dec_map;
1779 
1780 	sz = OPTEE_MSG_GET_ARG_SIZE(num_params);
1781 
1782 	thr->rpc_arg = mobj_get_va(mobj, offset + sz, sz_rpc);
1783 	if (!thr->rpc_arg)
1784 		goto out_dec_map;
1785 
1786 	virt_on_stdcall();
1787 	res = tee_entry_std(arg, num_params);
1788 
1789 	thread_rpc_shm_cache_clear(&thr->shm_cache);
1790 	thr->rpc_arg = NULL;
1791 
1792 out_dec_map:
1793 	mobj_dec_map(mobj);
1794 out_put_mobj:
1795 	mobj_put(mobj);
1796 	return res;
1797 }
1798 
1799 /*
1800  * Helper routine for the assembly function thread_std_smc_entry()
1801  *
1802  * Note: this function is weak just to make link_dummies_paged.c happy.
1803  */
1804 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
1805 				       uint32_t a2, uint32_t a3,
1806 				       uint32_t a4, uint32_t a5 __unused)
1807 {
1808 	/*
1809 	 * Arguments are supplied from handle_yielding_call() as:
1810 	 * a0 <- w1
1811 	 * a1 <- w3
1812 	 * a2 <- w4
1813 	 * a3 <- w5
1814 	 * a4 <- w6
1815 	 * a5 <- w7
1816 	 */
1817 	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
1818 	if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG)
1819 		return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4);
1820 	return FFA_DENIED;
1821 }
1822 
1823 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
1824 {
1825 	uint64_t offs = tpm->u.memref.offs;
1826 
1827 	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
1828 		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
1829 
1830 	param->u.fmem.offs_low = offs;
1831 	param->u.fmem.offs_high = offs >> 32;
1832 	if (param->u.fmem.offs_high != offs >> 32)
1833 		return false;
1834 
1835 	param->u.fmem.size = tpm->u.memref.size;
1836 	if (tpm->u.memref.mobj) {
1837 		uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj);
1838 
1839 		/* If a mobj is passed it better be one with a valid cookie. */
1840 		if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
1841 			return false;
1842 		param->u.fmem.global_id = cookie;
1843 	} else {
1844 		param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
1845 	}
1846 
1847 	return true;
1848 }
1849 
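/*
 * Build an RPC request in the per-thread RPC argument struct that
 * yielding_call_with_arg() located in the shared argument buffer. Normal
 * world reads the request from there when the thread returns via
 * thread_rpc().
 */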
1850 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
1851 			    struct thread_param *params,
1852 			    struct optee_msg_arg **arg_ret)
1853 {
1854 	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1855 	struct thread_ctx *thr = threads + thread_get_id();
1856 	struct optee_msg_arg *arg = thr->rpc_arg;
1857 
1858 	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
1859 		return TEE_ERROR_BAD_PARAMETERS;
1860 
1861 	if (!arg) {
1862 		EMSG("rpc_arg not set");
1863 		return TEE_ERROR_GENERIC;
1864 	}
1865 
1866 	memset(arg, 0, sz);
1867 	arg->cmd = cmd;
1868 	arg->num_params = num_params;
1869 	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
1870 
1871 	for (size_t n = 0; n < num_params; n++) {
1872 		switch (params[n].attr) {
1873 		case THREAD_PARAM_ATTR_NONE:
1874 			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
1875 			break;
1876 		case THREAD_PARAM_ATTR_VALUE_IN:
1877 		case THREAD_PARAM_ATTR_VALUE_OUT:
1878 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1879 			arg->params[n].attr = params[n].attr -
1880 					      THREAD_PARAM_ATTR_VALUE_IN +
1881 					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
1882 			arg->params[n].u.value.a = params[n].u.value.a;
1883 			arg->params[n].u.value.b = params[n].u.value.b;
1884 			arg->params[n].u.value.c = params[n].u.value.c;
1885 			break;
1886 		case THREAD_PARAM_ATTR_MEMREF_IN:
1887 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1888 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1889 			if (!set_fmem(arg->params + n, params + n))
1890 				return TEE_ERROR_BAD_PARAMETERS;
1891 			break;
1892 		default:
1893 			return TEE_ERROR_BAD_PARAMETERS;
1894 		}
1895 	}
1896 
1897 	if (arg_ret)
1898 		*arg_ret = arg;
1899 
1900 	return TEE_SUCCESS;
1901 }
1902 
1903 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
1904 				struct thread_param *params)
1905 {
1906 	for (size_t n = 0; n < num_params; n++) {
1907 		switch (params[n].attr) {
1908 		case THREAD_PARAM_ATTR_VALUE_OUT:
1909 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1910 			params[n].u.value.a = arg->params[n].u.value.a;
1911 			params[n].u.value.b = arg->params[n].u.value.b;
1912 			params[n].u.value.c = arg->params[n].u.value.c;
1913 			break;
1914 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1915 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1916 			params[n].u.memref.size = arg->params[n].u.fmem.size;
1917 			break;
1918 		default:
1919 			break;
1920 		}
1921 	}
1922 
1923 	return arg->ret;
1924 }
1925 
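/*
 * Execute an OPTEE_RPC_CMD_* request in normal world: the parameters are
 * serialized with get_rpc_arg(), the thread is suspended with
 * thread_rpc() until normal world returns, and the results are read back
 * with get_rpc_arg_res().
 *
 * Illustrative use (a sketch only; the command and values depend on the
 * caller):
 *
 *	struct thread_param p = THREAD_PARAM_VALUE(IN, a, b, c);
 *	uint32_t res = thread_rpc_cmd(cmd, 1, &p);
 */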
1926 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
1927 			struct thread_param *params)
1928 {
1929 	struct thread_rpc_arg rpc_arg = { .call = {
1930 			.w1 = thread_get_tsd()->rpc_target_info,
1931 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1932 		},
1933 	};
1934 	struct optee_msg_arg *arg = NULL;
1935 	uint32_t ret = 0;
1936 
1937 	ret = get_rpc_arg(cmd, num_params, params, &arg);
1938 	if (ret)
1939 		return ret;
1940 
1941 	thread_rpc(&rpc_arg);
1942 
1943 	return get_rpc_arg_res(arg, num_params, params);
1944 }
1945 
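/*
 * Free a previously allocated block of RPC shared memory: release the
 * local mobj and unregister its FF-A cookie, then ask normal world to
 * free its side with OPTEE_RPC_CMD_SHM_FREE.
 */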
1946 static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
1947 {
1948 	struct thread_rpc_arg rpc_arg = { .call = {
1949 			.w1 = thread_get_tsd()->rpc_target_info,
1950 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1951 		},
1952 	};
1953 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
1954 	uint32_t res2 = 0;
1955 	uint32_t res = 0;
1956 
1957 	DMSG("freeing cookie %#"PRIx64, cookie);
1958 
1959 	res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);
1960 
1961 	mobj_put(mobj);
1962 	res2 = mobj_ffa_unregister_by_cookie(cookie);
1963 	if (res2)
1964 		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
1965 		     cookie, res2);
1966 	if (!res)
1967 		thread_rpc(&rpc_arg);
1968 }
1969 
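/*
 * Allocate shared memory of the requested type from normal world with
 * OPTEE_RPC_CMD_SHM_ALLOC. The returned memory is identified by its FF-A
 * cookie (global_id) and is mapped before the mobj is returned. Returns
 * NULL on failure.
 */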
1970 static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
1971 {
1972 	struct thread_rpc_arg rpc_arg = { .call = {
1973 			.w1 = thread_get_tsd()->rpc_target_info,
1974 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1975 		},
1976 	};
1977 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
1978 	struct optee_msg_arg *arg = NULL;
1979 	unsigned int internal_offset = 0;
1980 	struct mobj *mobj = NULL;
1981 	uint64_t cookie = 0;
1982 
1983 	if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
1984 		return NULL;
1985 
1986 	thread_rpc(&rpc_arg);
1987 
1988 	if (arg->num_params != 1 ||
1989 	    arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
1990 		return NULL;
1991 
1992 	internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs);
1993 	cookie = READ_ONCE(arg->params->u.fmem.global_id);
1994 	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
1995 	if (!mobj) {
1996 		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
1997 		     cookie, internal_offset);
1998 		return NULL;
1999 	}
2000 
2001 	assert(mobj_is_nonsec(mobj));
2002 
2003 	if (mobj->size < size) {
2004 		DMSG("Mobj %#"PRIx64": wrong size", cookie);
2005 		mobj_put(mobj);
2006 		return NULL;
2007 	}
2008 
2009 	if (mobj_inc_map(mobj)) {
2010 		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
2011 		mobj_put(mobj);
2012 		return NULL;
2013 	}
2014 
2015 	return mobj;
2016 }
2017 
2018 struct mobj *thread_rpc_alloc_payload(size_t size)
2019 {
2020 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
2021 }
2022 
2023 struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
2024 {
2025 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
2026 }
2027 
2028 void thread_rpc_free_kernel_payload(struct mobj *mobj)
2029 {
2030 	if (mobj)
2031 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL,
2032 				mobj_get_cookie(mobj), mobj);
2033 }
2034 
2035 void thread_rpc_free_payload(struct mobj *mobj)
2036 {
2037 	if (mobj)
2038 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
2039 				mobj);
2040 }
2041 
2042 struct mobj *thread_rpc_alloc_global_payload(size_t size)
2043 {
2044 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
2045 }
2046 
2047 void thread_rpc_free_global_payload(struct mobj *mobj)
2048 {
2049 	if (mobj)
2050 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL,
2051 				mobj_get_cookie(mobj), mobj);
2052 }
2053 
2054 void thread_spmc_register_secondary_ep(vaddr_t ep)
2055 {
2056 	unsigned long ret = 0;
2057 
2058 	/* Let the SPM know the entry point for secondary CPUs */
2059 	ret = thread_smc(FFA_SECONDARY_EP_REGISTER_64, ep, 0, 0);
2060 
2061 	if (ret != FFA_SUCCESS_32 && ret != FFA_SUCCESS_64)
2062 		EMSG("FFA_SECONDARY_EP_REGISTER_64 ret %#lx", ret);
2063 }
2064 
2065 #if defined(CFG_CORE_SEL1_SPMC)
2066 static TEE_Result spmc_init(void)
2067 {
2068 	my_endpoint_id = SPMC_ENDPOINT_ID;
2069 	DMSG("My endpoint ID %#x", my_endpoint_id);
2070 
2071 	/*
2072 	 * If the SPMD thinks we are version 1.0 it will report version 1.0
2073 	 * to normal world regardless of which version we query the SPM
2074 	 * with. However, if the SPMD thinks we are version 1.1 it will
2075 	 * forward queries from normal world and let us negotiate the
2076 	 * version ourselves. So by setting version 1.0 here we should be
2077 	 * compatible either way.
2078 	 *
2079 	 * Note that disagreement on the negotiated version means that we'll
2080 	 * have communication problems with normal world.
2080 	 */
2081 	my_rxtx.ffa_vers = FFA_VERSION_1_0;
2082 
2083 	return TEE_SUCCESS;
2084 }
2085 #else /* !defined(CFG_CORE_SEL1_SPMC) */
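/*
 * Register our RX/TX buffer pair with the SPMC. FFA_RXTX_MAP takes the
 * physical addresses of the TX and RX buffers and the buffer size in
 * 4KiB pages (one page each here). Panics on failure since we can't
 * communicate with the SPMC without these buffers.
 */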
2086 static void spmc_rxtx_map(struct ffa_rxtx *rxtx)
2087 {
2088 	struct thread_smc_args args = {
2089 #ifdef ARM64
2090 		.a0 = FFA_RXTX_MAP_64,
2091 #else
2092 		.a0 = FFA_RXTX_MAP_32,
2093 #endif
2094 		.a1 = virt_to_phys(rxtx->tx),
2095 		.a2 = virt_to_phys(rxtx->rx),
2096 		.a3 = 1,
2097 	};
2098 
2099 	thread_smccc(&args);
2100 	if (!is_ffa_success(args.a0)) {
2101 		if (args.a0 == FFA_ERROR)
2102 			EMSG("rxtx map failed with error %ld", args.a2);
2103 		else
2104 			EMSG("rxtx map failed");
2105 		panic();
2106 	}
2107 }
2108 
2109 static uint16_t get_my_id(void)
2110 {
2111 	struct thread_smc_args args = {
2112 		.a0 = FFA_ID_GET,
2113 	};
2114 
2115 	thread_smccc(&args);
2116 	if (!is_ffa_success(args.a0)) {
2117 		if (args.a0 == FFA_ERROR)
2118 			EMSG("Get id failed with error %ld", args.a2);
2119 		else
2120 			EMSG("Get id failed");
2121 		panic();
2122 	}
2123 
2124 	return args.a2;
2125 }
2126 
2127 static uint32_t get_ffa_version(uint32_t my_version)
2128 {
2129 	struct thread_smc_args args = {
2130 		.a0 = FFA_VERSION,
2131 		.a1 = my_version,
2132 	};
2133 
2134 	thread_smccc(&args);
2135 	if (args.a0 & BIT(31)) {
2136 		EMSG("FF-A version failed with error %ld", args.a0);
2137 		panic();
2138 	}
2139 
2140 	return args.a0;
2141 }
2142 
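/*
 * Ask the SPMC to retrieve the memory region identified by @cookie with
 * FFA_MEM_RETRIEVE_REQ. The request descriptor is built in the TX buffer
 * in the layout matching the negotiated FF-A version and the response is
 * parsed from the RX buffer into @trans. Returns a pointer to the RX
 * buffer holding the full response, or NULL on failure.
 */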
2143 static void *spmc_retrieve_req(uint64_t cookie,
2144 			       struct ffa_mem_transaction_x *trans)
2145 {
2146 	struct ffa_mem_access *acc_descr_array = NULL;
2147 	struct ffa_mem_access_perm *perm_descr = NULL;
2148 	struct thread_smc_args args = {
2149 		.a0 = FFA_MEM_RETRIEVE_REQ_32,
2150 		.a3 =	0,	/* Address, Using TX -> MBZ */
2151 		.a4 =   0,	/* Using TX -> MBZ */
2152 	};
2153 	size_t size = 0;
2154 	int rc = 0;
2155 
2156 	if (my_rxtx.ffa_vers == FFA_VERSION_1_0) {
2157 		struct ffa_mem_transaction_1_0 *trans_descr = my_rxtx.tx;
2158 
2159 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
2160 		memset(trans_descr, 0, size);
2161 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
2162 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
2163 		trans_descr->global_handle = cookie;
2164 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
2165 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
2166 		trans_descr->mem_access_count = 1;
2167 		acc_descr_array = trans_descr->mem_access_array;
2168 	} else {
2169 		struct ffa_mem_transaction_1_1 *trans_descr = my_rxtx.tx;
2170 
2171 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
2172 		memset(trans_descr, 0, size);
2173 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
2174 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
2175 		trans_descr->global_handle = cookie;
2176 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
2177 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
2178 		trans_descr->mem_access_count = 1;
2179 		trans_descr->mem_access_offs = sizeof(*trans_descr);
2180 		trans_descr->mem_access_size = sizeof(struct ffa_mem_access);
2181 		acc_descr_array = (void *)((vaddr_t)my_rxtx.tx +
2182 					   sizeof(*trans_descr));
2183 	}
2184 	acc_descr_array->region_offs = 0;
2185 	acc_descr_array->reserved = 0;
2186 	perm_descr = &acc_descr_array->access_perm;
2187 	perm_descr->endpoint_id = my_endpoint_id;
2188 	perm_descr->perm = FFA_MEM_ACC_RW;
2189 	perm_descr->flags = 0;
2190 
2191 	args.a1 = size; /* Total Length */
2192 	args.a2 = size; /* Frag Length == Total length */
2193 	thread_smccc(&args);
2194 	if (args.a0 != FFA_MEM_RETRIEVE_RESP) {
2195 		if (args.a0 == FFA_ERROR)
2196 			EMSG("Failed to fetch cookie %#"PRIx64" error code %d",
2197 			     cookie, (int)args.a2);
2198 		else
2199 			EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64,
2200 			     cookie, args.a0);
2201 		return NULL;
2202 	}
2203 	rc = spmc_read_mem_transaction(my_rxtx.ffa_vers, my_rxtx.rx,
2204 				       my_rxtx.size, trans);
2205 	if (rc) {
2206 		EMSG("Memory transaction failure for cookie %#"PRIx64" rc %d",
2207 		     cookie, rc);
2208 		return NULL;
2209 	}
2210 
2211 	return my_rxtx.rx;
2212 }
2213 
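/*
 * Tell the SPMC that we're done with a previously retrieved memory
 * region identified by @cookie so that the owner can reclaim it
 * (FFA_MEM_RELINQUISH).
 */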
2214 void thread_spmc_relinquish(uint64_t cookie)
2215 {
2216 	struct ffa_mem_relinquish *relinquish_desc = my_rxtx.tx;
2217 	struct thread_smc_args args = {
2218 		.a0 = FFA_MEM_RELINQUISH,
2219 	};
2220 
2221 	memset(relinquish_desc, 0, sizeof(*relinquish_desc));
2222 	relinquish_desc->handle = cookie;
2223 	relinquish_desc->flags = 0;
2224 	relinquish_desc->endpoint_count = 1;
2225 	relinquish_desc->endpoint_id_array[0] = my_endpoint_id;
2226 	thread_smccc(&args);
2227 	if (!is_ffa_success(args.a0))
2228 		EMSG("Failed to relinquish cookie %#"PRIx64, cookie);
2229 }
2230 
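/*
 * Add the address ranges from the retrieved memory region descriptor to
 * the mobj. Returns 0 on success, or FFA_INVALID_PARAMETERS if a range
 * can't be added or if the ranges don't add up to exactly @num_pages.
 */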
2231 static int set_pages(struct ffa_address_range *regions,
2232 		     unsigned int num_regions, unsigned int num_pages,
2233 		     struct mobj_ffa *mf)
2234 {
2235 	unsigned int n = 0;
2236 	unsigned int idx = 0;
2237 
2238 	for (n = 0; n < num_regions; n++) {
2239 		unsigned int page_count = READ_ONCE(regions[n].page_count);
2240 		uint64_t addr = READ_ONCE(regions[n].address);
2241 
2242 		if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count))
2243 			return FFA_INVALID_PARAMETERS;
2244 	}
2245 
2246 	if (idx != num_pages)
2247 		return FFA_INVALID_PARAMETERS;
2248 
2249 	return 0;
2250 }
2251 
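/*
 * Retrieve the memory region shared under @cookie from the SPMC and
 * build a struct mobj_ffa describing its pages. The RX buffer is
 * released again before returning. Returns NULL on failure.
 */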
2252 struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie)
2253 {
2254 	struct mobj_ffa *ret = NULL;
2255 	struct ffa_mem_transaction_x retrieve_desc = { };
2256 	struct ffa_mem_access *descr_array = NULL;
2257 	struct ffa_mem_region *descr = NULL;
2258 	struct mobj_ffa *mf = NULL;
2259 	unsigned int num_pages = 0;
2260 	unsigned int offs = 0;
2261 	void *buf = NULL;
2262 	struct thread_smc_args ffa_rx_release_args = {
2263 		.a0 = FFA_RX_RELEASE
2264 	};
2265 
2266 	/*
2267 	 * OP-TEE only supports a single mem_region, while the
2268 	 * specification allows for more than one.
2269 	 */
2270 	buf = spmc_retrieve_req(cookie, &retrieve_desc);
2271 	if (!buf) {
2272 		EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64,
2273 		     cookie);
2274 		return NULL;
2275 	}
2276 
2277 	descr_array = (void *)((vaddr_t)buf + retrieve_desc.mem_access_offs);
2278 	offs = READ_ONCE(descr_array->region_offs);
2279 	descr = (struct ffa_mem_region *)((vaddr_t)buf + offs);
2280 
2281 	num_pages = READ_ONCE(descr->total_page_count);
2282 	mf = mobj_ffa_spmc_new(cookie, num_pages);
2283 	if (!mf)
2284 		goto out;
2285 
2286 	if (set_pages(descr->address_range_array,
2287 		      READ_ONCE(descr->address_range_count), num_pages, mf)) {
2288 		mobj_ffa_spmc_delete(mf);
2289 		goto out;
2290 	}
2291 
2292 	ret = mf;
2293 
2294 out:
2295 	/* Release RX buffer after the mem retrieve request. */
2296 	thread_smccc(&ffa_rx_release_args);
2297 
2298 	return ret;
2299 }
2300 
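/*
 * With an external SPMC: negotiate the FF-A version, map our RX/TX
 * buffers, query our endpoint ID and check whether the SPMC supports
 * notifications (FFA_NOTIFICATION_SET).
 */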
2301 static TEE_Result spmc_init(void)
2302 {
2303 	unsigned int major = 0;
2304 	unsigned int minor __maybe_unused = 0;
2305 	uint32_t my_vers = 0;
2306 	uint32_t vers = 0;
2307 
2308 	my_vers = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR);
2309 	vers = get_ffa_version(my_vers);
2310 	major = (vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK;
2311 	minor = (vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK;
2312 	DMSG("SPMC reported version %u.%u", major, minor);
2313 	if (major != FFA_VERSION_MAJOR) {
2314 		EMSG("Incompatible major version %u, expected %u",
2315 		     major, FFA_VERSION_MAJOR);
2316 		panic();
2317 	}
2318 	if (vers < my_vers)
2319 		my_vers = vers;
2320 	DMSG("Using version %u.%u",
2321 	     (my_vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK,
2322 	     (my_vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK);
2323 	my_rxtx.ffa_vers = my_vers;
2324 
2325 	spmc_rxtx_map(&my_rxtx);
2326 	my_endpoint_id = get_my_id();
2327 	DMSG("My endpoint ID %#x", my_endpoint_id);
2328 
2329 	if (!ffa_features(FFA_NOTIFICATION_SET)) {
2330 		spmc_notif_is_ready = true;
2331 		DMSG("Asynchronous notifications are ready");
2332 	}
2333 
2334 	return TEE_SUCCESS;
2335 }
2336 #endif /* !defined(CFG_CORE_SEL1_SPMC) */
2337 
2338 /*
2339  * boot_final() is always run before exiting at the end of boot
2340  * initialization. With virtualization the init-calls are only run once
2341  * an OP-TEE partition has been created, so in that case we have to
2342  * initialize via boot_final() to make sure the endpoint ID and FF-A
2343  * version are assigned before they're used the first time.
2344  */
2345 #ifdef CFG_NS_VIRTUALIZATION
2346 boot_final(spmc_init);
2347 #else
2348 service_init(spmc_init);
2349 #endif
2350