xref: /optee_os/core/arch/arm/kernel/thread_spmc.c (revision a65dd3a6b64ddf6b3377babcb123c99d9b782d28)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2020-2021, Linaro Limited.
4  * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
5  */
6 
7 #include <assert.h>
8 #include <ffa.h>
9 #include <initcall.h>
10 #include <io.h>
11 #include <kernel/interrupt.h>
12 #include <kernel/panic.h>
13 #include <kernel/secure_partition.h>
14 #include <kernel/spinlock.h>
15 #include <kernel/spmc_sp_handler.h>
16 #include <kernel/tee_misc.h>
17 #include <kernel/thread.h>
18 #include <kernel/thread_private.h>
19 #include <kernel/thread_spmc.h>
20 #include <kernel/virtualization.h>
21 #include <mm/core_mmu.h>
22 #include <mm/mobj.h>
23 #include <optee_ffa.h>
24 #include <optee_msg.h>
25 #include <optee_rpc_cmd.h>
26 #include <string.h>
27 #include <sys/queue.h>
28 #include <tee/entry_std.h>
29 #include <tee/uuid.h>
30 #include <util.h>
31 
32 #if defined(CFG_CORE_SEL1_SPMC)
33 struct mem_share_state {
34 	struct mobj_ffa *mf;
35 	unsigned int page_count;
36 	unsigned int region_count;
37 	unsigned int current_page_idx;
38 };
39 
40 struct mem_frag_state {
41 	struct mem_share_state share;
42 	tee_mm_entry_t *mm;
43 	unsigned int frag_offset;
44 	SLIST_ENTRY(mem_frag_state) link;
45 };
46 #endif
47 
48 /* Initialized in spmc_init() below */
49 static uint16_t my_endpoint_id __nex_bss;
50 #ifdef CFG_CORE_SEL1_SPMC
51 static const uint32_t my_part_props = FFA_PART_PROP_DIRECT_REQ_RECV |
52 				      FFA_PART_PROP_DIRECT_REQ_SEND |
53 #ifdef CFG_NS_VIRTUALIZATION
54 				      FFA_PART_PROP_NOTIF_CREATED |
55 				      FFA_PART_PROP_NOTIF_DESTROYED |
56 #endif
57 #ifdef ARM64
58 				      FFA_PART_PROP_AARCH64_STATE |
59 #endif
60 				      FFA_PART_PROP_IS_PE_ID;
61 
62 static uint32_t my_uuid_words[] = {
63 	/*
64 	 * - if the SPMC is in S-EL2 this UUID describes OP-TEE as an S-EL1
65 	 *   SP, or
66 	 * - if the SPMC is in S-EL1 then this UUID is for OP-TEE as a
67 	 *   logical partition, residing in the same exception level as the
68 	 *   SPMC
69 	 * UUID 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
70 	 */
71 	0xe0786148, 0xe311f8e7, 0x02005ebc, 0x1bc5d5a5,
72 };
73 
74 /*
75  * If struct ffa_rxtx::size is 0, RX/TX buffers are not mapped or initialized.
76  *
77  * struct ffa_rxtx::spinlock protects the variables below from concurrent
78  * access; this includes the use of the content of struct ffa_rxtx::rx and
79  * @frag_state_head.
80  *
81  * struct ffa_rxtx::tx_is_mine is true when we may write to struct
82  * ffa_rxtx::tx and false when it is owned by normal world.
83  *
84  * Note that we can't prevent normal world from updating the content of
85  * these buffers, so we must always read their content carefully, even
86  * while we hold the lock.
87  */
88 
89 static struct ffa_rxtx nw_rxtx __nex_bss;
90 
91 static bool is_nw_buf(struct ffa_rxtx *rxtx)
92 {
93 	return rxtx == &nw_rxtx;
94 }
95 
96 static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
97 	SLIST_HEAD_INITIALIZER(&frag_state_head);
98 #else
99 static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
100 static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
101 static struct ffa_rxtx nw_rxtx = { .rx = __rx_buf, .tx = __tx_buf };
102 #endif
103 
104 static uint32_t swap_src_dst(uint32_t src_dst)
105 {
106 	return (src_dst >> 16) | (src_dst << 16);
107 }
108 
109 static uint16_t get_sender_id(uint32_t src_dst)
110 {
111 	return src_dst >> 16;
112 }
113 
114 void spmc_set_args(struct thread_smc_args *args, uint32_t fid, uint32_t src_dst,
115 		   uint32_t w2, uint32_t w3, uint32_t w4, uint32_t w5)
116 {
117 	*args = (struct thread_smc_args){ .a0 = fid,
118 					  .a1 = src_dst,
119 					  .a2 = w2,
120 					  .a3 = w3,
121 					  .a4 = w4,
122 					  .a5 = w5, };
123 }
124 
125 static uint32_t exchange_version(uint32_t vers, struct ffa_rxtx *rxtx)
126 {
127 	/*
128 	 * No locking is needed; if the caller makes concurrent calls to this
129 	 * it's only making a mess for itself. We must be able to renegotiate
130 	 * the FF-A version in order to support differing versions between
131 	 * the loader and the driver.
132 	 */
133 	if (vers < FFA_VERSION_1_1)
134 		rxtx->ffa_vers = FFA_VERSION_1_0;
135 	else
136 		rxtx->ffa_vers = FFA_VERSION_1_1;
137 
138 	return rxtx->ffa_vers;
139 }
140 
141 #if defined(CFG_CORE_SEL1_SPMC)
142 void spmc_handle_version(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
143 {
144 	spmc_set_args(args, exchange_version(args->a0, rxtx), FFA_PARAM_MBZ,
145 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
146 		      FFA_PARAM_MBZ);
147 }
148 
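/*
 * Handle FFA_FEATURES: report whether the function ID passed in w1 is
 * implemented by this SPMC. For FFA_RXTX_MAP, w2 encodes the minimum
 * buffer size and alignment boundary (0 meaning 4kB) and for
 * FFA_MEM_SHARE bit 0 of w2 reports that the transaction descriptor
 * may be passed in a dynamically allocated buffer.
 */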
149 static void handle_features(struct thread_smc_args *args)
150 {
151 	uint32_t ret_fid = 0;
152 	uint32_t ret_w2 = FFA_PARAM_MBZ;
153 
154 	switch (args->a1) {
155 #ifdef ARM64
156 	case FFA_RXTX_MAP_64:
157 #endif
158 	case FFA_RXTX_MAP_32:
159 		ret_fid = FFA_SUCCESS_32;
160 		ret_w2 = 0; /* 4kB Minimum buffer size and alignment boundary */
161 		break;
162 #ifdef ARM64
163 	case FFA_MEM_SHARE_64:
164 #endif
165 	case FFA_MEM_SHARE_32:
166 		ret_fid = FFA_SUCCESS_32;
167 		/*
168 		 * Partition manager supports transmission of a memory
169 		 * transaction descriptor in a buffer dynamically allocated
170 		 * by the endpoint.
171 		 */
172 		ret_w2 = BIT(0);
173 		break;
174 
175 	case FFA_ERROR:
176 	case FFA_VERSION:
177 	case FFA_SUCCESS_32:
178 #ifdef ARM64
179 	case FFA_SUCCESS_64:
180 #endif
181 	case FFA_FEATURES:
182 	case FFA_MEM_FRAG_TX:
183 	case FFA_MEM_RECLAIM:
184 	case FFA_MSG_SEND_DIRECT_REQ_32:
185 	case FFA_INTERRUPT:
186 	case FFA_PARTITION_INFO_GET:
187 	case FFA_RXTX_UNMAP:
188 	case FFA_RX_RELEASE:
189 	case FFA_FEATURE_MANAGED_EXIT_INTR:
190 		ret_fid = FFA_SUCCESS_32;
191 		break;
192 	default:
193 		ret_fid = FFA_ERROR;
194 		ret_w2 = FFA_NOT_SUPPORTED;
195 		break;
196 	}
197 
198 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
199 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
200 }
201 
202 static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
203 {
204 	tee_mm_entry_t *mm = NULL;
205 
206 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
207 		return FFA_INVALID_PARAMETERS;
208 
209 	mm = tee_mm_alloc(&tee_mm_shm, sz);
210 	if (!mm)
211 		return FFA_NO_MEMORY;
212 
213 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
214 					  sz / SMALL_PAGE_SIZE,
215 					  MEM_AREA_NSEC_SHM)) {
216 		tee_mm_free(mm);
217 		return FFA_INVALID_PARAMETERS;
218 	}
219 
220 	*va_ret = (void *)tee_mm_get_smem(mm);
221 	return 0;
222 }
223 
224 static void unmap_buf(void *va, size_t sz)
225 {
226 	tee_mm_entry_t *mm = tee_mm_find(&tee_mm_shm, (vaddr_t)va);
227 
228 	assert(mm);
229 	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
230 	tee_mm_free(mm);
231 }
232 
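/*
 * Handle FFA_RXTX_MAP: the caller passes the base address of its TX
 * buffer in w1, its RX buffer in w2 and the page count in w3. From our
 * point of view the caller's TX buffer is our RX buffer and vice
 * versa. Normal world passes physical addresses which must be mapped
 * here, while an SP passes virtual addresses which are already mapped.
 */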
233 void spmc_handle_rxtx_map(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
234 {
235 	int rc = 0;
236 	uint32_t ret_fid = FFA_ERROR;
237 	unsigned int sz = 0;
238 	paddr_t rx_pa = 0;
239 	paddr_t tx_pa = 0;
240 	void *rx = NULL;
241 	void *tx = NULL;
242 
243 	cpu_spin_lock(&rxtx->spinlock);
244 
245 	if (args->a3 & GENMASK_64(63, 6)) {
246 		rc = FFA_INVALID_PARAMETERS;
247 		goto out;
248 	}
249 
250 	sz = args->a3 * SMALL_PAGE_SIZE;
251 	if (!sz) {
252 		rc = FFA_INVALID_PARAMETERS;
253 		goto out;
254 	}
255 	/* TX/RX are swapped compared to the caller */
256 	tx_pa = args->a2;
257 	rx_pa = args->a1;
258 
259 	if (rxtx->size) {
260 		rc = FFA_DENIED;
261 		goto out;
262 	}
263 
264 	/*
265 	 * If the buffer comes from a SP the address is virtual and already
266 	 * mapped.
267 	 */
268 	if (is_nw_buf(rxtx)) {
269 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
270 			enum teecore_memtypes mt = MEM_AREA_NEX_NSEC_SHM;
271 			bool tx_alloced = false;
272 
273 			/*
274 			 * With virtualization we establish this mapping in
275 			 * the nexus mapping, which is then replicated to
276 			 * each partition.
277 			 *
278 			 * This means that this mapping must be done before
279 			 * any partition is created and then must not be
280 			 * changed.
281 			 */
282 
283 			/*
284 			 * core_mmu_add_mapping() may reuse previous
285 			 * mappings. First check if there are any mappings to
286 			 * reuse so we know how to clean up in case of
287 			 * failure.
288 			 */
289 			tx = phys_to_virt(tx_pa, mt, sz);
290 			rx = phys_to_virt(rx_pa, mt, sz);
291 			if (!tx) {
292 				tx = core_mmu_add_mapping(mt, tx_pa, sz);
293 				if (!tx) {
294 					rc = FFA_NO_MEMORY;
295 					goto out;
296 				}
297 				tx_alloced = true;
298 			}
299 			if (!rx)
300 				rx = core_mmu_add_mapping(mt, rx_pa, sz);
301 
302 			if (!rx) {
303 				if (tx_alloced && tx)
304 					core_mmu_remove_mapping(mt, tx, sz);
305 				rc = FFA_NO_MEMORY;
306 				goto out;
307 			}
308 		} else {
309 			rc = map_buf(tx_pa, sz, &tx);
310 			if (rc)
311 				goto out;
312 			rc = map_buf(rx_pa, sz, &rx);
313 			if (rc) {
314 				unmap_buf(tx, sz);
315 				goto out;
316 			}
317 		}
318 		rxtx->tx = tx;
319 		rxtx->rx = rx;
320 	} else {
321 		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
322 			rc = FFA_INVALID_PARAMETERS;
323 			goto out;
324 		}
325 
326 		if (!virt_to_phys((void *)tx_pa) ||
327 		    !virt_to_phys((void *)rx_pa)) {
328 			rc = FFA_INVALID_PARAMETERS;
329 			goto out;
330 		}
331 
332 		rxtx->tx = (void *)tx_pa;
333 		rxtx->rx = (void *)rx_pa;
334 	}
335 
336 	rxtx->size = sz;
337 	rxtx->tx_is_mine = true;
338 	ret_fid = FFA_SUCCESS_32;
339 	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
340 	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
341 out:
342 	cpu_spin_unlock(&rxtx->spinlock);
343 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
344 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
345 }
346 
347 void spmc_handle_rxtx_unmap(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
348 {
349 	uint32_t ret_fid = FFA_ERROR;
350 	int rc = FFA_INVALID_PARAMETERS;
351 
352 	cpu_spin_lock(&rxtx->spinlock);
353 
354 	if (!rxtx->size)
355 		goto out;
356 
357 	/* We don't unmap the SP memory as the SP might still use it */
358 	if (is_nw_buf(rxtx)) {
359 		unmap_buf(rxtx->rx, rxtx->size);
360 		unmap_buf(rxtx->tx, rxtx->size);
361 	}
362 	rxtx->size = 0;
363 	rxtx->rx = NULL;
364 	rxtx->tx = NULL;
365 	ret_fid = FFA_SUCCESS_32;
366 	rc = 0;
367 out:
368 	cpu_spin_unlock(&rxtx->spinlock);
369 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
370 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
371 }
372 
373 void spmc_handle_rx_release(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
374 {
375 	uint32_t ret_fid = 0;
376 	int rc = 0;
377 
378 	cpu_spin_lock(&rxtx->spinlock);
379 	/* The sender's RX is our TX */
380 	if (!rxtx->size || rxtx->tx_is_mine) {
381 		ret_fid = FFA_ERROR;
382 		rc = FFA_DENIED;
383 	} else {
384 		ret_fid = FFA_SUCCESS_32;
385 		rc = 0;
386 		rxtx->tx_is_mine = true;
387 	}
388 	cpu_spin_unlock(&rxtx->spinlock);
389 
390 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
391 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
392 }
393 
394 static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
395 {
396 	return !w0 && !w1 && !w2 && !w3;
397 }
398 
399 static bool is_my_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
400 {
401 	/*
402 	 * This depends on which UUID we have been assigned.
403 	 * TODO add a generic mechanism to obtain our UUID.
404 	 *
405 	 * The test below is for the hard coded UUID
406 	 * 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
407 	 */
408 	return w0 == my_uuid_words[0] && w1 == my_uuid_words[1] &&
409 	       w2 == my_uuid_words[2] && w3 == my_uuid_words[3];
410 }
411 
412 TEE_Result spmc_fill_partition_entry(uint32_t ffa_vers, void *buf, size_t blen,
413 				     size_t idx, uint16_t endpoint_id,
414 				     uint16_t execution_context,
415 				     uint32_t part_props,
416 				     const uint32_t uuid_words[4])
417 {
418 	struct ffa_partition_info_x *fpi = NULL;
419 	size_t fpi_size = sizeof(*fpi);
420 
421 	if (ffa_vers >= FFA_VERSION_1_1)
422 		fpi_size += FFA_UUID_SIZE;
423 
424 	if ((idx + 1) * fpi_size > blen)
425 		return TEE_ERROR_OUT_OF_MEMORY;
426 
427 	fpi = (void *)((vaddr_t)buf + idx * fpi_size);
428 	fpi->id = endpoint_id;
429 	/* Number of execution contexts implemented by this partition */
430 	fpi->execution_context = execution_context;
431 
432 	fpi->partition_properties = part_props;
433 
434 	if (ffa_vers >= FFA_VERSION_1_1) {
435 		if (uuid_words)
436 			memcpy(fpi->uuid, uuid_words, FFA_UUID_SIZE);
437 		else
438 			memset(fpi->uuid, 0, FFA_UUID_SIZE);
439 	}
440 
441 	return TEE_SUCCESS;
442 }
443 
444 static int handle_partition_info_get_all(size_t *elem_count,
445 					 struct ffa_rxtx *rxtx, bool count_only)
446 {
447 	if (!count_only) {
448 		/* Add OP-TEE SP */
449 		if (spmc_fill_partition_entry(rxtx->ffa_vers, rxtx->tx,
450 					      rxtx->size, 0, my_endpoint_id,
451 					      CFG_TEE_CORE_NB_CORE,
452 					      my_part_props, my_uuid_words))
453 			return FFA_NO_MEMORY;
454 	}
455 	*elem_count = 1;
456 
457 	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
458 		if (sp_partition_info_get(rxtx->ffa_vers, rxtx->tx, rxtx->size,
459 					  NULL, elem_count, count_only))
460 			return FFA_NO_MEMORY;
461 	}
462 
463 	return FFA_OK;
464 }
465 
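/*
 * Handle FFA_PARTITION_INFO_GET: w1-w4 hold the UUID to look up, with
 * the nil UUID meaning all partitions. If the count flag in w5 is set
 * only the number of matching partitions is returned, otherwise the
 * partition descriptors are written to the caller's RX buffer.
 */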
466 void spmc_handle_partition_info_get(struct thread_smc_args *args,
467 				    struct ffa_rxtx *rxtx)
468 {
469 	TEE_Result res = TEE_SUCCESS;
470 	uint32_t ret_fid = FFA_ERROR;
471 	uint32_t rc = 0;
472 	bool count_only = args->a5 & FFA_PARTITION_INFO_GET_COUNT_FLAG;
473 
474 	if (!count_only) {
475 		cpu_spin_lock(&rxtx->spinlock);
476 
477 		if (!rxtx->size || !rxtx->tx_is_mine) {
478 			rc = FFA_BUSY;
479 			goto out;
480 		}
481 	}
482 
483 	if (is_nil_uuid(args->a1, args->a2, args->a3, args->a4)) {
484 		size_t elem_count = 0;
485 
486 		ret_fid = handle_partition_info_get_all(&elem_count, rxtx,
487 							count_only);
488 
489 		if (ret_fid) {
490 			rc = ret_fid;
491 			ret_fid = FFA_ERROR;
492 		} else {
493 			ret_fid = FFA_SUCCESS_32;
494 			rc = elem_count;
495 		}
496 
497 		goto out;
498 	}
499 
500 	if (is_my_uuid(args->a1, args->a2, args->a3, args->a4)) {
501 		if (!count_only) {
502 			res = spmc_fill_partition_entry(rxtx->ffa_vers,
503 							rxtx->tx, rxtx->size, 0,
504 							my_endpoint_id,
505 							CFG_TEE_CORE_NB_CORE,
506 							my_part_props,
507 							my_uuid_words);
508 			if (res) {
509 				ret_fid = FFA_ERROR;
510 				rc = FFA_INVALID_PARAMETERS;
511 				goto out;
512 			}
513 		}
514 		rc = 1;
515 	} else if (IS_ENABLED(CFG_SECURE_PARTITION)) {
516 		uint32_t uuid_array[4] = { 0 };
517 		TEE_UUID uuid = { };
518 		size_t count = 0;
519 
520 		uuid_array[0] = args->a1;
521 		uuid_array[1] = args->a2;
522 		uuid_array[2] = args->a3;
523 		uuid_array[3] = args->a4;
524 		tee_uuid_from_octets(&uuid, (uint8_t *)uuid_array);
525 
526 		res = sp_partition_info_get(rxtx->ffa_vers, rxtx->tx,
527 					    rxtx->size, &uuid, &count,
528 					    count_only);
529 		if (res != TEE_SUCCESS) {
530 			ret_fid = FFA_ERROR;
531 			rc = FFA_INVALID_PARAMETERS;
532 			goto out;
533 		}
534 		rc = count;
535 	} else {
536 		ret_fid = FFA_ERROR;
537 		rc = FFA_INVALID_PARAMETERS;
538 		goto out;
539 	}
540 
541 	ret_fid = FFA_SUCCESS_32;
542 
543 out:
544 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
545 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
546 	if (!count_only) {
547 		rxtx->tx_is_mine = false;
548 		cpu_spin_unlock(&rxtx->spinlock);
549 	}
550 }
551 #endif /*CFG_CORE_SEL1_SPMC*/
552 
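/*
 * A yielding call either starts a new standard call on a newly
 * allocated thread or, for OPTEE_FFA_YIELDING_CALL_RESUME, resumes a
 * thread previously suspended for an RPC. Both thread_alloc_and_run()
 * and thread_resume_from_rpc() only return here on failure, in which
 * case an error is reported in the direct response.
 */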
553 static void handle_yielding_call(struct thread_smc_args *args)
554 {
555 	TEE_Result res = 0;
556 
557 	thread_check_canaries();
558 
559 	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
560 		/* Note connection to struct thread_rpc_arg::ret */
561 		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
562 				       0);
563 		res = TEE_ERROR_BAD_PARAMETERS;
564 	} else {
565 		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
566 				     args->a6, args->a7);
567 		res = TEE_ERROR_BUSY;
568 	}
569 	spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
570 		      swap_src_dst(args->a1), 0, res, 0, 0);
571 }
572 
573 static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
574 {
575 	uint64_t cookie = reg_pair_to_64(a5, a4);
576 	uint32_t res = 0;
577 
578 	res = mobj_ffa_unregister_by_cookie(cookie);
579 	switch (res) {
580 	case TEE_SUCCESS:
581 	case TEE_ERROR_ITEM_NOT_FOUND:
582 		return 0;
583 	case TEE_ERROR_BUSY:
584 		EMSG("res %#"PRIx32, res);
585 		return FFA_BUSY;
586 	default:
587 		EMSG("res %#"PRIx32, res);
588 		return FFA_INVALID_PARAMETERS;
589 	}
590 }
591 
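/*
 * Blocking calls are serviced directly on the current core without
 * allocating a thread. The service ID in w3 selects the operation and
 * the result is returned in the direct response.
 */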
592 static void handle_blocking_call(struct thread_smc_args *args)
593 {
594 	switch (args->a3) {
595 	case OPTEE_FFA_GET_API_VERSION:
596 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
597 			      swap_src_dst(args->a1), 0,
598 			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
599 			      0);
600 		break;
601 	case OPTEE_FFA_GET_OS_VERSION:
602 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
603 			      swap_src_dst(args->a1), 0,
604 			      CFG_OPTEE_REVISION_MAJOR,
605 			      CFG_OPTEE_REVISION_MINOR, TEE_IMPL_GIT_SHA1);
606 		break;
607 	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
608 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
609 			      swap_src_dst(args->a1), 0, 0,
610 			      THREAD_RPC_MAX_NUM_PARAMS,
611 			      OPTEE_FFA_SEC_CAP_ARG_OFFSET);
612 		break;
613 	case OPTEE_FFA_UNREGISTER_SHM:
614 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
615 			      swap_src_dst(args->a1), 0,
616 			      handle_unregister_shm(args->a4, args->a5), 0, 0);
617 		break;
618 	default:
619 		EMSG("Unhandled blocking service ID %#"PRIx32,
620 		     (uint32_t)args->a3);
621 		panic();
622 	}
623 }
624 
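/*
 * Handle a framework direct request, that is, a direct request with
 * FFA_MSG_FLAG_FRAMEWORK set in w2. The message type selects between
 * VM created notifications (forwarded to the virtualization code) and
 * FF-A version negotiation; anything else is answered with
 * FFA_NOT_SUPPORTED.
 */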
625 static void handle_framework_direct_request(struct thread_smc_args *args,
626 					    struct ffa_rxtx *rxtx)
627 {
628 	uint32_t w0 = FFA_ERROR;
629 	uint32_t w1 = FFA_PARAM_MBZ;
630 	uint32_t w2 = FFA_NOT_SUPPORTED;
631 	uint32_t w3 = FFA_PARAM_MBZ;
632 
633 	switch (args->a2 & FFA_MSG_TYPE_MASK) {
634 	case FFA_MSG_SEND_VM_CREATED:
635 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
636 			uint16_t guest_id = args->a5;
637 			TEE_Result res = virt_guest_created(guest_id);
638 
639 			w0 = FFA_MSG_SEND_DIRECT_RESP_32;
640 			w1 = swap_src_dst(args->a1);
641 			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_CREATED;
642 			if (res == TEE_SUCCESS)
643 				w3 = FFA_OK;
644 			else if (res == TEE_ERROR_OUT_OF_MEMORY)
645 				w3 = FFA_DENIED;
646 			else
647 				w3 = FFA_INVALID_PARAMETERS;
648 		}
649 		break;
650 	case FFA_MSG_VERSION_REQ:
651 		w0 = FFA_MSG_SEND_DIRECT_RESP_32;
652 		w1 = swap_src_dst(args->a1);
653 		w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_VERSION_RESP;
654 		w3 = exchange_version(args->a3, rxtx);
655 		break;
656 	default:
657 		break;
658 	}
659 	spmc_set_args(args, w0, w1, w2, w3, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
660 }
661 
662 static void handle_direct_request(struct thread_smc_args *args,
663 				  struct ffa_rxtx *rxtx)
664 {
665 	if (IS_ENABLED(CFG_SECURE_PARTITION) &&
666 	    FFA_DST(args->a1) != my_endpoint_id) {
667 		spmc_sp_start_thread(args);
668 		return;
669 	}
670 
671 	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
672 		handle_framework_direct_request(args, rxtx);
673 		return;
674 	}
675 
676 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
677 	    virt_set_guest(get_sender_id(args->a1))) {
678 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
679 			      swap_src_dst(args->a1), 0,
680 			      TEE_ERROR_ITEM_NOT_FOUND, 0, 0);
681 		return;
682 	}
683 
684 	if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
685 		handle_yielding_call(args);
686 	else
687 		handle_blocking_call(args);
688 
689 	/*
690 	 * Note that handle_yielding_call() typically only returns if a
691 	 * thread cannot be allocated or found. virt_unset_guest() is also
692 	 * called from thread_state_suspend() and thread_state_free().
693 	 */
694 	virt_unset_guest();
695 }
696 
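/*
 * Parse and sanity check the FF-A memory transaction descriptor in
 * @buf. The wire format differs between FF-A 1.0 and 1.1 so the fields
 * needed later are extracted into the version independent struct
 * ffa_mem_transaction_x.
 */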
697 int spmc_read_mem_transaction(uint32_t ffa_vers, void *buf, size_t blen,
698 			      struct ffa_mem_transaction_x *trans)
699 {
700 	uint16_t mem_reg_attr = 0;
701 	uint32_t flags = 0;
702 	uint32_t count = 0;
703 	uint32_t offs = 0;
704 	uint32_t size = 0;
705 	size_t n = 0;
706 
707 	if (!IS_ALIGNED_WITH_TYPE(buf, uint64_t))
708 		return FFA_INVALID_PARAMETERS;
709 
710 	if (ffa_vers >= FFA_VERSION_1_1) {
711 		struct ffa_mem_transaction_1_1 *descr = NULL;
712 
713 		if (blen < sizeof(*descr))
714 			return FFA_INVALID_PARAMETERS;
715 
716 		descr = buf;
717 		trans->sender_id = READ_ONCE(descr->sender_id);
718 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
719 		flags = READ_ONCE(descr->flags);
720 		trans->global_handle = READ_ONCE(descr->global_handle);
721 		trans->tag = READ_ONCE(descr->tag);
722 
723 		count = READ_ONCE(descr->mem_access_count);
724 		size = READ_ONCE(descr->mem_access_size);
725 		offs = READ_ONCE(descr->mem_access_offs);
726 	} else {
727 		struct ffa_mem_transaction_1_0 *descr = NULL;
728 
729 		if (blen < sizeof(*descr))
730 			return FFA_INVALID_PARAMETERS;
731 
732 		descr = buf;
733 		trans->sender_id = READ_ONCE(descr->sender_id);
734 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
735 		flags = READ_ONCE(descr->flags);
736 		trans->global_handle = READ_ONCE(descr->global_handle);
737 		trans->tag = READ_ONCE(descr->tag);
738 
739 		count = READ_ONCE(descr->mem_access_count);
740 		size = sizeof(struct ffa_mem_access);
741 		offs = offsetof(struct ffa_mem_transaction_1_0,
742 				mem_access_array);
743 	}
744 
745 	if (mem_reg_attr > UINT8_MAX || flags > UINT8_MAX ||
746 	    size > UINT8_MAX || count > UINT8_MAX || offs > UINT16_MAX)
747 		return FFA_INVALID_PARAMETERS;
748 
749 	/* Check that the endpoint memory access descriptor array fits */
750 	if (MUL_OVERFLOW(size, count, &n) || ADD_OVERFLOW(offs, n, &n) ||
751 	    n > blen)
752 		return FFA_INVALID_PARAMETERS;
753 
754 	trans->mem_reg_attr = mem_reg_attr;
755 	trans->flags = flags;
756 	trans->mem_access_size = size;
757 	trans->mem_access_count = count;
758 	trans->mem_access_offs = offs;
759 	return 0;
760 }
761 
762 #if defined(CFG_CORE_SEL1_SPMC)
763 static int get_acc_perms(vaddr_t mem_acc_base, unsigned int mem_access_size,
764 			 unsigned int mem_access_count, uint8_t *acc_perms,
765 			 unsigned int *region_offs)
766 {
767 	struct ffa_mem_access_perm *descr = NULL;
768 	struct ffa_mem_access *mem_acc = NULL;
769 	unsigned int n = 0;
770 
771 	for (n = 0; n < mem_access_count; n++) {
772 		mem_acc = (void *)(mem_acc_base + mem_access_size * n);
773 		descr = &mem_acc->access_perm;
774 		if (READ_ONCE(descr->endpoint_id) == my_endpoint_id) {
775 			*acc_perms = READ_ONCE(descr->perm);
776 			*region_offs = READ_ONCE(mem_acc->region_offs);
777 			return 0;
778 		}
779 	}
780 
781 	return FFA_INVALID_PARAMETERS;
782 }
783 
784 static int mem_share_init(struct ffa_mem_transaction_x *mem_trans, void *buf,
785 			  size_t blen, unsigned int *page_count,
786 			  unsigned int *region_count, size_t *addr_range_offs)
787 {
788 	const uint16_t exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
789 	const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
790 	struct ffa_mem_region *region_descr = NULL;
791 	unsigned int region_descr_offs = 0;
792 	uint8_t mem_acc_perm = 0;
793 	size_t n = 0;
794 
795 	if (mem_trans->mem_reg_attr != exp_mem_reg_attr)
796 		return FFA_INVALID_PARAMETERS;
797 
798 	/* Check that the access permissions match what's expected */
799 	if (get_acc_perms((vaddr_t)buf + mem_trans->mem_access_offs,
800 			  mem_trans->mem_access_size,
801 			  mem_trans->mem_access_count,
802 			  &mem_acc_perm, &region_descr_offs) ||
803 	    mem_acc_perm != exp_mem_acc_perm)
804 		return FFA_INVALID_PARAMETERS;
805 
806 	/* Check that the Composite memory region descriptor fits */
807 	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
808 	    n > blen)
809 		return FFA_INVALID_PARAMETERS;
810 
811 	if (!IS_ALIGNED_WITH_TYPE((vaddr_t)buf + region_descr_offs,
812 				  struct ffa_mem_region))
813 		return FFA_INVALID_PARAMETERS;
814 
815 	region_descr = (struct ffa_mem_region *)((vaddr_t)buf +
816 						 region_descr_offs);
817 	*page_count = READ_ONCE(region_descr->total_page_count);
818 	*region_count = READ_ONCE(region_descr->address_range_count);
819 	*addr_range_offs = n;
820 	return 0;
821 }
822 
823 static int add_mem_share_helper(struct mem_share_state *s, void *buf,
824 				size_t flen)
825 {
826 	unsigned int region_count = flen / sizeof(struct ffa_address_range);
827 	struct ffa_address_range *arange = NULL;
828 	unsigned int n = 0;
829 
830 	if (region_count > s->region_count)
831 		region_count = s->region_count;
832 
833 	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range))
834 		return FFA_INVALID_PARAMETERS;
835 	arange = buf;
836 
837 	for (n = 0; n < region_count; n++) {
838 		unsigned int page_count = READ_ONCE(arange[n].page_count);
839 		uint64_t addr = READ_ONCE(arange[n].address);
840 
841 		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
842 					  addr, page_count))
843 			return FFA_INVALID_PARAMETERS;
844 	}
845 
846 	s->region_count -= region_count;
847 	if (s->region_count)
848 		return region_count * sizeof(*arange);
849 
850 	if (s->current_page_idx != s->page_count)
851 		return FFA_INVALID_PARAMETERS;
852 
853 	return 0;
854 }
855 
856 static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
857 {
858 	int rc = 0;
859 
860 	rc = add_mem_share_helper(&s->share, buf, flen);
861 	if (rc >= 0) {
862 		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
863 			/* We're not at the end of the descriptor yet */
864 			if (s->share.region_count)
865 				return s->frag_offset;
866 
867 			/* We're done */
868 			rc = 0;
869 		} else {
870 			rc = FFA_INVALID_PARAMETERS;
871 		}
872 	}
873 
874 	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
875 	if (rc < 0)
876 		mobj_ffa_sel1_spmc_delete(s->share.mf);
877 	else
878 		mobj_ffa_push_to_inactive(s->share.mf);
879 	free(s);
880 
881 	return rc;
882 }
883 
884 static bool is_sp_share(struct ffa_mem_transaction_x *mem_trans,
885 			void *buf)
886 {
887 	struct ffa_mem_access_perm *perm = NULL;
888 	struct ffa_mem_access *mem_acc = NULL;
889 
890 	if (!IS_ENABLED(CFG_SECURE_PARTITION))
891 		return false;
892 
893 	if (mem_trans->mem_access_count < 1)
894 		return false;
895 
896 	mem_acc = (void *)((vaddr_t)buf + mem_trans->mem_access_offs);
897 	perm = &mem_acc->access_perm;
898 
899 	/*
900 	 * perm->endpoint_id is read here only to check if the endpoint is
901 	 * OP-TEE. It is read again later on, with additional checks there to
902 	 * make sure that the data is correct.
903 	 */
904 	return READ_ONCE(perm->endpoint_id) != my_endpoint_id;
905 }
906 
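/*
 * Register a new shared memory object from the transaction descriptor
 * in @buf. If the descriptor doesn't fit in this fragment (flen !=
 * blen) the partially parsed state is kept in a struct mem_frag_state
 * to be completed by following FFA_MEM_FRAG_TX calls, otherwise the
 * mobj is pushed to the inactive list and its cookie is returned via
 * @global_handle.
 */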
907 static int add_mem_share(struct ffa_mem_transaction_x *mem_trans,
908 			 tee_mm_entry_t *mm, void *buf, size_t blen,
909 			 size_t flen, uint64_t *global_handle)
910 {
911 	int rc = 0;
912 	struct mem_share_state share = { };
913 	size_t addr_range_offs = 0;
914 	uint64_t cookie = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
915 	size_t n = 0;
916 
917 	rc = mem_share_init(mem_trans, buf, flen, &share.page_count,
918 			    &share.region_count, &addr_range_offs);
919 	if (rc)
920 		return rc;
921 
922 	if (MUL_OVERFLOW(share.region_count,
923 			 sizeof(struct ffa_address_range), &n) ||
924 	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
925 		return FFA_INVALID_PARAMETERS;
926 
927 	if (mem_trans->global_handle)
928 		cookie = mem_trans->global_handle;
929 	share.mf = mobj_ffa_sel1_spmc_new(cookie, share.page_count);
930 	if (!share.mf)
931 		return FFA_NO_MEMORY;
932 
933 	if (flen != blen) {
934 		struct mem_frag_state *s = calloc(sizeof(*s), 1);
935 
936 		if (!s) {
937 			rc = FFA_NO_MEMORY;
938 			goto err;
939 		}
940 		s->share = share;
941 		s->mm = mm;
942 		s->frag_offset = addr_range_offs;
943 
944 		SLIST_INSERT_HEAD(&frag_state_head, s, link);
945 		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
946 					flen - addr_range_offs);
947 
948 		if (rc >= 0)
949 			*global_handle = mobj_ffa_get_cookie(share.mf);
950 
951 		return rc;
952 	}
953 
954 	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
955 				  flen - addr_range_offs);
956 	if (rc) {
957 		/*
958 		 * A positive return (number of consumed bytes) means more
959 		 * address ranges were expected, an error for a complete descriptor.
960 		 */
961 		rc = FFA_INVALID_PARAMETERS;
962 		goto err;
963 	}
964 
965 	*global_handle = mobj_ffa_push_to_inactive(share.mf);
966 
967 	return 0;
968 err:
969 	mobj_ffa_sel1_spmc_delete(share.mf);
970 	return rc;
971 }
972 
973 static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
974 				 unsigned int page_count,
975 				 uint64_t *global_handle, struct ffa_rxtx *rxtx)
976 {
977 	struct ffa_mem_transaction_x mem_trans = { };
978 	int rc = 0;
979 	size_t len = 0;
980 	void *buf = NULL;
981 	tee_mm_entry_t *mm = NULL;
982 	vaddr_t offs = pbuf & SMALL_PAGE_MASK;
983 
984 	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
985 		return FFA_INVALID_PARAMETERS;
986 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
987 		return FFA_INVALID_PARAMETERS;
988 
989 	/*
990 	 * Check that the length reported in flen is covered by len even
991 	 * if the offset is taken into account.
992 	 */
993 	if (len < flen || len - offs < flen)
994 		return FFA_INVALID_PARAMETERS;
995 
996 	mm = tee_mm_alloc(&tee_mm_shm, len);
997 	if (!mm)
998 		return FFA_NO_MEMORY;
999 
1000 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
1001 					  page_count, MEM_AREA_NSEC_SHM)) {
1002 		rc = FFA_INVALID_PARAMETERS;
1003 		goto out;
1004 	}
1005 	buf = (void *)(tee_mm_get_smem(mm) + offs);
1006 
1007 	cpu_spin_lock(&rxtx->spinlock);
1008 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, buf, flen, &mem_trans);
1009 	if (!rc && IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1010 	    virt_set_guest(mem_trans.sender_id))
1011 		rc = FFA_DENIED;
1012 	if (!rc)
1013 		rc = add_mem_share(&mem_trans, mm, buf, blen, flen,
1014 				   global_handle);
1015 	virt_unset_guest();
1016 	cpu_spin_unlock(&rxtx->spinlock);
1017 	if (rc > 0)
1018 		return rc;
1019 
1020 	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1021 out:
1022 	tee_mm_free(mm);
1023 	return rc;
1024 }
1025 
1026 static int handle_mem_share_rxbuf(size_t blen, size_t flen,
1027 				  uint64_t *global_handle,
1028 				  struct ffa_rxtx *rxtx)
1029 {
1030 	struct ffa_mem_transaction_x mem_trans = { };
1031 	int rc = FFA_DENIED;
1032 
1033 	cpu_spin_lock(&rxtx->spinlock);
1034 
1035 	if (!rxtx->rx || flen > rxtx->size)
1036 		goto out;
1037 
1038 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, flen,
1039 				       &mem_trans);
1040 	if (rc)
1041 		goto out;
1042 	if (is_sp_share(&mem_trans, rxtx->rx)) {
1043 		rc = spmc_sp_add_share(&mem_trans, rxtx, blen,
1044 				       global_handle, NULL);
1045 		goto out;
1046 	}
1047 
1048 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1049 	    virt_set_guest(mem_trans.sender_id))
1050 		goto out;
1051 
1052 	rc = add_mem_share(&mem_trans, NULL, rxtx->rx, blen, flen,
1053 			   global_handle);
1054 
1055 	virt_unset_guest();
1056 
1057 out:
1058 	cpu_spin_unlock(&rxtx->spinlock);
1059 
1060 	return rc;
1061 }
1062 
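/*
 * Handle FFA_MEM_SHARE: w1 holds the total length of the transaction
 * descriptor, w2 the length of this fragment, w3 the address of a
 * buffer holding the descriptor (0 means it's in our RX buffer) and w4
 * the number of pages in that buffer. A positive return value from the
 * helpers means that more fragments are expected and FFA_MEM_FRAG_RX
 * is returned to the caller.
 */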
1063 static void handle_mem_share(struct thread_smc_args *args,
1064 			     struct ffa_rxtx *rxtx)
1065 {
1066 	uint32_t tot_len = args->a1;
1067 	uint32_t frag_len = args->a2;
1068 	uint64_t addr = args->a3;
1069 	uint32_t page_count = args->a4;
1070 	uint32_t ret_w1 = 0;
1071 	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
1072 	uint32_t ret_w3 = 0;
1073 	uint32_t ret_fid = FFA_ERROR;
1074 	uint64_t global_handle = 0;
1075 	int rc = 0;
1076 
1077 	/* Check that the MBZs are indeed 0 */
1078 	if (args->a5 || args->a6 || args->a7)
1079 		goto out;
1080 
1081 	/* Check that fragment length doesn't exceed total length */
1082 	if (frag_len > tot_len)
1083 		goto out;
1084 
1085 	/* Check for 32-bit calling convention */
1086 	if (args->a0 == FFA_MEM_SHARE_32)
1087 		addr &= UINT32_MAX;
1088 
1089 	if (!addr) {
1090 		/*
1091 		 * The memory transaction descriptor is passed via our rx
1092 		 * buffer.
1093 		 */
1094 		if (page_count)
1095 			goto out;
1096 		rc = handle_mem_share_rxbuf(tot_len, frag_len, &global_handle,
1097 					    rxtx);
1098 	} else {
1099 		rc = handle_mem_share_tmem(addr, tot_len, frag_len, page_count,
1100 					   &global_handle, rxtx);
1101 	}
1102 	if (rc < 0) {
1103 		ret_w2 = rc;
1104 	} else if (rc > 0) {
1105 		ret_fid = FFA_MEM_FRAG_RX;
1106 		ret_w3 = rc;
1107 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1108 	} else {
1109 		ret_fid = FFA_SUCCESS_32;
1110 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1111 	}
1112 out:
1113 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1114 }
1115 
1116 static struct mem_frag_state *get_frag_state(uint64_t global_handle)
1117 {
1118 	struct mem_frag_state *s = NULL;
1119 
1120 	SLIST_FOREACH(s, &frag_state_head, link)
1121 		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
1122 			return s;
1123 
1124 	return NULL;
1125 }
1126 
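/*
 * Handle FFA_MEM_FRAG_TX: the caller transmits the next fragment of a
 * previously started FFA_MEM_SHARE. The handle in w1/w2 identifies the
 * pending share, w3 holds the fragment length and w4 identifies the
 * sender, used to select the right guest when virtualization is
 * enabled.
 */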
1127 static void handle_mem_frag_tx(struct thread_smc_args *args,
1128 			       struct ffa_rxtx *rxtx)
1129 {
1130 	uint64_t global_handle = reg_pair_to_64(args->a2, args->a1);
1131 	size_t flen = args->a3;
1132 	uint32_t endpoint_id = args->a4;
1133 	struct mem_frag_state *s = NULL;
1134 	tee_mm_entry_t *mm = NULL;
1135 	unsigned int page_count = 0;
1136 	void *buf = NULL;
1137 	uint32_t ret_w1 = 0;
1138 	uint32_t ret_w2 = 0;
1139 	uint32_t ret_w3 = 0;
1140 	uint32_t ret_fid = 0;
1141 	int rc = 0;
1142 
1143 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1144 		uint16_t guest_id = endpoint_id >> 16;
1145 
1146 		if (!guest_id || virt_set_guest(guest_id)) {
1147 			rc = FFA_INVALID_PARAMETERS;
1148 			goto out_set_rc;
1149 		}
1150 	}
1151 
1152 	/*
1153 	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
1154 	 * requests.
1155 	 */
1156 
1157 	cpu_spin_lock(&rxtx->spinlock);
1158 
1159 	s = get_frag_state(global_handle);
1160 	if (!s) {
1161 		rc = FFA_INVALID_PARAMETERS;
1162 		goto out;
1163 	}
1164 
1165 	mm = s->mm;
1166 	if (mm) {
1167 		if (flen > tee_mm_get_bytes(mm)) {
1168 			rc = FFA_INVALID_PARAMETERS;
1169 			goto out;
1170 		}
1171 		page_count = s->share.page_count;
1172 		buf = (void *)tee_mm_get_smem(mm);
1173 	} else {
1174 		if (flen > rxtx->size) {
1175 			rc = FFA_INVALID_PARAMETERS;
1176 			goto out;
1177 		}
1178 		buf = rxtx->rx;
1179 	}
1180 
1181 	rc = add_mem_share_frag(s, buf, flen);
1182 out:
1183 	virt_unset_guest();
1184 	cpu_spin_unlock(&rxtx->spinlock);
1185 
1186 	if (rc <= 0 && mm) {
1187 		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1188 		tee_mm_free(mm);
1189 	}
1190 
1191 out_set_rc:
1192 	if (rc < 0) {
1193 		ret_fid = FFA_ERROR;
1194 		ret_w2 = rc;
1195 	} else if (rc > 0) {
1196 		ret_fid = FFA_MEM_FRAG_RX;
1197 		ret_w3 = rc;
1198 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1199 	} else {
1200 		ret_fid = FFA_SUCCESS_32;
1201 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1202 	}
1203 
1204 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1205 }
1206 
1207 static void handle_mem_reclaim(struct thread_smc_args *args)
1208 {
1209 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1210 	uint32_t ret_fid = FFA_ERROR;
1211 	uint64_t cookie = 0;
1212 
1213 	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
1214 		goto out;
1215 
1216 	cookie = reg_pair_to_64(args->a2, args->a1);
1217 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1218 		uint16_t guest_id = 0;
1219 
1220 		if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
1221 			guest_id = virt_find_guest_by_cookie(cookie);
1222 		} else {
1223 			guest_id = (cookie >> FFA_MEMORY_HANDLE_PRTN_SHIFT) &
1224 				   FFA_MEMORY_HANDLE_PRTN_MASK;
1225 		}
1226 		if (!guest_id || virt_set_guest(guest_id))
1227 			goto out;
1228 	}
1229 
1230 	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
1231 	case TEE_SUCCESS:
1232 		ret_fid = FFA_SUCCESS_32;
1233 		ret_val = 0;
1234 		break;
1235 	case TEE_ERROR_ITEM_NOT_FOUND:
1236 		DMSG("cookie %#"PRIx64" not found", cookie);
1237 		ret_val = FFA_INVALID_PARAMETERS;
1238 		break;
1239 	default:
1240 		DMSG("cookie %#"PRIx64" busy", cookie);
1241 		ret_val = FFA_DENIED;
1242 		break;
1243 	}
1244 
1245 	virt_unset_guest();
1246 
1247 out:
1248 	spmc_set_args(args, ret_fid, ret_val, 0, 0, 0, 0);
1249 }
1250 #endif
1251 
1252 /* Only called from assembly */
1253 void thread_spmc_msg_recv(struct thread_smc_args *args);
1254 void thread_spmc_msg_recv(struct thread_smc_args *args)
1255 {
1256 	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
1257 	switch (args->a0) {
1258 #if defined(CFG_CORE_SEL1_SPMC)
1259 	case FFA_VERSION:
1260 		spmc_handle_version(args, &nw_rxtx);
1261 		break;
1262 	case FFA_FEATURES:
1263 		handle_features(args);
1264 		break;
1265 #ifdef ARM64
1266 	case FFA_RXTX_MAP_64:
1267 #endif
1268 	case FFA_RXTX_MAP_32:
1269 		spmc_handle_rxtx_map(args, &nw_rxtx);
1270 		break;
1271 	case FFA_RXTX_UNMAP:
1272 		spmc_handle_rxtx_unmap(args, &nw_rxtx);
1273 		break;
1274 	case FFA_RX_RELEASE:
1275 		spmc_handle_rx_release(args, &nw_rxtx);
1276 		break;
1277 	case FFA_PARTITION_INFO_GET:
1278 		spmc_handle_partition_info_get(args, &nw_rxtx);
1279 		break;
1280 #endif /*CFG_CORE_SEL1_SPMC*/
1281 	case FFA_INTERRUPT:
1282 		interrupt_main_handler();
1283 		spmc_set_args(args, FFA_MSG_WAIT, 0, 0, 0, 0, 0);
1284 		break;
1285 #ifdef ARM64
1286 	case FFA_MSG_SEND_DIRECT_REQ_64:
1287 #endif
1288 	case FFA_MSG_SEND_DIRECT_REQ_32:
1289 		handle_direct_request(args, &nw_rxtx);
1290 		break;
1291 #if defined(CFG_CORE_SEL1_SPMC)
1292 #ifdef ARM64
1293 	case FFA_MEM_SHARE_64:
1294 #endif
1295 	case FFA_MEM_SHARE_32:
1296 		handle_mem_share(args, &nw_rxtx);
1297 		break;
1298 	case FFA_MEM_RECLAIM:
1299 		if (!IS_ENABLED(CFG_SECURE_PARTITION) ||
1300 		    !ffa_mem_reclaim(args, NULL))
1301 			handle_mem_reclaim(args);
1302 		break;
1303 	case FFA_MEM_FRAG_TX:
1304 		handle_mem_frag_tx(args, &nw_rxtx);
1305 		break;
1306 #endif /*CFG_CORE_SEL1_SPMC*/
1307 	default:
1308 		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
1309 		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
1310 			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1311 	}
1312 }
1313 
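/*
 * Entered via OPTEE_FFA_YIELDING_CALL_WITH_ARG: the cookie identifies
 * the FF-A shared memory object holding the struct optee_msg_arg at
 * @offset, directly followed by the buffer used for RPC arguments
 * while the call is processed.
 */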
1314 static TEE_Result yielding_call_with_arg(uint64_t cookie, uint32_t offset)
1315 {
1316 	size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1317 	struct thread_ctx *thr = threads + thread_get_id();
1318 	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
1319 	struct optee_msg_arg *arg = NULL;
1320 	struct mobj *mobj = NULL;
1321 	uint32_t num_params = 0;
1322 	size_t sz = 0;
1323 
1324 	mobj = mobj_ffa_get_by_cookie(cookie, 0);
1325 	if (!mobj) {
1326 		EMSG("Can't find cookie %#"PRIx64, cookie);
1327 		return TEE_ERROR_BAD_PARAMETERS;
1328 	}
1329 
1330 	res = mobj_inc_map(mobj);
1331 	if (res)
1332 		goto out_put_mobj;
1333 
1334 	res = TEE_ERROR_BAD_PARAMETERS;
1335 	arg = mobj_get_va(mobj, offset, sizeof(*arg));
1336 	if (!arg)
1337 		goto out_dec_map;
1338 
1339 	num_params = READ_ONCE(arg->num_params);
1340 	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
1341 		goto out_dec_map;
1342 
1343 	sz = OPTEE_MSG_GET_ARG_SIZE(num_params);
1344 
1345 	thr->rpc_arg = mobj_get_va(mobj, offset + sz, sz_rpc);
1346 	if (!thr->rpc_arg)
1347 		goto out_dec_map;
1348 
1349 	virt_on_stdcall();
1350 	res = tee_entry_std(arg, num_params);
1351 
1352 	thread_rpc_shm_cache_clear(&thr->shm_cache);
1353 	thr->rpc_arg = NULL;
1354 
1355 out_dec_map:
1356 	mobj_dec_map(mobj);
1357 out_put_mobj:
1358 	mobj_put(mobj);
1359 	return res;
1360 }
1361 
1362 /*
1363  * Helper routine for the assembly function thread_std_smc_entry()
1364  *
1365  * Note: this function is weak just to make link_dummies_paged.c happy.
1366  */
1367 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
1368 				       uint32_t a2, uint32_t a3,
1369 				       uint32_t a4, uint32_t a5 __unused)
1370 {
1371 	/*
1372 	 * Arguments are supplied from handle_yielding_call() as:
1373 	 * a0 <- w1
1374 	 * a1 <- w3
1375 	 * a2 <- w4
1376 	 * a3 <- w5
1377 	 * a4 <- w6
1378 	 * a5 <- w7
1379 	 */
1380 	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
1381 	if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG)
1382 		return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4);
1383 	return FFA_DENIED;
1384 }
1385 
1386 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
1387 {
1388 	uint64_t offs = tpm->u.memref.offs;
1389 
1390 	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
1391 		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
1392 
1393 	param->u.fmem.offs_low = offs;
1394 	param->u.fmem.offs_high = offs >> 32;
1395 	if (param->u.fmem.offs_high != offs >> 32)
1396 		return false;
1397 
1398 	param->u.fmem.size = tpm->u.memref.size;
1399 	if (tpm->u.memref.mobj) {
1400 		uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj);
1401 
1402 		/* If a mobj is passed it must be one with a valid cookie. */
1403 		if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
1404 			return false;
1405 		param->u.fmem.global_id = cookie;
1406 	} else {
1407 		param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
1408 	}
1409 
1410 	return true;
1411 }
1412 
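/*
 * Build an RPC request in the per thread RPC argument buffer set up by
 * yielding_call_with_arg(), translating struct thread_param entries
 * into OPTEE_MSG attributes and values.
 */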
1413 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
1414 			    struct thread_param *params,
1415 			    struct optee_msg_arg **arg_ret)
1416 {
1417 	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1418 	struct thread_ctx *thr = threads + thread_get_id();
1419 	struct optee_msg_arg *arg = thr->rpc_arg;
1420 
1421 	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
1422 		return TEE_ERROR_BAD_PARAMETERS;
1423 
1424 	if (!arg) {
1425 		EMSG("rpc_arg not set");
1426 		return TEE_ERROR_GENERIC;
1427 	}
1428 
1429 	memset(arg, 0, sz);
1430 	arg->cmd = cmd;
1431 	arg->num_params = num_params;
1432 	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
1433 
1434 	for (size_t n = 0; n < num_params; n++) {
1435 		switch (params[n].attr) {
1436 		case THREAD_PARAM_ATTR_NONE:
1437 			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
1438 			break;
1439 		case THREAD_PARAM_ATTR_VALUE_IN:
1440 		case THREAD_PARAM_ATTR_VALUE_OUT:
1441 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1442 			arg->params[n].attr = params[n].attr -
1443 					      THREAD_PARAM_ATTR_VALUE_IN +
1444 					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
1445 			arg->params[n].u.value.a = params[n].u.value.a;
1446 			arg->params[n].u.value.b = params[n].u.value.b;
1447 			arg->params[n].u.value.c = params[n].u.value.c;
1448 			break;
1449 		case THREAD_PARAM_ATTR_MEMREF_IN:
1450 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1451 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1452 			if (!set_fmem(arg->params + n, params + n))
1453 				return TEE_ERROR_BAD_PARAMETERS;
1454 			break;
1455 		default:
1456 			return TEE_ERROR_BAD_PARAMETERS;
1457 		}
1458 	}
1459 
1460 	if (arg_ret)
1461 		*arg_ret = arg;
1462 
1463 	return TEE_SUCCESS;
1464 }
1465 
1466 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
1467 				struct thread_param *params)
1468 {
1469 	for (size_t n = 0; n < num_params; n++) {
1470 		switch (params[n].attr) {
1471 		case THREAD_PARAM_ATTR_VALUE_OUT:
1472 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1473 			params[n].u.value.a = arg->params[n].u.value.a;
1474 			params[n].u.value.b = arg->params[n].u.value.b;
1475 			params[n].u.value.c = arg->params[n].u.value.c;
1476 			break;
1477 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1478 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1479 			params[n].u.memref.size = arg->params[n].u.fmem.size;
1480 			break;
1481 		default:
1482 			break;
1483 		}
1484 	}
1485 
1486 	return arg->ret;
1487 }
1488 
1489 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
1490 			struct thread_param *params)
1491 {
1492 	struct thread_rpc_arg rpc_arg = { .call = {
1493 			.w1 = thread_get_tsd()->rpc_target_info,
1494 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1495 		},
1496 	};
1497 	struct optee_msg_arg *arg = NULL;
1498 	uint32_t ret = 0;
1499 
1500 	ret = get_rpc_arg(cmd, num_params, params, &arg);
1501 	if (ret)
1502 		return ret;
1503 
1504 	thread_rpc(&rpc_arg);
1505 
1506 	return get_rpc_arg_res(arg, num_params, params);
1507 }
1508 
1509 static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
1510 {
1511 	struct thread_rpc_arg rpc_arg = { .call = {
1512 			.w1 = thread_get_tsd()->rpc_target_info,
1513 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1514 		},
1515 	};
1516 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
1517 	uint32_t res2 = 0;
1518 	uint32_t res = 0;
1519 
1520 	DMSG("freeing cookie %#"PRIx64, cookie);
1521 
1522 	res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);
1523 
1524 	mobj_put(mobj);
1525 	res2 = mobj_ffa_unregister_by_cookie(cookie);
1526 	if (res2)
1527 		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
1528 		     cookie, res2);
1529 	if (!res)
1530 		thread_rpc(&rpc_arg);
1531 }
1532 
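/*
 * Ask normal world, via an RPC, to allocate shared memory of @size
 * bytes for buffer type @bt. The returned memory is identified by an
 * FF-A cookie which is resolved to a mobj and mapped before it is
 * returned.
 */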
1533 static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
1534 {
1535 	struct thread_rpc_arg rpc_arg = { .call = {
1536 			.w1 = thread_get_tsd()->rpc_target_info,
1537 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1538 		},
1539 	};
1540 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
1541 	struct optee_msg_arg *arg = NULL;
1542 	unsigned int internal_offset = 0;
1543 	struct mobj *mobj = NULL;
1544 	uint64_t cookie = 0;
1545 
1546 	if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
1547 		return NULL;
1548 
1549 	thread_rpc(&rpc_arg);
1550 
1551 	if (arg->num_params != 1 ||
1552 	    arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
1553 		return NULL;
1554 
1555 	internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs);
1556 	cookie = READ_ONCE(arg->params->u.fmem.global_id);
1557 	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
1558 	if (!mobj) {
1559 		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
1560 		     cookie, internal_offset);
1561 		return NULL;
1562 	}
1563 
1564 	assert(mobj_is_nonsec(mobj));
1565 
1566 	if (mobj->size < size) {
1567 		DMSG("Mobj %#"PRIx64": wrong size", cookie);
1568 		mobj_put(mobj);
1569 		return NULL;
1570 	}
1571 
1572 	if (mobj_inc_map(mobj)) {
1573 		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
1574 		mobj_put(mobj);
1575 		return NULL;
1576 	}
1577 
1578 	return mobj;
1579 }
1580 
1581 struct mobj *thread_rpc_alloc_payload(size_t size)
1582 {
1583 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
1584 }
1585 
1586 struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
1587 {
1588 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
1589 }
1590 
1591 void thread_rpc_free_kernel_payload(struct mobj *mobj)
1592 {
1593 	thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL, mobj_get_cookie(mobj), mobj);
1594 }
1595 
1596 void thread_rpc_free_payload(struct mobj *mobj)
1597 {
1598 	thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
1599 			mobj);
1600 }
1601 
1602 struct mobj *thread_rpc_alloc_global_payload(size_t size)
1603 {
1604 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
1605 }
1606 
1607 void thread_rpc_free_global_payload(struct mobj *mobj)
1608 {
1609 	thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL, mobj_get_cookie(mobj),
1610 			mobj);
1611 }
1612 
1613 void thread_spmc_register_secondary_ep(vaddr_t ep)
1614 {
1615 	unsigned long ret = 0;
1616 
1617 	/* Let the SPM know the entry point for secondary CPUs */
1618 	ret = thread_smc(FFA_SECONDARY_EP_REGISTER_64, ep, 0, 0);
1619 
1620 	if (ret != FFA_SUCCESS_32 && ret != FFA_SUCCESS_64)
1621 		EMSG("FFA_SECONDARY_EP_REGISTER_64 ret %#lx", ret);
1622 }
1623 
1624 #if defined(CFG_CORE_SEL1_SPMC)
1625 static TEE_Result spmc_init(void)
1626 {
1627 	my_endpoint_id = SPMC_ENDPOINT_ID;
1628 	DMSG("My endpoint ID %#x", my_endpoint_id);
1629 
1630 	/*
1631 	 * If the SPMD thinks we are version 1.0 it will report version 1.0
1632 	 * to normal world regardless of what version we query the SPM with.
1633 	 * However, if the SPMD thinks we are version 1.1 it will forward
1634 	 * version queries from normal world to let us negotiate the version.
1635 	 * So by setting version 1.0 here we should be compatible either way.
1636 	 *
1637 	 * Note that disagreement on the negotiated version means that we'll
1638 	 * have communication problems with normal world.
1639 	 */
1640 	nw_rxtx.ffa_vers = FFA_VERSION_1_0;
1641 
1642 	return TEE_SUCCESS;
1643 }
1644 #else /* !defined(CFG_CORE_SEL1_SPMC) */
1645 static bool is_ffa_success(uint32_t fid)
1646 {
1647 #ifdef ARM64
1648 	if (fid == FFA_SUCCESS_64)
1649 		return true;
1650 #endif
1651 	return fid == FFA_SUCCESS_32;
1652 }
1653 
1654 static void spmc_rxtx_map(struct ffa_rxtx *rxtx)
1655 {
1656 	struct thread_smc_args args = {
1657 #ifdef ARM64
1658 		.a0 = FFA_RXTX_MAP_64,
1659 #else
1660 		.a0 = FFA_RXTX_MAP_32,
1661 #endif
1662 		.a1 = virt_to_phys(rxtx->tx),
1663 		.a2 = virt_to_phys(rxtx->rx),
1664 		.a3 = 1,
1665 	};
1666 
1667 	thread_smccc(&args);
1668 	if (!is_ffa_success(args.a0)) {
1669 		if (args.a0 == FFA_ERROR)
1670 			EMSG("rxtx map failed with error %ld", args.a2);
1671 		else
1672 			EMSG("rxtx map failed");
1673 		panic();
1674 	}
1675 }
1676 
1677 static uint16_t spmc_get_id(void)
1678 {
1679 	struct thread_smc_args args = {
1680 		.a0 = FFA_ID_GET,
1681 	};
1682 
1683 	thread_smccc(&args);
1684 	if (!is_ffa_success(args.a0)) {
1685 		if (args.a0 == FFA_ERROR)
1686 			EMSG("Get id failed with error %ld", args.a2);
1687 		else
1688 			EMSG("Get id failed");
1689 		panic();
1690 	}
1691 
1692 	return args.a2;
1693 }
1694 
1695 static uint32_t get_ffa_version(uint32_t my_version)
1696 {
1697 	struct thread_smc_args args = {
1698 		.a0 = FFA_VERSION,
1699 		.a1 = my_version,
1700 	};
1701 
1702 	thread_smccc(&args);
1703 	if (args.a0 & BIT(31)) {
1704 		EMSG("FF-A version failed with error %ld", args.a0);
1705 		panic();
1706 	}
1707 
1708 	return args.a0;
1709 }
1710 
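/*
 * With the SPMC at a higher exception level: build an
 * FFA_MEM_RETRIEVE_REQ for @cookie in the TX buffer and return a
 * pointer to the retrieve response in the RX buffer, or NULL on
 * failure. The caller is expected to release the RX buffer when done.
 */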
1711 static void *spmc_retrieve_req(uint64_t cookie,
1712 			       struct ffa_mem_transaction_x *trans)
1713 {
1714 	struct ffa_mem_access *acc_descr_array = NULL;
1715 	struct ffa_mem_access_perm *perm_descr = NULL;
1716 	struct thread_smc_args args = {
1717 		.a0 = FFA_MEM_RETRIEVE_REQ_32,
1718 		.a3 =	0,	/* Address, Using TX -> MBZ */
1719 		.a4 =   0,	/* Using TX -> MBZ */
1720 	};
1721 	size_t size = 0;
1722 	int rc = 0;
1723 
1724 	if (nw_rxtx.ffa_vers == FFA_VERSION_1_0) {
1725 		struct ffa_mem_transaction_1_0 *trans_descr = nw_rxtx.tx;
1726 
1727 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
1728 		memset(trans_descr, 0, size);
1729 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
1730 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
1731 		trans_descr->global_handle = cookie;
1732 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
1733 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
1734 		trans_descr->mem_access_count = 1;
1735 		acc_descr_array = trans_descr->mem_access_array;
1736 	} else {
1737 		struct ffa_mem_transaction_1_1 *trans_descr = nw_rxtx.tx;
1738 
1739 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
1740 		memset(trans_descr, 0, size);
1741 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
1742 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
1743 		trans_descr->global_handle = cookie;
1744 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
1745 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
1746 		trans_descr->mem_access_count = 1;
1747 		trans_descr->mem_access_offs = sizeof(*trans_descr);
1748 		trans_descr->mem_access_size = sizeof(struct ffa_mem_access);
1749 		acc_descr_array = (void *)((vaddr_t)nw_rxtx.tx +
1750 					   sizeof(*trans_descr));
1751 	}
1752 	acc_descr_array->region_offs = 0;
1753 	acc_descr_array->reserved = 0;
1754 	perm_descr = &acc_descr_array->access_perm;
1755 	perm_descr->endpoint_id = my_endpoint_id;
1756 	perm_descr->perm = FFA_MEM_ACC_RW;
1757 	perm_descr->flags = 0;
1758 
1759 	args.a1 = size; /* Total Length */
1760 	args.a2 = size; /* Frag Length == Total length */
1761 	thread_smccc(&args);
1762 	if (args.a0 != FFA_MEM_RETRIEVE_RESP) {
1763 		if (args.a0 == FFA_ERROR)
1764 			EMSG("Failed to fetch cookie %#"PRIx64" error code %d",
1765 			     cookie, (int)args.a2);
1766 		else
1767 			EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64,
1768 			     cookie, args.a0);
1769 		return NULL;
1770 	}
1771 	rc = spmc_read_mem_transaction(nw_rxtx.ffa_vers, nw_rxtx.tx,
1772 				       nw_rxtx.size, trans);
1773 	if (rc) {
1774 		EMSG("Memory transaction failure for cookie %#"PRIx64" rc %d",
1775 		     cookie, rc);
1776 		return NULL;
1777 	}
1778 
1779 	return nw_rxtx.rx;
1780 }
1781 
1782 void thread_spmc_relinquish(uint64_t cookie)
1783 {
1784 	struct ffa_mem_relinquish *relinquish_desc = nw_rxtx.tx;
1785 	struct thread_smc_args args = {
1786 		.a0 = FFA_MEM_RELINQUISH,
1787 	};
1788 
1789 	memset(relinquish_desc, 0, sizeof(*relinquish_desc));
1790 	relinquish_desc->handle = cookie;
1791 	relinquish_desc->flags = 0;
1792 	relinquish_desc->endpoint_count = 1;
1793 	relinquish_desc->endpoint_id_array[0] = my_endpoint_id;
1794 	thread_smccc(&args);
1795 	if (!is_ffa_success(args.a0))
1796 		EMSG("Failed to relinquish cookie %#"PRIx64, cookie);
1797 }
1798 
1799 static int set_pages(struct ffa_address_range *regions,
1800 		     unsigned int num_regions, unsigned int num_pages,
1801 		     struct mobj_ffa *mf)
1802 {
1803 	unsigned int n = 0;
1804 	unsigned int idx = 0;
1805 
1806 	for (n = 0; n < num_regions; n++) {
1807 		unsigned int page_count = READ_ONCE(regions[n].page_count);
1808 		uint64_t addr = READ_ONCE(regions[n].address);
1809 
1810 		if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count))
1811 			return FFA_INVALID_PARAMETERS;
1812 	}
1813 
1814 	if (idx != num_pages)
1815 		return FFA_INVALID_PARAMETERS;
1816 
1817 	return 0;
1818 }
1819 
1820 struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie)
1821 {
1822 	struct mobj_ffa *ret = NULL;
1823 	struct ffa_mem_transaction_x retrieve_desc = { };
1824 	struct ffa_mem_access *descr_array = NULL;
1825 	struct ffa_mem_region *descr = NULL;
1826 	struct mobj_ffa *mf = NULL;
1827 	unsigned int num_pages = 0;
1828 	unsigned int offs = 0;
1829 	void *buf = NULL;
1830 	struct thread_smc_args ffa_rx_release_args = {
1831 		.a0 = FFA_RX_RELEASE
1832 	};
1833 
1834 	/*
1835 	 * OP-TEE only supports a single mem_region, while the
1836 	 * specification allows for more than one.
1837 	 */
1838 	buf = spmc_retrieve_req(cookie, &retrieve_desc);
1839 	if (!buf) {
1840 		EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64,
1841 		     cookie);
1842 		return NULL;
1843 	}
1844 
1845 	descr_array = (void *)((vaddr_t)buf + retrieve_desc.mem_access_offs);
1846 	offs = READ_ONCE(descr_array->region_offs);
1847 	descr = (struct ffa_mem_region *)((vaddr_t)buf + offs);
1848 
1849 	num_pages = READ_ONCE(descr->total_page_count);
1850 	mf = mobj_ffa_spmc_new(cookie, num_pages);
1851 	if (!mf)
1852 		goto out;
1853 
1854 	if (set_pages(descr->address_range_array,
1855 		      READ_ONCE(descr->address_range_count), num_pages, mf)) {
1856 		mobj_ffa_spmc_delete(mf);
1857 		goto out;
1858 	}
1859 
1860 	ret = mf;
1861 
1862 out:
1863 	/* Release RX buffer after the mem retrieve request. */
1864 	thread_smccc(&ffa_rx_release_args);
1865 
1866 	return ret;
1867 }
1868 
1869 static TEE_Result spmc_init(void)
1870 {
1871 	unsigned int major = 0;
1872 	unsigned int minor __maybe_unused = 0;
1873 	uint32_t my_vers = 0;
1874 	uint32_t vers = 0;
1875 
1876 	my_vers = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR);
1877 	vers = get_ffa_version(my_vers);
1878 	major = (vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK;
1879 	minor = (vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK;
1880 	DMSG("SPMC reported version %u.%u", major, minor);
1881 	if (major != FFA_VERSION_MAJOR) {
1882 		EMSG("Incompatible major version %u, expected %u",
1883 		     major, FFA_VERSION_MAJOR);
1884 		panic();
1885 	}
1886 	if (vers < my_vers)
1887 		my_vers = vers;
1888 	DMSG("Using version %u.%u",
1889 	     (my_vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK,
1890 	     (my_vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK);
1891 	nw_rxtx.ffa_vers = my_vers;
1892 
1893 	spmc_rxtx_map(&nw_rxtx);
1894 	my_endpoint_id = spmc_get_id();
1895 	DMSG("My endpoint ID %#x", my_endpoint_id);
1896 
1897 	return TEE_SUCCESS;
1898 }
1899 #endif /* !defined(CFG_CORE_SEL1_SPMC) */
1900 
1901 /*
1902  * boot_final() is always called before exiting at the end of boot
1903  * initialization.  In case of virtualization the init-calls are done only
1904  * once an OP-TEE partition has been created. So with virtualization we have
1905  * to initialize via boot_final() to make sure we have a value assigned
1906  * before it's used the first time.
1907  */
1908 #ifdef CFG_NS_VIRTUALIZATION
1909 boot_final(spmc_init);
1910 #else
1911 service_init(spmc_init);
1912 #endif
1913