xref: /optee_os/core/arch/arm/kernel/thread_spmc.c (revision 8d541aee2e0fe7242ec6fb57aabc7a04471b2237)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2020-2021, Linaro Limited.
4  * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
5  */
6 
7 #include <assert.h>
8 #include <ffa.h>
9 #include <initcall.h>
10 #include <io.h>
11 #include <kernel/interrupt.h>
12 #include <kernel/panic.h>
13 #include <kernel/secure_partition.h>
14 #include <kernel/spinlock.h>
15 #include <kernel/spmc_sp_handler.h>
16 #include <kernel/tee_misc.h>
17 #include <kernel/thread.h>
18 #include <kernel/thread_private.h>
19 #include <kernel/thread_spmc.h>
20 #include <kernel/virtualization.h>
21 #include <mm/core_mmu.h>
22 #include <mm/mobj.h>
23 #include <optee_ffa.h>
24 #include <optee_msg.h>
25 #include <optee_rpc_cmd.h>
26 #include <sm/optee_smc.h>
27 #include <string.h>
28 #include <sys/queue.h>
29 #include <tee/entry_std.h>
30 #include <tee/uuid.h>
31 #include <util.h>
32 
33 #if defined(CFG_CORE_SEL1_SPMC)
34 struct mem_share_state {
35 	struct mobj_ffa *mf;
36 	unsigned int page_count;
37 	unsigned int region_count;
38 	unsigned int current_page_idx;
39 };
40 
41 struct mem_frag_state {
42 	struct mem_share_state share;
43 	tee_mm_entry_t *mm;
44 	unsigned int frag_offset;
45 	SLIST_ENTRY(mem_frag_state) link;
46 };
47 #endif
48 
49 /* Initialized in spmc_init() below */
50 static uint16_t my_endpoint_id __nex_bss;
51 #ifdef CFG_CORE_SEL1_SPMC
52 static const uint32_t my_part_props = FFA_PART_PROP_DIRECT_REQ_RECV |
53 				      FFA_PART_PROP_DIRECT_REQ_SEND |
54 #ifdef CFG_NS_VIRTUALIZATION
55 				      FFA_PART_PROP_NOTIF_CREATED |
56 				      FFA_PART_PROP_NOTIF_DESTROYED |
57 #endif
58 #ifdef ARM64
59 				      FFA_PART_PROP_AARCH64_STATE |
60 #endif
61 				      FFA_PART_PROP_IS_PE_ID;
62 
63 static uint32_t my_uuid_words[] = {
64 	/*
65 	 * - if the SPMC is in S-EL2 this UUID describes OP-TEE as a S-EL1
66 	 *   SP, or
67 	 * - if the SPMC is in S-EL1 then this UUID is for OP-TEE as a
68 	 *   logical partition, residing in the same exception level as the
69 	 *   SPMC
70 	 * UUID 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
71 	 */
72 	0xe0786148, 0xe311f8e7, 0x02005ebc, 0x1bc5d5a5,
73 };
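/*
 * The words above are the UUID octets read as four little-endian 32-bit
 * values, e.g. 0xe0786148 is the byte sequence 48 61 78 e0 from the
 * canonical string form.
 */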
74 
75 /*
76  * If struct ffa_rxtx::size is 0 RX/TX buffers are not mapped or initialized.
77  *
78  * struct ffa_rxtx::spinlock protects the variables below from concurrent
79  * access. This includes use of the contents of struct ffa_rxtx::rx and
80  * @frag_state_head.
81  *
82  * struct ffa_rxtx::tx_is_mine is true when we may write to struct
83  * ffa_rxtx::tx and false when it is owned by normal world.
84  *
85  * Note that we can't prevent normal world from updating the content of
86  * these buffers so we must always be careful when reading, even while we
87  * hold the lock.
88  */
89 
90 static struct ffa_rxtx my_rxtx __nex_bss;
91 
92 static bool is_nw_buf(struct ffa_rxtx *rxtx)
93 {
94 	return rxtx == &my_rxtx;
95 }
96 
97 static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
98 	SLIST_HEAD_INITIALIZER(&frag_state_head);
99 #else
100 static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
101 static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
102 static struct ffa_rxtx my_rxtx = {
103 	.rx = __rx_buf,
104 	.tx = __tx_buf,
105 	.size = sizeof(__rx_buf),
106 };
107 #endif
108 
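/*
 * In FF-A direct messages w1 packs the sender endpoint ID in bits [31:16]
 * and the destination endpoint ID in bits [15:0]. Swapping the two halves
 * turns the source/destination pair of a request into the pair to use for
 * the corresponding response, and get_sender_id() below extracts the
 * sender half.
 */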
109 static uint32_t swap_src_dst(uint32_t src_dst)
110 {
111 	return (src_dst >> 16) | (src_dst << 16);
112 }
113 
114 static uint16_t get_sender_id(uint32_t src_dst)
115 {
116 	return src_dst >> 16;
117 }
118 
119 void spmc_set_args(struct thread_smc_args *args, uint32_t fid, uint32_t src_dst,
120 		   uint32_t w2, uint32_t w3, uint32_t w4, uint32_t w5)
121 {
122 	*args = (struct thread_smc_args){ .a0 = fid,
123 					  .a1 = src_dst,
124 					  .a2 = w2,
125 					  .a3 = w3,
126 					  .a4 = w4,
127 					  .a5 = w5, };
128 }
129 
130 uint32_t spmc_exchange_version(uint32_t vers, struct ffa_rxtx *rxtx)
131 {
132 	/*
133 	 * No locking; if the caller makes concurrent calls to this it's
134 	 * only making a mess for itself. We must be able to renegotiate
135 	 * the FF-A version in order to support differing versions between
136 	 * the loader and the driver.
137 	 */
138 	if (vers < FFA_VERSION_1_1)
139 		rxtx->ffa_vers = FFA_VERSION_1_0;
140 	else
141 		rxtx->ffa_vers = FFA_VERSION_1_1;
142 
143 	return rxtx->ffa_vers;
144 }
145 
146 #if defined(CFG_CORE_SEL1_SPMC)
147 static void handle_features(struct thread_smc_args *args)
148 {
149 	uint32_t ret_fid = 0;
150 	uint32_t ret_w2 = FFA_PARAM_MBZ;
151 
152 	switch (args->a1) {
153 #ifdef ARM64
154 	case FFA_RXTX_MAP_64:
155 #endif
156 	case FFA_RXTX_MAP_32:
157 		ret_fid = FFA_SUCCESS_32;
158 		ret_w2 = 0; /* 4kB Minimum buffer size and alignment boundary */
159 		break;
160 #ifdef ARM64
161 	case FFA_MEM_SHARE_64:
162 #endif
163 	case FFA_MEM_SHARE_32:
164 		ret_fid = FFA_SUCCESS_32;
165 		/*
166 		 * Partition manager supports transmission of a memory
167 		 * transaction descriptor in a buffer dynamically allocated
168 		 * by the endpoint.
169 		 */
170 		ret_w2 = BIT(0);
171 		break;
172 
173 	case FFA_ERROR:
174 	case FFA_VERSION:
175 	case FFA_SUCCESS_32:
176 #ifdef ARM64
177 	case FFA_SUCCESS_64:
178 #endif
179 	case FFA_FEATURES:
180 	case FFA_SPM_ID_GET:
181 	case FFA_MEM_FRAG_TX:
182 	case FFA_MEM_RECLAIM:
183 	case FFA_MSG_SEND_DIRECT_REQ_64:
184 	case FFA_MSG_SEND_DIRECT_REQ_32:
185 	case FFA_INTERRUPT:
186 	case FFA_PARTITION_INFO_GET:
187 	case FFA_RXTX_UNMAP:
188 	case FFA_RX_RELEASE:
189 	case FFA_FEATURE_MANAGED_EXIT_INTR:
190 		ret_fid = FFA_SUCCESS_32;
191 		break;
192 	default:
193 		ret_fid = FFA_ERROR;
194 		ret_w2 = FFA_NOT_SUPPORTED;
195 		break;
196 	}
197 
198 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
199 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
200 }
201 
202 static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
203 {
204 	tee_mm_entry_t *mm = NULL;
205 
206 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
207 		return FFA_INVALID_PARAMETERS;
208 
209 	mm = tee_mm_alloc(&tee_mm_shm, sz);
210 	if (!mm)
211 		return FFA_NO_MEMORY;
212 
213 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
214 					  sz / SMALL_PAGE_SIZE,
215 					  MEM_AREA_NSEC_SHM)) {
216 		tee_mm_free(mm);
217 		return FFA_INVALID_PARAMETERS;
218 	}
219 
220 	*va_ret = (void *)tee_mm_get_smem(mm);
221 	return 0;
222 }
223 
224 static void handle_spm_id_get(struct thread_smc_args *args)
225 {
226 	spmc_set_args(args, FFA_SUCCESS_32, FFA_PARAM_MBZ, my_endpoint_id,
227 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
228 }
229 
230 static void unmap_buf(void *va, size_t sz)
231 {
232 	tee_mm_entry_t *mm = tee_mm_find(&tee_mm_shm, (vaddr_t)va);
233 
234 	assert(mm);
235 	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
236 	tee_mm_free(mm);
237 }
238 
239 void spmc_handle_rxtx_map(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
240 {
241 	int rc = 0;
242 	uint32_t ret_fid = FFA_ERROR;
243 	unsigned int sz = 0;
244 	paddr_t rx_pa = 0;
245 	paddr_t tx_pa = 0;
246 	void *rx = NULL;
247 	void *tx = NULL;
248 
249 	cpu_spin_lock(&rxtx->spinlock);
250 
251 	if (args->a3 & GENMASK_64(63, 6)) {
252 		rc = FFA_INVALID_PARAMETERS;
253 		goto out;
254 	}
255 
256 	sz = args->a3 * SMALL_PAGE_SIZE;
257 	if (!sz) {
258 		rc = FFA_INVALID_PARAMETERS;
259 		goto out;
260 	}
261 	/* TX/RX are swapped compared to the caller */
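	/*
	 * That is, the caller passes a1 = PA of its TX buffer, a2 = PA of
	 * its RX buffer and a3 = buffer size in 4k pages (see
	 * spmc_rxtx_map() later in this file for the caller side): what
	 * the caller transmits we receive, so its TX becomes our RX and
	 * its RX becomes our TX.
	 */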
262 	tx_pa = args->a2;
263 	rx_pa = args->a1;
264 
265 	if (rxtx->size) {
266 		rc = FFA_DENIED;
267 		goto out;
268 	}
269 
270 	/*
271 	 * If the buffer comes from an SP the address is virtual and already
272 	 * mapped.
273 	 */
274 	if (is_nw_buf(rxtx)) {
275 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
276 			enum teecore_memtypes mt = MEM_AREA_NEX_NSEC_SHM;
277 			bool tx_alloced = false;
278 
279 			/*
280 			 * With virtualization we establish this mapping in
281 			 * the nexus mapping which then is replicated to
282 			 * each partition.
283 			 *
284 			 * This means that this mapping must be done before
285 			 * any partition is created and then must not be
286 			 * changed.
287 			 */
288 
289 			/*
290 			 * core_mmu_add_mapping() may reuse previous
291 			 * mappings. First check if there's any mappings to
292 			 * reuse so we know how to clean up in case of
293 			 * failure.
294 			 */
295 			tx = phys_to_virt(tx_pa, mt, sz);
296 			rx = phys_to_virt(rx_pa, mt, sz);
297 			if (!tx) {
298 				tx = core_mmu_add_mapping(mt, tx_pa, sz);
299 				if (!tx) {
300 					rc = FFA_NO_MEMORY;
301 					goto out;
302 				}
303 				tx_alloced = true;
304 			}
305 			if (!rx)
306 				rx = core_mmu_add_mapping(mt, rx_pa, sz);
307 
308 			if (!rx) {
309 				if (tx_alloced && tx)
310 					core_mmu_remove_mapping(mt, tx, sz);
311 				rc = FFA_NO_MEMORY;
312 				goto out;
313 			}
314 		} else {
315 			rc = map_buf(tx_pa, sz, &tx);
316 			if (rc)
317 				goto out;
318 			rc = map_buf(rx_pa, sz, &rx);
319 			if (rc) {
320 				unmap_buf(tx, sz);
321 				goto out;
322 			}
323 		}
324 		rxtx->tx = tx;
325 		rxtx->rx = rx;
326 	} else {
327 		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
328 			rc = FFA_INVALID_PARAMETERS;
329 			goto out;
330 		}
331 
332 		if (!virt_to_phys((void *)tx_pa) ||
333 		    !virt_to_phys((void *)rx_pa)) {
334 			rc = FFA_INVALID_PARAMETERS;
335 			goto out;
336 		}
337 
338 		rxtx->tx = (void *)tx_pa;
339 		rxtx->rx = (void *)rx_pa;
340 	}
341 
342 	rxtx->size = sz;
343 	rxtx->tx_is_mine = true;
344 	ret_fid = FFA_SUCCESS_32;
345 	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
346 	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
347 out:
348 	cpu_spin_unlock(&rxtx->spinlock);
349 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
350 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
351 }
352 
353 void spmc_handle_rxtx_unmap(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
354 {
355 	uint32_t ret_fid = FFA_ERROR;
356 	int rc = FFA_INVALID_PARAMETERS;
357 
358 	cpu_spin_lock(&rxtx->spinlock);
359 
360 	if (!rxtx->size)
361 		goto out;
362 
363 	/* We don't unmap the SP memory as the SP might still use it */
364 	if (is_nw_buf(rxtx)) {
365 		unmap_buf(rxtx->rx, rxtx->size);
366 		unmap_buf(rxtx->tx, rxtx->size);
367 	}
368 	rxtx->size = 0;
369 	rxtx->rx = NULL;
370 	rxtx->tx = NULL;
371 	ret_fid = FFA_SUCCESS_32;
372 	rc = 0;
373 out:
374 	cpu_spin_unlock(&rxtx->spinlock);
375 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
376 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
377 }
378 
379 void spmc_handle_rx_release(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
380 {
381 	uint32_t ret_fid = 0;
382 	int rc = 0;
383 
384 	cpu_spin_lock(&rxtx->spinlock);
385 	/* The sender's RX is our TX */
386 	if (!rxtx->size || rxtx->tx_is_mine) {
387 		ret_fid = FFA_ERROR;
388 		rc = FFA_DENIED;
389 	} else {
390 		ret_fid = FFA_SUCCESS_32;
391 		rc = 0;
392 		rxtx->tx_is_mine = true;
393 	}
394 	cpu_spin_unlock(&rxtx->spinlock);
395 
396 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
397 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
398 }
399 
400 static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
401 {
402 	return !w0 && !w1 && !w2 && !w3;
403 }
404 
405 static bool is_my_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
406 {
407 	/*
408 	 * This depends on which UUID we have been assigned.
409 	 * TODO add a generic mechanism to obtain our UUID.
410 	 *
411 	 * The test below is for the hard coded UUID
412 	 * 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
413 	 */
414 	return w0 == my_uuid_words[0] && w1 == my_uuid_words[1] &&
415 	       w2 == my_uuid_words[2] && w3 == my_uuid_words[3];
416 }
417 
418 TEE_Result spmc_fill_partition_entry(uint32_t ffa_vers, void *buf, size_t blen,
419 				     size_t idx, uint16_t endpoint_id,
420 				     uint16_t execution_context,
421 				     uint32_t part_props,
422 				     const uint32_t uuid_words[4])
423 {
424 	struct ffa_partition_info_x *fpi = NULL;
425 	size_t fpi_size = sizeof(*fpi);
426 
427 	if (ffa_vers >= FFA_VERSION_1_1)
428 		fpi_size += FFA_UUID_SIZE;
429 
430 	if ((idx + 1) * fpi_size > blen)
431 		return TEE_ERROR_OUT_OF_MEMORY;
432 
433 	fpi = (void *)((vaddr_t)buf + idx * fpi_size);
434 	fpi->id = endpoint_id;
435 	/* Number of execution contexts implemented by this partition */
436 	fpi->execution_context = execution_context;
437 
438 	fpi->partition_properties = part_props;
439 
440 	if (ffa_vers >= FFA_VERSION_1_1) {
441 		if (uuid_words)
442 			memcpy(fpi->uuid, uuid_words, FFA_UUID_SIZE);
443 		else
444 			memset(fpi->uuid, 0, FFA_UUID_SIZE);
445 	}
446 
447 	return TEE_SUCCESS;
448 }
449 
450 static int handle_partition_info_get_all(size_t *elem_count,
451 					 struct ffa_rxtx *rxtx, bool count_only)
452 {
453 	if (!count_only) {
454 		/* Add OP-TEE SP */
455 		if (spmc_fill_partition_entry(rxtx->ffa_vers, rxtx->tx,
456 					      rxtx->size, 0, my_endpoint_id,
457 					      CFG_TEE_CORE_NB_CORE,
458 					      my_part_props, my_uuid_words))
459 			return FFA_NO_MEMORY;
460 	}
461 	*elem_count = 1;
462 
463 	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
464 		if (sp_partition_info_get(rxtx->ffa_vers, rxtx->tx, rxtx->size,
465 					  NULL, elem_count, count_only))
466 			return FFA_NO_MEMORY;
467 	}
468 
469 	return FFA_OK;
470 }
471 
472 void spmc_handle_partition_info_get(struct thread_smc_args *args,
473 				    struct ffa_rxtx *rxtx)
474 {
475 	TEE_Result res = TEE_SUCCESS;
476 	uint32_t ret_fid = FFA_ERROR;
477 	uint32_t rc = 0;
478 	bool count_only = args->a5 & FFA_PARTITION_INFO_GET_COUNT_FLAG;
479 
480 	if (!count_only) {
481 		cpu_spin_lock(&rxtx->spinlock);
482 
483 		if (!rxtx->size || !rxtx->tx_is_mine) {
484 			rc = FFA_BUSY;
485 			goto out;
486 		}
487 	}
488 
489 	if (is_nil_uuid(args->a1, args->a2, args->a3, args->a4)) {
490 		size_t elem_count = 0;
491 
492 		ret_fid = handle_partition_info_get_all(&elem_count, rxtx,
493 							count_only);
494 
495 		if (ret_fid) {
496 			rc = ret_fid;
497 			ret_fid = FFA_ERROR;
498 		} else {
499 			ret_fid = FFA_SUCCESS_32;
500 			rc = elem_count;
501 		}
502 
503 		goto out;
504 	}
505 
506 	if (is_my_uuid(args->a1, args->a2, args->a3, args->a4)) {
507 		if (!count_only) {
508 			res = spmc_fill_partition_entry(rxtx->ffa_vers,
509 							rxtx->tx, rxtx->size, 0,
510 							my_endpoint_id,
511 							CFG_TEE_CORE_NB_CORE,
512 							my_part_props,
513 							my_uuid_words);
514 			if (res) {
515 				ret_fid = FFA_ERROR;
516 				rc = FFA_INVALID_PARAMETERS;
517 				goto out;
518 			}
519 		}
520 		rc = 1;
521 	} else if (IS_ENABLED(CFG_SECURE_PARTITION)) {
522 		uint32_t uuid_array[4] = { 0 };
523 		TEE_UUID uuid = { };
524 		size_t count = 0;
525 
526 		uuid_array[0] = args->a1;
527 		uuid_array[1] = args->a2;
528 		uuid_array[2] = args->a3;
529 		uuid_array[3] = args->a4;
530 		tee_uuid_from_octets(&uuid, (uint8_t *)uuid_array);
531 
532 		res = sp_partition_info_get(rxtx->ffa_vers, rxtx->tx,
533 					    rxtx->size, &uuid, &count,
534 					    count_only);
535 		if (res != TEE_SUCCESS) {
536 			ret_fid = FFA_ERROR;
537 			rc = FFA_INVALID_PARAMETERS;
538 			goto out;
539 		}
540 		rc = count;
541 	} else {
542 		ret_fid = FFA_ERROR;
543 		rc = FFA_INVALID_PARAMETERS;
544 		goto out;
545 	}
546 
547 	ret_fid = FFA_SUCCESS_32;
548 
549 out:
550 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
551 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
552 	if (!count_only) {
553 		rxtx->tx_is_mine = false;
554 		cpu_spin_unlock(&rxtx->spinlock);
555 	}
556 }
557 
558 static void spmc_handle_run(struct thread_smc_args *args)
559 {
560 	uint16_t endpoint = FFA_TARGET_INFO_GET_SP_ID(args->a1);
561 	uint16_t thread_id = FFA_TARGET_INFO_GET_VCPU_ID(args->a1);
562 	uint32_t rc = FFA_OK;
563 
564 	if (endpoint != my_endpoint_id) {
565 		/*
566 		 * The endpoint should be an SP; try to resume the SP from
567 		 * preempted into busy state.
568 		 */
569 		rc = spmc_sp_resume_from_preempted(endpoint);
570 		if (rc)
571 			goto out;
572 	}
573 
574 	thread_resume_from_rpc(thread_id, 0, 0, 0, 0);
575 
576 	/* thread_resume_from_rpc() returns only if the thread_id is invalid */
577 	rc = FFA_INVALID_PARAMETERS;
578 
579 out:
580 	spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
581 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
582 }
583 #endif /*CFG_CORE_SEL1_SPMC*/
584 
585 static void handle_yielding_call(struct thread_smc_args *args,
586 				 uint32_t direct_resp_fid)
587 {
588 	TEE_Result res = 0;
589 
590 	thread_check_canaries();
591 
592 #ifdef ARM64
593 	/* Saving this for an eventual RPC */
594 	thread_get_core_local()->direct_resp_fid = direct_resp_fid;
595 #endif
596 
597 	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
598 		/* Note connection to struct thread_rpc_arg::ret */
599 		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
600 				       0);
601 		res = TEE_ERROR_BAD_PARAMETERS;
602 	} else {
603 		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
604 				     args->a6, args->a7);
605 		res = TEE_ERROR_BUSY;
606 	}
607 	spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1),
608 		      0, res, 0, 0);
609 }
610 
611 static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
612 {
613 	uint64_t cookie = reg_pair_to_64(a5, a4);
614 	uint32_t res = 0;
615 
616 	res = mobj_ffa_unregister_by_cookie(cookie);
617 	switch (res) {
618 	case TEE_SUCCESS:
619 	case TEE_ERROR_ITEM_NOT_FOUND:
620 		return 0;
621 	case TEE_ERROR_BUSY:
622 		EMSG("res %#"PRIx32, res);
623 		return FFA_BUSY;
624 	default:
625 		EMSG("res %#"PRIx32, res);
626 		return FFA_INVALID_PARAMETERS;
627 	}
628 }
629 
630 static void handle_blocking_call(struct thread_smc_args *args,
631 				 uint32_t direct_resp_fid)
632 {
633 	switch (args->a3) {
634 	case OPTEE_FFA_GET_API_VERSION:
635 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
636 			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
637 			      0);
638 		break;
639 	case OPTEE_FFA_GET_OS_VERSION:
640 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
641 			      CFG_OPTEE_REVISION_MAJOR,
642 			      CFG_OPTEE_REVISION_MINOR, TEE_IMPL_GIT_SHA1);
643 		break;
644 	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
645 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
646 			      0, THREAD_RPC_MAX_NUM_PARAMS,
647 			      OPTEE_FFA_SEC_CAP_ARG_OFFSET);
648 		break;
649 	case OPTEE_FFA_UNREGISTER_SHM:
650 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
651 			      handle_unregister_shm(args->a4, args->a5), 0, 0);
652 		break;
653 	default:
654 		EMSG("Unhandled blocking service ID %#"PRIx32,
655 		     (uint32_t)args->a3);
656 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
657 			      TEE_ERROR_BAD_PARAMETERS, 0, 0);
658 	}
659 }
660 
661 static void handle_framework_direct_request(struct thread_smc_args *args,
662 					    struct ffa_rxtx *rxtx,
663 					    uint32_t direct_resp_fid)
664 {
665 	uint32_t w0 = FFA_ERROR;
666 	uint32_t w1 = FFA_PARAM_MBZ;
667 	uint32_t w2 = FFA_NOT_SUPPORTED;
668 	uint32_t w3 = FFA_PARAM_MBZ;
669 
670 	switch (args->a2 & FFA_MSG_TYPE_MASK) {
671 	case FFA_MSG_SEND_VM_CREATED:
672 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
673 			uint16_t guest_id = args->a5;
674 			TEE_Result res = virt_guest_created(guest_id);
675 
676 			w0 = direct_resp_fid;
677 			w1 = swap_src_dst(args->a1);
678 			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_CREATED;
679 			if (res == TEE_SUCCESS)
680 				w3 = FFA_OK;
681 			else if (res == TEE_ERROR_OUT_OF_MEMORY)
682 				w3 = FFA_DENIED;
683 			else
684 				w3 = FFA_INVALID_PARAMETERS;
685 		}
686 		break;
687 	case FFA_MSG_SEND_VM_DESTROYED:
688 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
689 			uint16_t guest_id = args->a5;
690 			TEE_Result res = virt_guest_destroyed(guest_id);
691 
692 			w0 = direct_resp_fid;
693 			w1 = swap_src_dst(args->a1);
694 			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_DESTROYED;
695 			if (res == TEE_SUCCESS)
696 				w3 = FFA_OK;
697 			else
698 				w3 = FFA_INVALID_PARAMETERS;
699 		}
700 		break;
701 	case FFA_MSG_VERSION_REQ:
702 		w0 = direct_resp_fid;
703 		w1 = swap_src_dst(args->a1);
704 		w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_VERSION_RESP;
705 		w3 = spmc_exchange_version(args->a3, rxtx);
706 		break;
707 	default:
708 		break;
709 	}
710 	spmc_set_args(args, w0, w1, w2, w3, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
711 }
712 
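/*
 * OP-TEE's own protocol on top of FF-A direct requests: w3 selects the
 * service, where a set OPTEE_FFA_YIELDING_CALL_BIT means a yielding call
 * that runs in a thread context and anything else is a blocking call
 * handled directly on this CPU.
 */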
713 static void handle_direct_request(struct thread_smc_args *args,
714 				  struct ffa_rxtx *rxtx)
715 {
716 	uint32_t direct_resp_fid = 0;
717 
718 	if (IS_ENABLED(CFG_SECURE_PARTITION) &&
719 	    FFA_DST(args->a1) != my_endpoint_id) {
720 		spmc_sp_start_thread(args);
721 		return;
722 	}
723 
724 	if (OPTEE_SMC_IS_64(args->a0))
725 		direct_resp_fid = FFA_MSG_SEND_DIRECT_RESP_64;
726 	else
727 		direct_resp_fid = FFA_MSG_SEND_DIRECT_RESP_32;
728 
729 	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
730 		handle_framework_direct_request(args, rxtx, direct_resp_fid);
731 		return;
732 	}
733 
734 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
735 	    virt_set_guest(get_sender_id(args->a1))) {
736 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
737 			      TEE_ERROR_ITEM_NOT_FOUND, 0, 0);
738 		return;
739 	}
740 
741 	if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
742 		handle_yielding_call(args, direct_resp_fid);
743 	else
744 		handle_blocking_call(args, direct_resp_fid);
745 
746 	/*
747 	 * Note that handle_yielding_call() typically only returns if a
748 	 * thread cannot be allocated or found. virt_unset_guest() is also
749 	 * called from thread_state_suspend() and thread_state_free().
750 	 */
751 	virt_unset_guest();
752 }
753 
754 int spmc_read_mem_transaction(uint32_t ffa_vers, void *buf, size_t blen,
755 			      struct ffa_mem_transaction_x *trans)
756 {
757 	uint16_t mem_reg_attr = 0;
758 	uint32_t flags = 0;
759 	uint32_t count = 0;
760 	uint32_t offs = 0;
761 	uint32_t size = 0;
762 	size_t n = 0;
763 
764 	if (!IS_ALIGNED_WITH_TYPE(buf, uint64_t))
765 		return FFA_INVALID_PARAMETERS;
766 
767 	if (ffa_vers >= FFA_VERSION_1_1) {
768 		struct ffa_mem_transaction_1_1 *descr = NULL;
769 
770 		if (blen < sizeof(*descr))
771 			return FFA_INVALID_PARAMETERS;
772 
773 		descr = buf;
774 		trans->sender_id = READ_ONCE(descr->sender_id);
775 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
776 		flags = READ_ONCE(descr->flags);
777 		trans->global_handle = READ_ONCE(descr->global_handle);
778 		trans->tag = READ_ONCE(descr->tag);
779 
780 		count = READ_ONCE(descr->mem_access_count);
781 		size = READ_ONCE(descr->mem_access_size);
782 		offs = READ_ONCE(descr->mem_access_offs);
783 	} else {
784 		struct ffa_mem_transaction_1_0 *descr = NULL;
785 
786 		if (blen < sizeof(*descr))
787 			return FFA_INVALID_PARAMETERS;
788 
789 		descr = buf;
790 		trans->sender_id = READ_ONCE(descr->sender_id);
791 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
792 		flags = READ_ONCE(descr->flags);
793 		trans->global_handle = READ_ONCE(descr->global_handle);
794 		trans->tag = READ_ONCE(descr->tag);
795 
796 		count = READ_ONCE(descr->mem_access_count);
797 		size = sizeof(struct ffa_mem_access);
798 		offs = offsetof(struct ffa_mem_transaction_1_0,
799 				mem_access_array);
800 	}
801 
802 	if (mem_reg_attr > UINT8_MAX || flags > UINT8_MAX ||
803 	    size > UINT8_MAX || count > UINT8_MAX || offs > UINT16_MAX)
804 		return FFA_INVALID_PARAMETERS;
805 
806 	/* Check that the endpoint memory access descriptor array fits */
807 	if (MUL_OVERFLOW(size, count, &n) || ADD_OVERFLOW(offs, n, &n) ||
808 	    n > blen)
809 		return FFA_INVALID_PARAMETERS;
810 
811 	trans->mem_reg_attr = mem_reg_attr;
812 	trans->flags = flags;
813 	trans->mem_access_size = size;
814 	trans->mem_access_count = count;
815 	trans->mem_access_offs = offs;
816 	return 0;
817 }
818 
819 #if defined(CFG_CORE_SEL1_SPMC)
820 static int get_acc_perms(vaddr_t mem_acc_base, unsigned int mem_access_size,
821 			 unsigned int mem_access_count, uint8_t *acc_perms,
822 			 unsigned int *region_offs)
823 {
824 	struct ffa_mem_access_perm *descr = NULL;
825 	struct ffa_mem_access *mem_acc = NULL;
826 	unsigned int n = 0;
827 
828 	for (n = 0; n < mem_access_count; n++) {
829 		mem_acc = (void *)(mem_acc_base + mem_access_size * n);
830 		descr = &mem_acc->access_perm;
831 		if (READ_ONCE(descr->endpoint_id) == my_endpoint_id) {
832 			*acc_perms = READ_ONCE(descr->perm);
833 			*region_offs = READ_ONCE(mem_acc->region_offs);
834 			return 0;
835 		}
836 	}
837 
838 	return FFA_INVALID_PARAMETERS;
839 }
840 
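/*
 * Layout of the memory transaction descriptor parsed by mem_share_init():
 *
 *   struct ffa_mem_transaction_{1_0,1_1}  fixed header
 *   struct ffa_mem_access[]               at mem_access_offs
 *   struct ffa_mem_region                 at ffa_mem_access::region_offs
 *                                         (the composite descriptor)
 *   struct ffa_address_range[]            the constituent address ranges
 */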
841 static int mem_share_init(struct ffa_mem_transaction_x *mem_trans, void *buf,
842 			  size_t blen, unsigned int *page_count,
843 			  unsigned int *region_count, size_t *addr_range_offs)
844 {
845 	const uint16_t exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
846 	const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
847 	struct ffa_mem_region *region_descr = NULL;
848 	unsigned int region_descr_offs = 0;
849 	uint8_t mem_acc_perm = 0;
850 	size_t n = 0;
851 
852 	if (mem_trans->mem_reg_attr != exp_mem_reg_attr)
853 		return FFA_INVALID_PARAMETERS;
854 
855 	/* Check that the access permissions match what's expected */
856 	if (get_acc_perms((vaddr_t)buf + mem_trans->mem_access_offs,
857 			  mem_trans->mem_access_size,
858 			  mem_trans->mem_access_count,
859 			  &mem_acc_perm, &region_descr_offs) ||
860 	    mem_acc_perm != exp_mem_acc_perm)
861 		return FFA_INVALID_PARAMETERS;
862 
863 	/* Check that the Composite memory region descriptor fits */
864 	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
865 	    n > blen)
866 		return FFA_INVALID_PARAMETERS;
867 
868 	if (!IS_ALIGNED_WITH_TYPE((vaddr_t)buf + region_descr_offs,
869 				  struct ffa_mem_region))
870 		return FFA_INVALID_PARAMETERS;
871 
872 	region_descr = (struct ffa_mem_region *)((vaddr_t)buf +
873 						 region_descr_offs);
874 	*page_count = READ_ONCE(region_descr->total_page_count);
875 	*region_count = READ_ONCE(region_descr->address_range_count);
876 	*addr_range_offs = n;
877 	return 0;
878 }
879 
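/*
 * Returns the number of bytes consumed from @buf if more address ranges
 * are expected in a later fragment, 0 when the whole descriptor has been
 * consumed, or a negative FFA_* error code.
 */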
880 static int add_mem_share_helper(struct mem_share_state *s, void *buf,
881 				size_t flen)
882 {
883 	unsigned int region_count = flen / sizeof(struct ffa_address_range);
884 	struct ffa_address_range *arange = NULL;
885 	unsigned int n = 0;
886 
887 	if (region_count > s->region_count)
888 		region_count = s->region_count;
889 
890 	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range))
891 		return FFA_INVALID_PARAMETERS;
892 	arange = buf;
893 
894 	for (n = 0; n < region_count; n++) {
895 		unsigned int page_count = READ_ONCE(arange[n].page_count);
896 		uint64_t addr = READ_ONCE(arange[n].address);
897 
898 		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
899 					  addr, page_count))
900 			return FFA_INVALID_PARAMETERS;
901 	}
902 
903 	s->region_count -= region_count;
904 	if (s->region_count)
905 		return region_count * sizeof(*arange);
906 
907 	if (s->current_page_idx != s->page_count)
908 		return FFA_INVALID_PARAMETERS;
909 
910 	return 0;
911 }
912 
913 static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
914 {
915 	int rc = 0;
916 
917 	rc = add_mem_share_helper(&s->share, buf, flen);
918 	if (rc >= 0) {
919 		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
920 			/* We're not at the end of the descriptor yet */
921 			if (s->share.region_count)
922 				return s->frag_offset;
923 
924 			/* We're done */
925 			rc = 0;
926 		} else {
927 			rc = FFA_INVALID_PARAMETERS;
928 		}
929 	}
930 
931 	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
932 	if (rc < 0)
933 		mobj_ffa_sel1_spmc_delete(s->share.mf);
934 	else
935 		mobj_ffa_push_to_inactive(s->share.mf);
936 	free(s);
937 
938 	return rc;
939 }
940 
941 static bool is_sp_share(struct ffa_mem_transaction_x *mem_trans,
942 			void *buf)
943 {
944 	struct ffa_mem_access_perm *perm = NULL;
945 	struct ffa_mem_access *mem_acc = NULL;
946 
947 	if (!IS_ENABLED(CFG_SECURE_PARTITION))
948 		return false;
949 
950 	if (mem_trans->mem_access_count < 1)
951 		return false;
952 
953 	mem_acc = (void *)((vaddr_t)buf + mem_trans->mem_access_offs);
954 	perm = &mem_acc->access_perm;
955 
956 	/*
957 	 * perm->endpoint_id is read here only to check if the endpoint is
958 	 * OP-TEE. We read it again later on, but there are some additional
959 	 * checks there to make sure that the data is correct.
960 	 */
961 	return READ_ONCE(perm->endpoint_id) != my_endpoint_id;
962 }
963 
964 static int add_mem_share(struct ffa_mem_transaction_x *mem_trans,
965 			 tee_mm_entry_t *mm, void *buf, size_t blen,
966 			 size_t flen, uint64_t *global_handle)
967 {
968 	int rc = 0;
969 	struct mem_share_state share = { };
970 	size_t addr_range_offs = 0;
971 	uint64_t cookie = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
972 	size_t n = 0;
973 
974 	rc = mem_share_init(mem_trans, buf, flen, &share.page_count,
975 			    &share.region_count, &addr_range_offs);
976 	if (rc)
977 		return rc;
978 
979 	if (MUL_OVERFLOW(share.region_count,
980 			 sizeof(struct ffa_address_range), &n) ||
981 	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
982 		return FFA_INVALID_PARAMETERS;
983 
984 	if (mem_trans->global_handle)
985 		cookie = mem_trans->global_handle;
986 	share.mf = mobj_ffa_sel1_spmc_new(cookie, share.page_count);
987 	if (!share.mf)
988 		return FFA_NO_MEMORY;
989 
990 	if (flen != blen) {
991 		struct mem_frag_state *s = calloc(1, sizeof(*s));
992 
993 		if (!s) {
994 			rc = FFA_NO_MEMORY;
995 			goto err;
996 		}
997 		s->share = share;
998 		s->mm = mm;
999 		s->frag_offset = addr_range_offs;
1000 
1001 		SLIST_INSERT_HEAD(&frag_state_head, s, link);
1002 		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
1003 					flen - addr_range_offs);
1004 
1005 		if (rc >= 0)
1006 			*global_handle = mobj_ffa_get_cookie(share.mf);
1007 
1008 		return rc;
1009 	}
1010 
1011 	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
1012 				  flen - addr_range_offs);
1013 	if (rc) {
1014 		/*
1015 		 * A positive byte count (more regions expected) or an error
1016 		 * code here both mean the descriptor is inconsistent.
1017 		 */
1018 		rc = FFA_INVALID_PARAMETERS;
1019 		goto err;
1020 	}
1021 
1022 	*global_handle = mobj_ffa_push_to_inactive(share.mf);
1023 
1024 	return 0;
1025 err:
1026 	mobj_ffa_sel1_spmc_delete(share.mf);
1027 	return rc;
1028 }
1029 
1030 static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
1031 				 unsigned int page_count,
1032 				 uint64_t *global_handle, struct ffa_rxtx *rxtx)
1033 {
1034 	struct ffa_mem_transaction_x mem_trans = { };
1035 	int rc = 0;
1036 	size_t len = 0;
1037 	void *buf = NULL;
1038 	tee_mm_entry_t *mm = NULL;
1039 	vaddr_t offs = pbuf & SMALL_PAGE_MASK;
1040 
1041 	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
1042 		return FFA_INVALID_PARAMETERS;
1043 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
1044 		return FFA_INVALID_PARAMETERS;
1045 
1046 	/*
1047 	 * Check that the fragment length flen fits within len even when
1048 	 * the offset is taken into account.
1049 	 */
1050 	if (len < flen || len - offs < flen)
1051 		return FFA_INVALID_PARAMETERS;
1052 
1053 	mm = tee_mm_alloc(&tee_mm_shm, len);
1054 	if (!mm)
1055 		return FFA_NO_MEMORY;
1056 
1057 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
1058 					  page_count, MEM_AREA_NSEC_SHM)) {
1059 		rc = FFA_INVALID_PARAMETERS;
1060 		goto out;
1061 	}
1062 	buf = (void *)(tee_mm_get_smem(mm) + offs);
1063 
1064 	cpu_spin_lock(&rxtx->spinlock);
1065 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, buf, flen, &mem_trans);
1066 	if (!rc && IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1067 	    virt_set_guest(mem_trans.sender_id))
1068 		rc = FFA_DENIED;
1069 	if (!rc)
1070 		rc = add_mem_share(&mem_trans, mm, buf, blen, flen,
1071 				   global_handle);
1072 	virt_unset_guest();
1073 	cpu_spin_unlock(&rxtx->spinlock);
1074 	if (rc > 0)
1075 		return rc;
1076 
1077 	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1078 out:
1079 	tee_mm_free(mm);
1080 	return rc;
1081 }
1082 
1083 static int handle_mem_share_rxbuf(size_t blen, size_t flen,
1084 				  uint64_t *global_handle,
1085 				  struct ffa_rxtx *rxtx)
1086 {
1087 	struct ffa_mem_transaction_x mem_trans = { };
1088 	int rc = FFA_DENIED;
1089 
1090 	cpu_spin_lock(&rxtx->spinlock);
1091 
1092 	if (!rxtx->rx || flen > rxtx->size)
1093 		goto out;
1094 
1095 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, flen,
1096 				       &mem_trans);
1097 	if (rc)
1098 		goto out;
1099 	if (is_sp_share(&mem_trans, rxtx->rx)) {
1100 		rc = spmc_sp_add_share(&mem_trans, rxtx, blen,
1101 				       global_handle, NULL);
1102 		goto out;
1103 	}
1104 
1105 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1106 	    virt_set_guest(mem_trans.sender_id))
1107 		goto out;
1108 
1109 	rc = add_mem_share(&mem_trans, NULL, rxtx->rx, blen, flen,
1110 			   global_handle);
1111 
1112 	virt_unset_guest();
1113 
1114 out:
1115 	cpu_spin_unlock(&rxtx->spinlock);
1116 
1117 	return rc;
1118 }
1119 
1120 static void handle_mem_share(struct thread_smc_args *args,
1121 			     struct ffa_rxtx *rxtx)
1122 {
1123 	uint32_t tot_len = args->a1;
1124 	uint32_t frag_len = args->a2;
1125 	uint64_t addr = args->a3;
1126 	uint32_t page_count = args->a4;
1127 	uint32_t ret_w1 = 0;
1128 	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
1129 	uint32_t ret_w3 = 0;
1130 	uint32_t ret_fid = FFA_ERROR;
1131 	uint64_t global_handle = 0;
1132 	int rc = 0;
1133 
1134 	/* Check that the MBZs are indeed 0 */
1135 	if (args->a5 || args->a6 || args->a7)
1136 		goto out;
1137 
1138 	/* Check that fragment length doesn't exceed total length */
1139 	if (frag_len > tot_len)
1140 		goto out;
1141 
1142 	/* Check for 32-bit calling convention */
1143 	if (args->a0 == FFA_MEM_SHARE_32)
1144 		addr &= UINT32_MAX;
1145 
1146 	if (!addr) {
1147 		/*
1148 		 * The memory transaction descriptor is passed via our rx
1149 		 * buffer.
1150 		 */
1151 		if (page_count)
1152 			goto out;
1153 		rc = handle_mem_share_rxbuf(tot_len, frag_len, &global_handle,
1154 					    rxtx);
1155 	} else {
1156 		rc = handle_mem_share_tmem(addr, tot_len, frag_len, page_count,
1157 					   &global_handle, rxtx);
1158 	}
1159 	if (rc < 0) {
1160 		ret_w2 = rc;
1161 	} else if (rc > 0) {
1162 		ret_fid = FFA_MEM_FRAG_RX;
1163 		ret_w3 = rc;
1164 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1165 	} else {
1166 		ret_fid = FFA_SUCCESS_32;
1167 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1168 	}
1169 out:
1170 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1171 }
1172 
1173 static struct mem_frag_state *get_frag_state(uint64_t global_handle)
1174 {
1175 	struct mem_frag_state *s = NULL;
1176 
1177 	SLIST_FOREACH(s, &frag_state_head, link)
1178 		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
1179 			return s;
1180 
1181 	return NULL;
1182 }
1183 
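/*
 * Handles the FFA_MEM_FRAG_TX calls that follow an FFA_MEM_SHARE whose
 * descriptor didn't fit in a single fragment (frag_len < tot_len). Each
 * call supplies the next part of the descriptor until all address ranges
 * have been consumed.
 */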
1184 static void handle_mem_frag_tx(struct thread_smc_args *args,
1185 			       struct ffa_rxtx *rxtx)
1186 {
1187 	uint64_t global_handle = reg_pair_to_64(args->a2, args->a1);
1188 	size_t flen = args->a3;
1189 	uint32_t endpoint_id = args->a4;
1190 	struct mem_frag_state *s = NULL;
1191 	tee_mm_entry_t *mm = NULL;
1192 	unsigned int page_count = 0;
1193 	void *buf = NULL;
1194 	uint32_t ret_w1 = 0;
1195 	uint32_t ret_w2 = 0;
1196 	uint32_t ret_w3 = 0;
1197 	uint32_t ret_fid = 0;
1198 	int rc = 0;
1199 
1200 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1201 		uint16_t guest_id = endpoint_id >> 16;
1202 
1203 		if (!guest_id || virt_set_guest(guest_id)) {
1204 			rc = FFA_INVALID_PARAMETERS;
1205 			goto out_set_rc;
1206 		}
1207 	}
1208 
1209 	/*
1210 	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
1211 	 * requests.
1212 	 */
1213 
1214 	cpu_spin_lock(&rxtx->spinlock);
1215 
1216 	s = get_frag_state(global_handle);
1217 	if (!s) {
1218 		rc = FFA_INVALID_PARAMETERS;
1219 		goto out;
1220 	}
1221 
1222 	mm = s->mm;
1223 	if (mm) {
1224 		if (flen > tee_mm_get_bytes(mm)) {
1225 			rc = FFA_INVALID_PARAMETERS;
1226 			goto out;
1227 		}
1228 		page_count = s->share.page_count;
1229 		buf = (void *)tee_mm_get_smem(mm);
1230 	} else {
1231 		if (flen > rxtx->size) {
1232 			rc = FFA_INVALID_PARAMETERS;
1233 			goto out;
1234 		}
1235 		buf = rxtx->rx;
1236 	}
1237 
1238 	rc = add_mem_share_frag(s, buf, flen);
1239 out:
1240 	virt_unset_guest();
1241 	cpu_spin_unlock(&rxtx->spinlock);
1242 
1243 	if (rc <= 0 && mm) {
1244 		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1245 		tee_mm_free(mm);
1246 	}
1247 
1248 out_set_rc:
1249 	if (rc < 0) {
1250 		ret_fid = FFA_ERROR;
1251 		ret_w2 = rc;
1252 	} else if (rc > 0) {
1253 		ret_fid = FFA_MEM_FRAG_RX;
1254 		ret_w3 = rc;
1255 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1256 	} else {
1257 		ret_fid = FFA_SUCCESS_32;
1258 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1259 	}
1260 
1261 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1262 }
1263 
1264 static void handle_mem_reclaim(struct thread_smc_args *args)
1265 {
1266 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1267 	uint32_t ret_fid = FFA_ERROR;
1268 	uint64_t cookie = 0;
1269 
1270 	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
1271 		goto out;
1272 
1273 	cookie = reg_pair_to_64(args->a2, args->a1);
1274 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1275 		uint16_t guest_id = 0;
1276 
1277 		if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
1278 			guest_id = virt_find_guest_by_cookie(cookie);
1279 		} else {
1280 			guest_id = (cookie >> FFA_MEMORY_HANDLE_PRTN_SHIFT) &
1281 				   FFA_MEMORY_HANDLE_PRTN_MASK;
1282 		}
1283 		if (!guest_id || virt_set_guest(guest_id))
1284 			goto out;
1285 	}
1286 
1287 	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
1288 	case TEE_SUCCESS:
1289 		ret_fid = FFA_SUCCESS_32;
1290 		ret_val = 0;
1291 		break;
1292 	case TEE_ERROR_ITEM_NOT_FOUND:
1293 		DMSG("cookie %#"PRIx64" not found", cookie);
1294 		ret_val = FFA_INVALID_PARAMETERS;
1295 		break;
1296 	default:
1297 		DMSG("cookie %#"PRIx64" busy", cookie);
1298 		ret_val = FFA_DENIED;
1299 		break;
1300 	}
1301 
1302 	virt_unset_guest();
1303 
1304 out:
1305 	spmc_set_args(args, ret_fid, ret_val, 0, 0, 0, 0);
1306 }
1307 #endif
1308 
1309 /* Only called from assembly */
1310 void thread_spmc_msg_recv(struct thread_smc_args *args);
1311 void thread_spmc_msg_recv(struct thread_smc_args *args)
1312 {
1313 	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
1314 	switch (args->a0) {
1315 #if defined(CFG_CORE_SEL1_SPMC)
1316 	case FFA_FEATURES:
1317 		handle_features(args);
1318 		break;
1319 	case FFA_SPM_ID_GET:
1320 		handle_spm_id_get(args);
1321 		break;
1322 #ifdef ARM64
1323 	case FFA_RXTX_MAP_64:
1324 #endif
1325 	case FFA_RXTX_MAP_32:
1326 		spmc_handle_rxtx_map(args, &my_rxtx);
1327 		break;
1328 	case FFA_RXTX_UNMAP:
1329 		spmc_handle_rxtx_unmap(args, &my_rxtx);
1330 		break;
1331 	case FFA_RX_RELEASE:
1332 		spmc_handle_rx_release(args, &my_rxtx);
1333 		break;
1334 	case FFA_PARTITION_INFO_GET:
1335 		spmc_handle_partition_info_get(args, &my_rxtx);
1336 		break;
1337 	case FFA_RUN:
1338 		spmc_handle_run(args);
1339 		break;
1340 #endif /*CFG_CORE_SEL1_SPMC*/
1341 	case FFA_INTERRUPT:
1342 		interrupt_main_handler();
1343 		if (IS_ENABLED(CFG_CORE_SEL1_SPMC))
1344 			spmc_set_args(args, FFA_NORMAL_WORLD_RESUME, 0, 0, 0,
1345 				      0, 0);
1346 		else
1347 			spmc_set_args(args, FFA_MSG_WAIT, 0, 0, 0, 0, 0);
1348 		break;
1349 #ifdef ARM64
1350 	case FFA_MSG_SEND_DIRECT_REQ_64:
1351 #endif
1352 	case FFA_MSG_SEND_DIRECT_REQ_32:
1353 		handle_direct_request(args, &my_rxtx);
1354 		break;
1355 #if defined(CFG_CORE_SEL1_SPMC)
1356 #ifdef ARM64
1357 	case FFA_MEM_SHARE_64:
1358 #endif
1359 	case FFA_MEM_SHARE_32:
1360 		handle_mem_share(args, &my_rxtx);
1361 		break;
1362 	case FFA_MEM_RECLAIM:
1363 		if (!IS_ENABLED(CFG_SECURE_PARTITION) ||
1364 		    !ffa_mem_reclaim(args, NULL))
1365 			handle_mem_reclaim(args);
1366 		break;
1367 	case FFA_MEM_FRAG_TX:
1368 		handle_mem_frag_tx(args, &my_rxtx);
1369 		break;
1370 #endif /*CFG_CORE_SEL1_SPMC*/
1371 	default:
1372 		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
1373 		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
1374 			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1375 	}
1376 }
1377 
1378 static TEE_Result yielding_call_with_arg(uint64_t cookie, uint32_t offset)
1379 {
1380 	size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1381 	struct thread_ctx *thr = threads + thread_get_id();
1382 	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
1383 	struct optee_msg_arg *arg = NULL;
1384 	struct mobj *mobj = NULL;
1385 	uint32_t num_params = 0;
1386 	size_t sz = 0;
1387 
1388 	mobj = mobj_ffa_get_by_cookie(cookie, 0);
1389 	if (!mobj) {
1390 		EMSG("Can't find cookie %#"PRIx64, cookie);
1391 		return TEE_ERROR_BAD_PARAMETERS;
1392 	}
1393 
1394 	res = mobj_inc_map(mobj);
1395 	if (res)
1396 		goto out_put_mobj;
1397 
1398 	res = TEE_ERROR_BAD_PARAMETERS;
1399 	arg = mobj_get_va(mobj, offset, sizeof(*arg));
1400 	if (!arg)
1401 		goto out_dec_map;
1402 
1403 	num_params = READ_ONCE(arg->num_params);
1404 	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
1405 		goto out_dec_map;
1406 
1407 	sz = OPTEE_MSG_GET_ARG_SIZE(num_params);
1408 
1409 	thr->rpc_arg = mobj_get_va(mobj, offset + sz, sz_rpc);
1410 	if (!thr->rpc_arg)
1411 		goto out_dec_map;
1412 
1413 	virt_on_stdcall();
1414 	res = tee_entry_std(arg, num_params);
1415 
1416 	thread_rpc_shm_cache_clear(&thr->shm_cache);
1417 	thr->rpc_arg = NULL;
1418 
1419 out_dec_map:
1420 	mobj_dec_map(mobj);
1421 out_put_mobj:
1422 	mobj_put(mobj);
1423 	return res;
1424 }
1425 
1426 /*
1427  * Helper routine for the assembly function thread_std_smc_entry()
1428  *
1429  * Note: this function is weak just to make link_dummies_paged.c happy.
1430  */
1431 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
1432 				       uint32_t a2, uint32_t a3,
1433 				       uint32_t a4, uint32_t a5 __unused)
1434 {
1435 	/*
1436 	 * Arguments are supplied from handle_yielding_call() as:
1437 	 * a0 <- w1
1438 	 * a1 <- w3
1439 	 * a2 <- w4
1440 	 * a3 <- w5
1441 	 * a4 <- w6
1442 	 * a5 <- w7
1443 	 */
1444 	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
1445 	if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG)
1446 		return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4);
1447 	return FFA_DENIED;
1448 }
1449 
1450 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
1451 {
1452 	uint64_t offs = tpm->u.memref.offs;
1453 
1454 	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
1455 		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
1456 
1457 	param->u.fmem.offs_low = offs;
1458 	param->u.fmem.offs_high = offs >> 32;
1459 	if (param->u.fmem.offs_high != offs >> 32)
1460 		return false;
1461 
1462 	param->u.fmem.size = tpm->u.memref.size;
1463 	if (tpm->u.memref.mobj) {
1464 		uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj);
1465 
1466 		/* If a mobj is passed it better be one with a valid cookie. */
1467 		if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
1468 			return false;
1469 		param->u.fmem.global_id = cookie;
1470 	} else {
1471 		param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
1472 	}
1473 
1474 	return true;
1475 }
1476 
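/*
 * Builds an OPTEE_MSG argument struct for an RPC in the per-thread buffer
 * registered by yielding_call_with_arg(). The result written back by
 * normal world is read out with get_rpc_arg_res() below.
 */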
1477 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
1478 			    struct thread_param *params,
1479 			    struct optee_msg_arg **arg_ret)
1480 {
1481 	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1482 	struct thread_ctx *thr = threads + thread_get_id();
1483 	struct optee_msg_arg *arg = thr->rpc_arg;
1484 
1485 	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
1486 		return TEE_ERROR_BAD_PARAMETERS;
1487 
1488 	if (!arg) {
1489 		EMSG("rpc_arg not set");
1490 		return TEE_ERROR_GENERIC;
1491 	}
1492 
1493 	memset(arg, 0, sz);
1494 	arg->cmd = cmd;
1495 	arg->num_params = num_params;
1496 	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
1497 
1498 	for (size_t n = 0; n < num_params; n++) {
1499 		switch (params[n].attr) {
1500 		case THREAD_PARAM_ATTR_NONE:
1501 			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
1502 			break;
1503 		case THREAD_PARAM_ATTR_VALUE_IN:
1504 		case THREAD_PARAM_ATTR_VALUE_OUT:
1505 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1506 			arg->params[n].attr = params[n].attr -
1507 					      THREAD_PARAM_ATTR_VALUE_IN +
1508 					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
1509 			arg->params[n].u.value.a = params[n].u.value.a;
1510 			arg->params[n].u.value.b = params[n].u.value.b;
1511 			arg->params[n].u.value.c = params[n].u.value.c;
1512 			break;
1513 		case THREAD_PARAM_ATTR_MEMREF_IN:
1514 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1515 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1516 			if (!set_fmem(arg->params + n, params + n))
1517 				return TEE_ERROR_BAD_PARAMETERS;
1518 			break;
1519 		default:
1520 			return TEE_ERROR_BAD_PARAMETERS;
1521 		}
1522 	}
1523 
1524 	if (arg_ret)
1525 		*arg_ret = arg;
1526 
1527 	return TEE_SUCCESS;
1528 }
1529 
1530 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
1531 				struct thread_param *params)
1532 {
1533 	for (size_t n = 0; n < num_params; n++) {
1534 		switch (params[n].attr) {
1535 		case THREAD_PARAM_ATTR_VALUE_OUT:
1536 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1537 			params[n].u.value.a = arg->params[n].u.value.a;
1538 			params[n].u.value.b = arg->params[n].u.value.b;
1539 			params[n].u.value.c = arg->params[n].u.value.c;
1540 			break;
1541 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1542 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1543 			params[n].u.memref.size = arg->params[n].u.fmem.size;
1544 			break;
1545 		default:
1546 			break;
1547 		}
1548 	}
1549 
1550 	return arg->ret;
1551 }
1552 
1553 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
1554 			struct thread_param *params)
1555 {
1556 	struct thread_rpc_arg rpc_arg = { .call = {
1557 			.w1 = thread_get_tsd()->rpc_target_info,
1558 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1559 		},
1560 	};
1561 	struct optee_msg_arg *arg = NULL;
1562 	uint32_t ret = 0;
1563 
1564 	ret = get_rpc_arg(cmd, num_params, params, &arg);
1565 	if (ret)
1566 		return ret;
1567 
1568 	thread_rpc(&rpc_arg);
1569 
1570 	return get_rpc_arg_res(arg, num_params, params);
1571 }
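/*
 * Illustration only (not taken from this file): a caller asking normal
 * world for the time could do roughly
 *
 *	struct thread_param p = THREAD_PARAM_VALUE(OUT, 0, 0, 0);
 *
 *	if (!thread_rpc_cmd(OPTEE_RPC_CMD_GET_TIME, 1, &p))
 *		... p.u.value.a/b now hold the reply values ...
 */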
1572 
1573 static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
1574 {
1575 	struct thread_rpc_arg rpc_arg = { .call = {
1576 			.w1 = thread_get_tsd()->rpc_target_info,
1577 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1578 		},
1579 	};
1580 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
1581 	uint32_t res2 = 0;
1582 	uint32_t res = 0;
1583 
1584 	DMSG("freeing cookie %#"PRIx64, cookie);
1585 
1586 	res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);
1587 
1588 	mobj_put(mobj);
1589 	res2 = mobj_ffa_unregister_by_cookie(cookie);
1590 	if (res2)
1591 		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
1592 		     cookie, res2);
1593 	if (!res)
1594 		thread_rpc(&rpc_arg);
1595 }
1596 
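/*
 * Asks normal world to allocate shared memory of type @bt. On success the
 * reply carries a single OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT parameter with
 * the FF-A cookie and internal offset of the new buffer, which is then
 * looked up among the FF-A shared-memory objects (see
 * mobj_ffa_get_by_cookie()).
 */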
1597 static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
1598 {
1599 	struct thread_rpc_arg rpc_arg = { .call = {
1600 			.w1 = thread_get_tsd()->rpc_target_info,
1601 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1602 		},
1603 	};
1604 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
1605 	struct optee_msg_arg *arg = NULL;
1606 	unsigned int internal_offset = 0;
1607 	struct mobj *mobj = NULL;
1608 	uint64_t cookie = 0;
1609 
1610 	if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
1611 		return NULL;
1612 
1613 	thread_rpc(&rpc_arg);
1614 
1615 	if (arg->num_params != 1 ||
1616 	    arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
1617 		return NULL;
1618 
1619 	internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs);
1620 	cookie = READ_ONCE(arg->params->u.fmem.global_id);
1621 	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
1622 	if (!mobj) {
1623 		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
1624 		     cookie, internal_offset);
1625 		return NULL;
1626 	}
1627 
1628 	assert(mobj_is_nonsec(mobj));
1629 
1630 	if (mobj->size < size) {
1631 		DMSG("Mobj %#"PRIx64": wrong size", cookie);
1632 		mobj_put(mobj);
1633 		return NULL;
1634 	}
1635 
1636 	if (mobj_inc_map(mobj)) {
1637 		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
1638 		mobj_put(mobj);
1639 		return NULL;
1640 	}
1641 
1642 	return mobj;
1643 }
1644 
1645 struct mobj *thread_rpc_alloc_payload(size_t size)
1646 {
1647 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
1648 }
1649 
1650 struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
1651 {
1652 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
1653 }
1654 
1655 void thread_rpc_free_kernel_payload(struct mobj *mobj)
1656 {
1657 	thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL, mobj_get_cookie(mobj), mobj);
1658 }
1659 
1660 void thread_rpc_free_payload(struct mobj *mobj)
1661 {
1662 	thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
1663 			mobj);
1664 }
1665 
1666 struct mobj *thread_rpc_alloc_global_payload(size_t size)
1667 {
1668 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
1669 }
1670 
1671 void thread_rpc_free_global_payload(struct mobj *mobj)
1672 {
1673 	thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL, mobj_get_cookie(mobj),
1674 			mobj);
1675 }
1676 
1677 void thread_spmc_register_secondary_ep(vaddr_t ep)
1678 {
1679 	unsigned long ret = 0;
1680 
1681 	/* Let the SPM know the entry point for secondary CPUs */
1682 	ret = thread_smc(FFA_SECONDARY_EP_REGISTER_64, ep, 0, 0);
1683 
1684 	if (ret != FFA_SUCCESS_32 && ret != FFA_SUCCESS_64)
1685 		EMSG("FFA_SECONDARY_EP_REGISTER_64 ret %#lx", ret);
1686 }
1687 
1688 #if defined(CFG_CORE_SEL1_SPMC)
1689 static TEE_Result spmc_init(void)
1690 {
1691 	my_endpoint_id = SPMC_ENDPOINT_ID;
1692 	DMSG("My endpoint ID %#x", my_endpoint_id);
1693 
1694 	/*
1695 	 * If the SPMD thinks we are version 1.0 it will report version 1.0
1696 	 * to normal world regardless of which version we query the SPM
1697 	 * with. However, if the SPMD thinks we are version 1.1 it will
1698 	 * forward queries from normal world to let us negotiate the
1699 	 * version. So by setting version 1.0 here we should be compatible.
1700 	 *
1701 	 * Note that disagreement on negotiated version means that we'll
1702 	 * have communication problems with normal world.
1703 	 */
1704 	my_rxtx.ffa_vers = FFA_VERSION_1_0;
1705 
1706 	return TEE_SUCCESS;
1707 }
1708 #else /* !defined(CFG_CORE_SEL1_SPMC) */
1709 static bool is_ffa_success(uint32_t fid)
1710 {
1711 #ifdef ARM64
1712 	if (fid == FFA_SUCCESS_64)
1713 		return true;
1714 #endif
1715 	return fid == FFA_SUCCESS_32;
1716 }
1717 
1718 static void spmc_rxtx_map(struct ffa_rxtx *rxtx)
1719 {
1720 	struct thread_smc_args args = {
1721 #ifdef ARM64
1722 		.a0 = FFA_RXTX_MAP_64,
1723 #else
1724 		.a0 = FFA_RXTX_MAP_32,
1725 #endif
1726 		.a1 = virt_to_phys(rxtx->tx),
1727 		.a2 = virt_to_phys(rxtx->rx),
1728 		.a3 = 1,
1729 	};
1730 
1731 	thread_smccc(&args);
1732 	if (!is_ffa_success(args.a0)) {
1733 		if (args.a0 == FFA_ERROR)
1734 			EMSG("rxtx map failed with error %ld", args.a2);
1735 		else
1736 			EMSG("rxtx map failed");
1737 		panic();
1738 	}
1739 }
1740 
1741 static uint16_t get_my_id(void)
1742 {
1743 	struct thread_smc_args args = {
1744 		.a0 = FFA_ID_GET,
1745 	};
1746 
1747 	thread_smccc(&args);
1748 	if (!is_ffa_success(args.a0)) {
1749 		if (args.a0 == FFA_ERROR)
1750 			EMSG("Get id failed with error %ld", args.a2);
1751 		else
1752 			EMSG("Get id failed");
1753 		panic();
1754 	}
1755 
1756 	return args.a2;
1757 }
1758 
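/*
 * FFA_VERSION encodes the major version in bits [30:16] and the minor
 * version in bits [15:0]; a return value with bit 31 set indicates an
 * error, which is what the check below tests for.
 */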
1759 static uint32_t get_ffa_version(uint32_t my_version)
1760 {
1761 	struct thread_smc_args args = {
1762 		.a0 = FFA_VERSION,
1763 		.a1 = my_version,
1764 	};
1765 
1766 	thread_smccc(&args);
1767 	if (args.a0 & BIT(31)) {
1768 		EMSG("FF-A version failed with error %ld", args.a0);
1769 		panic();
1770 	}
1771 
1772 	return args.a0;
1773 }
1774 
1775 static void *spmc_retrieve_req(uint64_t cookie,
1776 			       struct ffa_mem_transaction_x *trans)
1777 {
1778 	struct ffa_mem_access *acc_descr_array = NULL;
1779 	struct ffa_mem_access_perm *perm_descr = NULL;
1780 	struct thread_smc_args args = {
1781 		.a0 = FFA_MEM_RETRIEVE_REQ_32,
1782 		.a3 =	0,	/* Address, Using TX -> MBZ */
1783 		.a4 =   0,	/* Using TX -> MBZ */
1784 	};
1785 	size_t size = 0;
1786 	int rc = 0;
1787 
1788 	if (my_rxtx.ffa_vers == FFA_VERSION_1_0) {
1789 		struct ffa_mem_transaction_1_0 *trans_descr = my_rxtx.tx;
1790 
1791 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
1792 		memset(trans_descr, 0, size);
1793 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
1794 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
1795 		trans_descr->global_handle = cookie;
1796 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
1797 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
1798 		trans_descr->mem_access_count = 1;
1799 		acc_descr_array = trans_descr->mem_access_array;
1800 	} else {
1801 		struct ffa_mem_transaction_1_1 *trans_descr = my_rxtx.tx;
1802 
1803 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
1804 		memset(trans_descr, 0, size);
1805 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
1806 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
1807 		trans_descr->global_handle = cookie;
1808 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
1809 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
1810 		trans_descr->mem_access_count = 1;
1811 		trans_descr->mem_access_offs = sizeof(*trans_descr);
1812 		trans_descr->mem_access_size = sizeof(struct ffa_mem_access);
1813 		acc_descr_array = (void *)((vaddr_t)my_rxtx.tx +
1814 					   sizeof(*trans_descr));
1815 	}
1816 	acc_descr_array->region_offs = 0;
1817 	acc_descr_array->reserved = 0;
1818 	perm_descr = &acc_descr_array->access_perm;
1819 	perm_descr->endpoint_id = my_endpoint_id;
1820 	perm_descr->perm = FFA_MEM_ACC_RW;
1821 	perm_descr->flags = 0;
1822 
1823 	args.a1 = size; /* Total Length */
1824 	args.a2 = size; /* Frag Length == Total length */
1825 	thread_smccc(&args);
1826 	if (args.a0 != FFA_MEM_RETRIEVE_RESP) {
1827 		if (args.a0 == FFA_ERROR)
1828 			EMSG("Failed to fetch cookie %#"PRIx64" error code %d",
1829 			     cookie, (int)args.a2);
1830 		else
1831 			EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64,
1832 			     cookie, args.a0);
1833 		return NULL;
1834 	}
1835 	rc = spmc_read_mem_transaction(my_rxtx.ffa_vers, my_rxtx.rx,
1836 				       my_rxtx.size, trans);
1837 	if (rc) {
1838 		EMSG("Memory transaction failure for cookie %#"PRIx64" rc %d",
1839 		     cookie, rc);
1840 		return NULL;
1841 	}
1842 
1843 	return my_rxtx.rx;
1844 }
1845 
1846 void thread_spmc_relinquish(uint64_t cookie)
1847 {
1848 	struct ffa_mem_relinquish *relinquish_desc = my_rxtx.tx;
1849 	struct thread_smc_args args = {
1850 		.a0 = FFA_MEM_RELINQUISH,
1851 	};
1852 
1853 	memset(relinquish_desc, 0, sizeof(*relinquish_desc));
1854 	relinquish_desc->handle = cookie;
1855 	relinquish_desc->flags = 0;
1856 	relinquish_desc->endpoint_count = 1;
1857 	relinquish_desc->endpoint_id_array[0] = my_endpoint_id;
1858 	thread_smccc(&args);
1859 	if (!is_ffa_success(args.a0))
1860 		EMSG("Failed to relinquish cookie %#"PRIx64, cookie);
1861 }
1862 
1863 static int set_pages(struct ffa_address_range *regions,
1864 		     unsigned int num_regions, unsigned int num_pages,
1865 		     struct mobj_ffa *mf)
1866 {
1867 	unsigned int n = 0;
1868 	unsigned int idx = 0;
1869 
1870 	for (n = 0; n < num_regions; n++) {
1871 		unsigned int page_count = READ_ONCE(regions[n].page_count);
1872 		uint64_t addr = READ_ONCE(regions[n].address);
1873 
1874 		if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count))
1875 			return FFA_INVALID_PARAMETERS;
1876 	}
1877 
1878 	if (idx != num_pages)
1879 		return FFA_INVALID_PARAMETERS;
1880 
1881 	return 0;
1882 }
1883 
1884 struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie)
1885 {
1886 	struct mobj_ffa *ret = NULL;
1887 	struct ffa_mem_transaction_x retrieve_desc = { };
1888 	struct ffa_mem_access *descr_array = NULL;
1889 	struct ffa_mem_region *descr = NULL;
1890 	struct mobj_ffa *mf = NULL;
1891 	unsigned int num_pages = 0;
1892 	unsigned int offs = 0;
1893 	void *buf = NULL;
1894 	struct thread_smc_args ffa_rx_release_args = {
1895 		.a0 = FFA_RX_RELEASE
1896 	};
1897 
1898 	/*
1899 	 * OP-TEE only supports a single mem_region while the
1900 	 * specification allows for more than one.
1901 	 */
1902 	buf = spmc_retrieve_req(cookie, &retrieve_desc);
1903 	if (!buf) {
1904 		EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64,
1905 		     cookie);
1906 		return NULL;
1907 	}
1908 
1909 	descr_array = (void *)((vaddr_t)buf + retrieve_desc.mem_access_offs);
1910 	offs = READ_ONCE(descr_array->region_offs);
1911 	descr = (struct ffa_mem_region *)((vaddr_t)buf + offs);
1912 
1913 	num_pages = READ_ONCE(descr->total_page_count);
1914 	mf = mobj_ffa_spmc_new(cookie, num_pages);
1915 	if (!mf)
1916 		goto out;
1917 
1918 	if (set_pages(descr->address_range_array,
1919 		      READ_ONCE(descr->address_range_count), num_pages, mf)) {
1920 		mobj_ffa_spmc_delete(mf);
1921 		goto out;
1922 	}
1923 
1924 	ret = mf;
1925 
1926 out:
1927 	/* Release RX buffer after the mem retrieve request. */
1928 	thread_smccc(&ffa_rx_release_args);
1929 
1930 	return ret;
1931 }
1932 
1933 static TEE_Result spmc_init(void)
1934 {
1935 	unsigned int major = 0;
1936 	unsigned int minor __maybe_unused = 0;
1937 	uint32_t my_vers = 0;
1938 	uint32_t vers = 0;
1939 
1940 	my_vers = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR);
1941 	vers = get_ffa_version(my_vers);
1942 	major = (vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK;
1943 	minor = (vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK;
1944 	DMSG("SPMC reported version %u.%u", major, minor);
1945 	if (major != FFA_VERSION_MAJOR) {
1946 		EMSG("Incompatible major version %u, expected %u",
1947 		     major, FFA_VERSION_MAJOR);
1948 		panic();
1949 	}
1950 	if (vers < my_vers)
1951 		my_vers = vers;
1952 	DMSG("Using version %u.%u",
1953 	     (my_vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK,
1954 	     (my_vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK);
1955 	my_rxtx.ffa_vers = my_vers;
1956 
1957 	spmc_rxtx_map(&my_rxtx);
1958 	my_endpoint_id = get_my_id();
1959 	DMSG("My endpoint ID %#x", my_endpoint_id);
1960 
1961 	return TEE_SUCCESS;
1962 }
1963 #endif /* !defined(CFG_CORE_SEL1_SPMC) */
1964 
1965 /*
1966  * boot_final() is always run before exiting at the end of boot
1967  * initialization. With virtualization the init-calls are only done once
1968  * an OP-TEE partition has been created, so with virtualization we have
1969  * to initialize via boot_final() to make sure a value is assigned
1970  * before it's used the first time.
1971  */
1972 #ifdef CFG_NS_VIRTUALIZATION
1973 boot_final(spmc_init);
1974 #else
1975 service_init(spmc_init);
1976 #endif
1977