xref: /optee_os/core/arch/arm/kernel/thread_spmc.c (revision 5c2c0fb31efbeff60960336d7438e810b825d582)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2020-2021, Linaro Limited.
4  * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
5  */
6 
7 #include <assert.h>
8 #include <ffa.h>
9 #include <initcall.h>
10 #include <io.h>
11 #include <kernel/interrupt.h>
12 #include <kernel/panic.h>
13 #include <kernel/secure_partition.h>
14 #include <kernel/spinlock.h>
15 #include <kernel/spmc_sp_handler.h>
16 #include <kernel/tee_misc.h>
17 #include <kernel/thread.h>
18 #include <kernel/thread_private.h>
19 #include <kernel/thread_spmc.h>
20 #include <kernel/virtualization.h>
21 #include <mm/core_mmu.h>
22 #include <mm/mobj.h>
23 #include <optee_ffa.h>
24 #include <optee_msg.h>
25 #include <optee_rpc_cmd.h>
26 #include <string.h>
27 #include <sys/queue.h>
28 #include <tee/entry_std.h>
29 #include <tee/uuid.h>
30 #include <util.h>
31 
32 #if defined(CFG_CORE_SEL1_SPMC)
33 struct mem_share_state {
34 	struct mobj_ffa *mf;
35 	unsigned int page_count;
36 	unsigned int region_count;
37 	unsigned int current_page_idx;
38 };
39 
40 struct mem_frag_state {
41 	struct mem_share_state share;
42 	tee_mm_entry_t *mm;
43 	unsigned int frag_offset;
44 	SLIST_ENTRY(mem_frag_state) link;
45 };
46 #endif
47 
48 /* Initialized in spmc_init() below */
49 static uint16_t my_endpoint_id __nex_bss;
50 #ifdef CFG_CORE_SEL1_SPMC
51 static const uint32_t my_part_props = FFA_PART_PROP_DIRECT_REQ_RECV |
52 				      FFA_PART_PROP_DIRECT_REQ_SEND |
53 #ifdef CFG_NS_VIRTUALIZATION
54 				      FFA_PART_PROP_NOTIF_CREATED |
55 				      FFA_PART_PROP_NOTIF_DESTROYED |
56 #endif
57 #ifdef ARM64
58 				      FFA_PART_PROP_AARCH64_STATE |
59 #endif
60 				      FFA_PART_PROP_IS_PE_ID;
61 
62 static uint32_t my_uuid_words[] = {
63 	/*
64 	 * - if the SPMC is in S-EL2 this UUID describes OP-TEE as an S-EL1
65 	 *   SP, or
66 	 * - if the SPMC is in S-EL1 then this UUID is for OP-TEE as a
67 	 *   logical partition, residing in the same exception level as the
68 	 *   SPMC
69 	 * UUID 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
70 	 */
71 	0xe0786148, 0xe311f8e7, 0x02005ebc, 0x1bc5d5a5,
72 };
73 
74 /*
75  * If struct ffa_rxtx::size is 0 RX/TX buffers are not mapped or initialized.
76  *
77  * struct ffa_rxtx::spinlock protects the variables below from concurrent
78  * access; this includes the use of the content of struct ffa_rxtx::rx and
79  * @frag_state_head.
80  *
81  * struct ffa_rxtx::tx_is_mine is true when we may write to struct
82  * ffa_rxtx::tx and false when it is owned by normal world.
83  *
84  * Note that we can't prevent normal world from updating the content of
85  * these buffers, so we must always be careful when reading, even while
86  * we hold the lock.
87  */
88 
89 static struct ffa_rxtx my_rxtx __nex_bss;
90 
91 static bool is_nw_buf(struct ffa_rxtx *rxtx)
92 {
93 	return rxtx == &my_rxtx;
94 }
95 
96 static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
97 	SLIST_HEAD_INITIALIZER(&frag_state_head);
98 #else
99 static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
100 static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
101 static struct ffa_rxtx my_rxtx = {
102 	.rx = __rx_buf,
103 	.tx = __tx_buf,
104 	.size = sizeof(__rx_buf),
105 };
106 #endif
107 
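/* Swap the 16-bit source and destination endpoint IDs packed in a src/dst word */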
108 static uint32_t swap_src_dst(uint32_t src_dst)
109 {
110 	return (src_dst >> 16) | (src_dst << 16);
111 }
112 
113 static uint16_t get_sender_id(uint32_t src_dst)
114 {
115 	return src_dst >> 16;
116 }
117 
118 void spmc_set_args(struct thread_smc_args *args, uint32_t fid, uint32_t src_dst,
119 		   uint32_t w2, uint32_t w3, uint32_t w4, uint32_t w5)
120 {
121 	*args = (struct thread_smc_args){ .a0 = fid,
122 					  .a1 = src_dst,
123 					  .a2 = w2,
124 					  .a3 = w3,
125 					  .a4 = w4,
126 					  .a5 = w5, };
127 }
128 
129 uint32_t spmc_exchange_version(uint32_t vers, struct ffa_rxtx *rxtx)
130 {
131 	/*
132 	 * No locking; if the caller makes concurrent calls to this it's
133 	 * only making a mess for itself. We must be able to renegotiate
134 	 * the FF-A version in order to support differing versions between
135 	 * the loader and the driver.
136 	 */
137 	if (vers < FFA_VERSION_1_1)
138 		rxtx->ffa_vers = FFA_VERSION_1_0;
139 	else
140 		rxtx->ffa_vers = FFA_VERSION_1_1;
141 
142 	return rxtx->ffa_vers;
143 }
144 
145 #if defined(CFG_CORE_SEL1_SPMC)
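/*
 * Handle FFA_FEATURES: report whether the FF-A function ID passed in w1 is
 * implemented by this SPMC, together with any interface properties in w2.
 */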
146 static void handle_features(struct thread_smc_args *args)
147 {
148 	uint32_t ret_fid = 0;
149 	uint32_t ret_w2 = FFA_PARAM_MBZ;
150 
151 	switch (args->a1) {
152 #ifdef ARM64
153 	case FFA_RXTX_MAP_64:
154 #endif
155 	case FFA_RXTX_MAP_32:
156 		ret_fid = FFA_SUCCESS_32;
157 		ret_w2 = 0; /* 4kB Minimum buffer size and alignment boundary */
158 		break;
159 #ifdef ARM64
160 	case FFA_MEM_SHARE_64:
161 #endif
162 	case FFA_MEM_SHARE_32:
163 		ret_fid = FFA_SUCCESS_32;
164 		/*
165 		 * Partition manager supports transmission of a memory
166 		 * transaction descriptor in a buffer dynamically allocated
167 		 * by the endpoint.
168 		 */
169 		ret_w2 = BIT(0);
170 		break;
171 
172 	case FFA_ERROR:
173 	case FFA_VERSION:
174 	case FFA_SUCCESS_32:
175 #ifdef ARM64
176 	case FFA_SUCCESS_64:
177 #endif
178 	case FFA_FEATURES:
179 	case FFA_SPM_ID_GET:
180 	case FFA_MEM_FRAG_TX:
181 	case FFA_MEM_RECLAIM:
182 	case FFA_MSG_SEND_DIRECT_REQ_32:
183 	case FFA_INTERRUPT:
184 	case FFA_PARTITION_INFO_GET:
185 	case FFA_RXTX_UNMAP:
186 	case FFA_RX_RELEASE:
187 	case FFA_FEATURE_MANAGED_EXIT_INTR:
188 		ret_fid = FFA_SUCCESS_32;
189 		break;
190 	default:
191 		ret_fid = FFA_ERROR;
192 		ret_w2 = FFA_NOT_SUPPORTED;
193 		break;
194 	}
195 
196 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
197 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
198 }
199 
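/*
 * Map a non-secure physical buffer of sz bytes into the NSEC shared memory
 * area and return its virtual address in *va_ret.
 */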
200 static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
201 {
202 	tee_mm_entry_t *mm = NULL;
203 
204 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
205 		return FFA_INVALID_PARAMETERS;
206 
207 	mm = tee_mm_alloc(&tee_mm_shm, sz);
208 	if (!mm)
209 		return FFA_NO_MEMORY;
210 
211 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
212 					  sz / SMALL_PAGE_SIZE,
213 					  MEM_AREA_NSEC_SHM)) {
214 		tee_mm_free(mm);
215 		return FFA_INVALID_PARAMETERS;
216 	}
217 
218 	*va_ret = (void *)tee_mm_get_smem(mm);
219 	return 0;
220 }
221 
222 static void handle_spm_id_get(struct thread_smc_args *args)
223 {
224 	spmc_set_args(args, FFA_SUCCESS_32, FFA_PARAM_MBZ, my_endpoint_id,
225 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
226 }
227 
228 static void unmap_buf(void *va, size_t sz)
229 {
230 	tee_mm_entry_t *mm = tee_mm_find(&tee_mm_shm, (vaddr_t)va);
231 
232 	assert(mm);
233 	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
234 	tee_mm_free(mm);
235 }
236 
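/*
 * Handle FFA_RXTX_MAP: map and record the caller's RX/TX buffer pair. A
 * normal world caller passes physical addresses which are mapped here; an
 * SP's buffers are already mapped virtual addresses.
 */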
237 void spmc_handle_rxtx_map(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
238 {
239 	int rc = 0;
240 	uint32_t ret_fid = FFA_ERROR;
241 	unsigned int sz = 0;
242 	paddr_t rx_pa = 0;
243 	paddr_t tx_pa = 0;
244 	void *rx = NULL;
245 	void *tx = NULL;
246 
247 	cpu_spin_lock(&rxtx->spinlock);
248 
249 	if (args->a3 & GENMASK_64(63, 6)) {
250 		rc = FFA_INVALID_PARAMETERS;
251 		goto out;
252 	}
253 
254 	sz = args->a3 * SMALL_PAGE_SIZE;
255 	if (!sz) {
256 		rc = FFA_INVALID_PARAMETERS;
257 		goto out;
258 	}
259 	/* TX/RX are swapped compared to the caller */
260 	tx_pa = args->a2;
261 	rx_pa = args->a1;
262 
263 	if (rxtx->size) {
264 		rc = FFA_DENIED;
265 		goto out;
266 	}
267 
268 	/*
269 	 * If the buffer comes from an SP the address is virtual and already
270 	 * mapped.
271 	 */
272 	if (is_nw_buf(rxtx)) {
273 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
274 			enum teecore_memtypes mt = MEM_AREA_NEX_NSEC_SHM;
275 			bool tx_alloced = false;
276 
277 			/*
278 			 * With virtualization we establish this mapping in
279 			 * the nexus mapping, which is then replicated to
280 			 * each partition.
281 			 *
282 			 * This means that this mapping must be done before
283 			 * any partition is created and then must not be
284 			 * changed.
285 			 */
286 
287 			/*
288 			 * core_mmu_add_mapping() may reuse previous
289 			 * mappings. First check if there are any mappings to
290 			 * reuse so we know how to clean up in case of
291 			 * failure.
292 			 */
293 			tx = phys_to_virt(tx_pa, mt, sz);
294 			rx = phys_to_virt(rx_pa, mt, sz);
295 			if (!tx) {
296 				tx = core_mmu_add_mapping(mt, tx_pa, sz);
297 				if (!tx) {
298 					rc = FFA_NO_MEMORY;
299 					goto out;
300 				}
301 				tx_alloced = true;
302 			}
303 			if (!rx)
304 				rx = core_mmu_add_mapping(mt, rx_pa, sz);
305 
306 			if (!rx) {
307 				if (tx_alloced && tx)
308 					core_mmu_remove_mapping(mt, tx, sz);
309 				rc = FFA_NO_MEMORY;
310 				goto out;
311 			}
312 		} else {
313 			rc = map_buf(tx_pa, sz, &tx);
314 			if (rc)
315 				goto out;
316 			rc = map_buf(rx_pa, sz, &rx);
317 			if (rc) {
318 				unmap_buf(tx, sz);
319 				goto out;
320 			}
321 		}
322 		rxtx->tx = tx;
323 		rxtx->rx = rx;
324 	} else {
325 		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
326 			rc = FFA_INVALID_PARAMETERS;
327 			goto out;
328 		}
329 
330 		if (!virt_to_phys((void *)tx_pa) ||
331 		    !virt_to_phys((void *)rx_pa)) {
332 			rc = FFA_INVALID_PARAMETERS;
333 			goto out;
334 		}
335 
336 		rxtx->tx = (void *)tx_pa;
337 		rxtx->rx = (void *)rx_pa;
338 	}
339 
340 	rxtx->size = sz;
341 	rxtx->tx_is_mine = true;
342 	ret_fid = FFA_SUCCESS_32;
343 	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
344 	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
345 out:
346 	cpu_spin_unlock(&rxtx->spinlock);
347 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
348 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
349 }
350 
351 void spmc_handle_rxtx_unmap(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
352 {
353 	uint32_t ret_fid = FFA_ERROR;
354 	int rc = FFA_INVALID_PARAMETERS;
355 
356 	cpu_spin_lock(&rxtx->spinlock);
357 
358 	if (!rxtx->size)
359 		goto out;
360 
361 	/* We don't unmap the SP memory as the SP might still use it */
362 	if (is_nw_buf(rxtx)) {
363 		unmap_buf(rxtx->rx, rxtx->size);
364 		unmap_buf(rxtx->tx, rxtx->size);
365 	}
366 	rxtx->size = 0;
367 	rxtx->rx = NULL;
368 	rxtx->tx = NULL;
369 	ret_fid = FFA_SUCCESS_32;
370 	rc = 0;
371 out:
372 	cpu_spin_unlock(&rxtx->spinlock);
373 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
374 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
375 }
376 
377 void spmc_handle_rx_release(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
378 {
379 	uint32_t ret_fid = 0;
380 	int rc = 0;
381 
382 	cpu_spin_lock(&rxtx->spinlock);
383 	/* The sender's RX is our TX */
384 	if (!rxtx->size || rxtx->tx_is_mine) {
385 		ret_fid = FFA_ERROR;
386 		rc = FFA_DENIED;
387 	} else {
388 		ret_fid = FFA_SUCCESS_32;
389 		rc = 0;
390 		rxtx->tx_is_mine = true;
391 	}
392 	cpu_spin_unlock(&rxtx->spinlock);
393 
394 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
395 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
396 }
397 
398 static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
399 {
400 	return !w0 && !w1 && !w2 && !w3;
401 }
402 
403 static bool is_my_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
404 {
405 	/*
406 	 * This depends on which UUID we have been assigned.
407 	 * TODO add a generic mechanism to obtain our UUID.
408 	 *
409 	 * The test below is for the hard coded UUID
410 	 * 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
411 	 */
412 	return w0 == my_uuid_words[0] && w1 == my_uuid_words[1] &&
413 	       w2 == my_uuid_words[2] && w3 == my_uuid_words[3];
414 }
415 
416 TEE_Result spmc_fill_partition_entry(uint32_t ffa_vers, void *buf, size_t blen,
417 				     size_t idx, uint16_t endpoint_id,
418 				     uint16_t execution_context,
419 				     uint32_t part_props,
420 				     const uint32_t uuid_words[4])
421 {
422 	struct ffa_partition_info_x *fpi = NULL;
423 	size_t fpi_size = sizeof(*fpi);
424 
425 	if (ffa_vers >= FFA_VERSION_1_1)
426 		fpi_size += FFA_UUID_SIZE;
427 
428 	if ((idx + 1) * fpi_size > blen)
429 		return TEE_ERROR_OUT_OF_MEMORY;
430 
431 	fpi = (void *)((vaddr_t)buf + idx * fpi_size);
432 	fpi->id = endpoint_id;
433 	/* Number of execution contexts implemented by this partition */
434 	fpi->execution_context = execution_context;
435 
436 	fpi->partition_properties = part_props;
437 
438 	if (ffa_vers >= FFA_VERSION_1_1) {
439 		if (uuid_words)
440 			memcpy(fpi->uuid, uuid_words, FFA_UUID_SIZE);
441 		else
442 			memset(fpi->uuid, 0, FFA_UUID_SIZE);
443 	}
444 
445 	return TEE_SUCCESS;
446 }
447 
448 static int handle_partition_info_get_all(size_t *elem_count,
449 					 struct ffa_rxtx *rxtx, bool count_only)
450 {
451 	if (!count_only) {
452 		/* Add OP-TEE SP */
453 		if (spmc_fill_partition_entry(rxtx->ffa_vers, rxtx->tx,
454 					      rxtx->size, 0, my_endpoint_id,
455 					      CFG_TEE_CORE_NB_CORE,
456 					      my_part_props, my_uuid_words))
457 			return FFA_NO_MEMORY;
458 	}
459 	*elem_count = 1;
460 
461 	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
462 		if (sp_partition_info_get(rxtx->ffa_vers, rxtx->tx, rxtx->size,
463 					  NULL, elem_count, count_only))
464 			return FFA_NO_MEMORY;
465 	}
466 
467 	return FFA_OK;
468 }
469 
470 void spmc_handle_partition_info_get(struct thread_smc_args *args,
471 				    struct ffa_rxtx *rxtx)
472 {
473 	TEE_Result res = TEE_SUCCESS;
474 	uint32_t ret_fid = FFA_ERROR;
475 	uint32_t rc = 0;
476 	bool count_only = args->a5 & FFA_PARTITION_INFO_GET_COUNT_FLAG;
477 
478 	if (!count_only) {
479 		cpu_spin_lock(&rxtx->spinlock);
480 
481 		if (!rxtx->size || !rxtx->tx_is_mine) {
482 			rc = FFA_BUSY;
483 			goto out;
484 		}
485 	}
486 
487 	if (is_nil_uuid(args->a1, args->a2, args->a3, args->a4)) {
488 		size_t elem_count = 0;
489 
490 		ret_fid = handle_partition_info_get_all(&elem_count, rxtx,
491 							count_only);
492 
493 		if (ret_fid) {
494 			rc = ret_fid;
495 			ret_fid = FFA_ERROR;
496 		} else {
497 			ret_fid = FFA_SUCCESS_32;
498 			rc = elem_count;
499 		}
500 
501 		goto out;
502 	}
503 
504 	if (is_my_uuid(args->a1, args->a2, args->a3, args->a4)) {
505 		if (!count_only) {
506 			res = spmc_fill_partition_entry(rxtx->ffa_vers,
507 							rxtx->tx, rxtx->size, 0,
508 							my_endpoint_id,
509 							CFG_TEE_CORE_NB_CORE,
510 							my_part_props,
511 							my_uuid_words);
512 			if (res) {
513 				ret_fid = FFA_ERROR;
514 				rc = FFA_INVALID_PARAMETERS;
515 				goto out;
516 			}
517 		}
518 		rc = 1;
519 	} else if (IS_ENABLED(CFG_SECURE_PARTITION)) {
520 		uint32_t uuid_array[4] = { 0 };
521 		TEE_UUID uuid = { };
522 		size_t count = 0;
523 
524 		uuid_array[0] = args->a1;
525 		uuid_array[1] = args->a2;
526 		uuid_array[2] = args->a3;
527 		uuid_array[3] = args->a4;
528 		tee_uuid_from_octets(&uuid, (uint8_t *)uuid_array);
529 
530 		res = sp_partition_info_get(rxtx->ffa_vers, rxtx->tx,
531 					    rxtx->size, &uuid, &count,
532 					    count_only);
533 		if (res != TEE_SUCCESS) {
534 			ret_fid = FFA_ERROR;
535 			rc = FFA_INVALID_PARAMETERS;
536 			goto out;
537 		}
538 		rc = count;
539 	} else {
540 		ret_fid = FFA_ERROR;
541 		rc = FFA_INVALID_PARAMETERS;
542 		goto out;
543 	}
544 
545 	ret_fid = FFA_SUCCESS_32;
546 
547 out:
548 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
549 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
550 	if (!count_only) {
551 		rxtx->tx_is_mine = false;
552 		cpu_spin_unlock(&rxtx->spinlock);
553 	}
554 }
555 
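/* Handle FFA_RUN: resume a preempted SP or a suspended OP-TEE thread */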
556 static void spmc_handle_run(struct thread_smc_args *args)
557 {
558 	uint16_t endpoint = FFA_TARGET_INFO_GET_SP_ID(args->a1);
559 	uint16_t thread_id = FFA_TARGET_INFO_GET_VCPU_ID(args->a1);
560 	uint32_t rc = FFA_OK;
561 
562 	if (endpoint != my_endpoint_id) {
563 		/*
564 		 * The endpoint should be an SP; try to resume the SP from
565 		 * preempted into busy state.
566 		 */
567 		rc = spmc_sp_resume_from_preempted(endpoint);
568 		if (rc)
569 			goto out;
570 	}
571 
572 	thread_resume_from_rpc(thread_id, 0, 0, 0, 0);
573 
574 	/* thread_resume_from_rpc() returns only if the thread_id is invalid */
575 	rc = FFA_INVALID_PARAMETERS;
576 
577 out:
578 	spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
579 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
580 }
581 #endif /*CFG_CORE_SEL1_SPMC*/
582 
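/*
 * Serve a yielding call by resuming a suspended thread (RESUME) or
 * allocating a new thread for the request. thread_resume_from_rpc() and
 * thread_alloc_and_run() only return here on failure.
 */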
583 static void handle_yielding_call(struct thread_smc_args *args)
584 {
585 	TEE_Result res = 0;
586 
587 	thread_check_canaries();
588 
589 	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
590 		/* Note connection to struct thread_rpc_arg::ret */
591 		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
592 				       0);
593 		res = TEE_ERROR_BAD_PARAMETERS;
594 	} else {
595 		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
596 				     args->a6, args->a7);
597 		res = TEE_ERROR_BUSY;
598 	}
599 	spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
600 		      swap_src_dst(args->a1), 0, res, 0, 0);
601 }
602 
603 static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
604 {
605 	uint64_t cookie = reg_pair_to_64(a5, a4);
606 	uint32_t res = 0;
607 
608 	res = mobj_ffa_unregister_by_cookie(cookie);
609 	switch (res) {
610 	case TEE_SUCCESS:
611 	case TEE_ERROR_ITEM_NOT_FOUND:
612 		return 0;
613 	case TEE_ERROR_BUSY:
614 		EMSG("res %#"PRIx32, res);
615 		return FFA_BUSY;
616 	default:
617 		EMSG("res %#"PRIx32, res);
618 		return FFA_INVALID_PARAMETERS;
619 	}
620 }
621 
622 static void handle_blocking_call(struct thread_smc_args *args)
623 {
624 	switch (args->a3) {
625 	case OPTEE_FFA_GET_API_VERSION:
626 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
627 			      swap_src_dst(args->a1), 0,
628 			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
629 			      0);
630 		break;
631 	case OPTEE_FFA_GET_OS_VERSION:
632 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
633 			      swap_src_dst(args->a1), 0,
634 			      CFG_OPTEE_REVISION_MAJOR,
635 			      CFG_OPTEE_REVISION_MINOR, TEE_IMPL_GIT_SHA1);
636 		break;
637 	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
638 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
639 			      swap_src_dst(args->a1), 0, 0,
640 			      THREAD_RPC_MAX_NUM_PARAMS,
641 			      OPTEE_FFA_SEC_CAP_ARG_OFFSET);
642 		break;
643 	case OPTEE_FFA_UNREGISTER_SHM:
644 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
645 			      swap_src_dst(args->a1), 0,
646 			      handle_unregister_shm(args->a4, args->a5), 0, 0);
647 		break;
648 	default:
649 		EMSG("Unhandled blocking service ID %#"PRIx32,
650 		     (uint32_t)args->a3);
651 		panic();
652 	}
653 }
654 
655 static void handle_framework_direct_request(struct thread_smc_args *args,
656 					    struct ffa_rxtx *rxtx)
657 {
658 	uint32_t w0 = FFA_ERROR;
659 	uint32_t w1 = FFA_PARAM_MBZ;
660 	uint32_t w2 = FFA_NOT_SUPPORTED;
661 	uint32_t w3 = FFA_PARAM_MBZ;
662 
663 	switch (args->a2 & FFA_MSG_TYPE_MASK) {
664 	case FFA_MSG_SEND_VM_CREATED:
665 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
666 			uint16_t guest_id = args->a5;
667 			TEE_Result res = virt_guest_created(guest_id);
668 
669 			w0 = FFA_MSG_SEND_DIRECT_RESP_32;
670 			w1 = swap_src_dst(args->a1);
671 			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_CREATED;
672 			if (res == TEE_SUCCESS)
673 				w3 = FFA_OK;
674 			else if (res == TEE_ERROR_OUT_OF_MEMORY)
675 				w3 = FFA_DENIED;
676 			else
677 				w3 = FFA_INVALID_PARAMETERS;
678 		}
679 		break;
680 	case FFA_MSG_SEND_VM_DESTROYED:
681 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
682 			uint16_t guest_id = args->a5;
683 			TEE_Result res = virt_guest_destroyed(guest_id);
684 
685 			w0 = FFA_MSG_SEND_DIRECT_RESP_32;
686 			w1 = swap_src_dst(args->a1);
687 			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_DESTROYED;
688 			if (res == TEE_SUCCESS)
689 				w3 = FFA_OK;
690 			else
691 				w3 = FFA_INVALID_PARAMETERS;
692 		}
693 		break;
694 	case FFA_MSG_VERSION_REQ:
695 		w0 = FFA_MSG_SEND_DIRECT_RESP_32;
696 		w1 = swap_src_dst(args->a1);
697 		w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_VERSION_RESP;
698 		w3 = spmc_exchange_version(args->a3, rxtx);
699 		break;
700 	default:
701 		break;
702 	}
703 	spmc_set_args(args, w0, w1, w2, w3, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
704 }
705 
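/*
 * Dispatch FFA_MSG_SEND_DIRECT_REQ: forward to an SP if the destination
 * isn't OP-TEE, handle framework messages, or serve OP-TEE's blocking and
 * yielding calls.
 */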
706 static void handle_direct_request(struct thread_smc_args *args,
707 				  struct ffa_rxtx *rxtx)
708 {
709 	if (IS_ENABLED(CFG_SECURE_PARTITION) &&
710 	    FFA_DST(args->a1) != my_endpoint_id) {
711 		spmc_sp_start_thread(args);
712 		return;
713 	}
714 
715 	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
716 		handle_framework_direct_request(args, rxtx);
717 		return;
718 	}
719 
720 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
721 	    virt_set_guest(get_sender_id(args->a1))) {
722 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
723 			      swap_src_dst(args->a1), 0,
724 			      TEE_ERROR_ITEM_NOT_FOUND, 0, 0);
725 		return;
726 	}
727 
728 	if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
729 		handle_yielding_call(args);
730 	else
731 		handle_blocking_call(args);
732 
733 	/*
734 	 * Note that handle_yielding_call() typically only returns if a
735 	 * thread cannot be allocated or found. virt_unset_guest() is also
736 	 * called from thread_state_suspend() and thread_state_free().
737 	 */
738 	virt_unset_guest();
739 }
740 
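/*
 * Parse an FF-A v1.0 or v1.1 memory transaction descriptor in @buf into the
 * version-independent struct ffa_mem_transaction_x, checking that the
 * endpoint memory access descriptor array fits within @blen.
 */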
741 int spmc_read_mem_transaction(uint32_t ffa_vers, void *buf, size_t blen,
742 			      struct ffa_mem_transaction_x *trans)
743 {
744 	uint16_t mem_reg_attr = 0;
745 	uint32_t flags = 0;
746 	uint32_t count = 0;
747 	uint32_t offs = 0;
748 	uint32_t size = 0;
749 	size_t n = 0;
750 
751 	if (!IS_ALIGNED_WITH_TYPE(buf, uint64_t))
752 		return FFA_INVALID_PARAMETERS;
753 
754 	if (ffa_vers >= FFA_VERSION_1_1) {
755 		struct ffa_mem_transaction_1_1 *descr = NULL;
756 
757 		if (blen < sizeof(*descr))
758 			return FFA_INVALID_PARAMETERS;
759 
760 		descr = buf;
761 		trans->sender_id = READ_ONCE(descr->sender_id);
762 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
763 		flags = READ_ONCE(descr->flags);
764 		trans->global_handle = READ_ONCE(descr->global_handle);
765 		trans->tag = READ_ONCE(descr->tag);
766 
767 		count = READ_ONCE(descr->mem_access_count);
768 		size = READ_ONCE(descr->mem_access_size);
769 		offs = READ_ONCE(descr->mem_access_offs);
770 	} else {
771 		struct ffa_mem_transaction_1_0 *descr = NULL;
772 
773 		if (blen < sizeof(*descr))
774 			return FFA_INVALID_PARAMETERS;
775 
776 		descr = buf;
777 		trans->sender_id = READ_ONCE(descr->sender_id);
778 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
779 		flags = READ_ONCE(descr->flags);
780 		trans->global_handle = READ_ONCE(descr->global_handle);
781 		trans->tag = READ_ONCE(descr->tag);
782 
783 		count = READ_ONCE(descr->mem_access_count);
784 		size = sizeof(struct ffa_mem_access);
785 		offs = offsetof(struct ffa_mem_transaction_1_0,
786 				mem_access_array);
787 	}
788 
789 	if (mem_reg_attr > UINT8_MAX || flags > UINT8_MAX ||
790 	    size > UINT8_MAX || count > UINT8_MAX || offs > UINT16_MAX)
791 		return FFA_INVALID_PARAMETERS;
792 
793 	/* Check that the endpoint memory access descriptor array fits */
794 	if (MUL_OVERFLOW(size, count, &n) || ADD_OVERFLOW(offs, n, &n) ||
795 	    n > blen)
796 		return FFA_INVALID_PARAMETERS;
797 
798 	trans->mem_reg_attr = mem_reg_attr;
799 	trans->flags = flags;
800 	trans->mem_access_size = size;
801 	trans->mem_access_count = count;
802 	trans->mem_access_offs = offs;
803 	return 0;
804 }
805 
806 #if defined(CFG_CORE_SEL1_SPMC)
807 static int get_acc_perms(vaddr_t mem_acc_base, unsigned int mem_access_size,
808 			 unsigned int mem_access_count, uint8_t *acc_perms,
809 			 unsigned int *region_offs)
810 {
811 	struct ffa_mem_access_perm *descr = NULL;
812 	struct ffa_mem_access *mem_acc = NULL;
813 	unsigned int n = 0;
814 
815 	for (n = 0; n < mem_access_count; n++) {
816 		mem_acc = (void *)(mem_acc_base + mem_access_size * n);
817 		descr = &mem_acc->access_perm;
818 		if (READ_ONCE(descr->endpoint_id) == my_endpoint_id) {
819 			*acc_perms = READ_ONCE(descr->perm);
820 			*region_offs = READ_ONCE(mem_acc->region_offs);
821 			return 0;
822 		}
823 	}
824 
825 	return FFA_INVALID_PARAMETERS;
826 }
827 
828 static int mem_share_init(struct ffa_mem_transaction_x *mem_trans, void *buf,
829 			  size_t blen, unsigned int *page_count,
830 			  unsigned int *region_count, size_t *addr_range_offs)
831 {
832 	const uint16_t exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
833 	const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
834 	struct ffa_mem_region *region_descr = NULL;
835 	unsigned int region_descr_offs = 0;
836 	uint8_t mem_acc_perm = 0;
837 	size_t n = 0;
838 
839 	if (mem_trans->mem_reg_attr != exp_mem_reg_attr)
840 		return FFA_INVALID_PARAMETERS;
841 
842 	/* Check that the access permissions match what's expected */
843 	if (get_acc_perms((vaddr_t)buf + mem_trans->mem_access_offs,
844 			  mem_trans->mem_access_size,
845 			  mem_trans->mem_access_count,
846 			  &mem_acc_perm, &region_descr_offs) ||
847 	    mem_acc_perm != exp_mem_acc_perm)
848 		return FFA_INVALID_PARAMETERS;
849 
850 	/* Check that the Composite memory region descriptor fits */
851 	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
852 	    n > blen)
853 		return FFA_INVALID_PARAMETERS;
854 
855 	if (!IS_ALIGNED_WITH_TYPE((vaddr_t)buf + region_descr_offs,
856 				  struct ffa_mem_region))
857 		return FFA_INVALID_PARAMETERS;
858 
859 	region_descr = (struct ffa_mem_region *)((vaddr_t)buf +
860 						 region_descr_offs);
861 	*page_count = READ_ONCE(region_descr->total_page_count);
862 	*region_count = READ_ONCE(region_descr->address_range_count);
863 	*addr_range_offs = n;
864 	return 0;
865 }
866 
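/*
 * Add the address ranges found in the fragment @buf of length @flen to the
 * mobj. Returns the number of bytes consumed if more ranges remain, 0 when
 * the whole descriptor has been processed, or a negative FFA error code.
 */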
867 static int add_mem_share_helper(struct mem_share_state *s, void *buf,
868 				size_t flen)
869 {
870 	unsigned int region_count = flen / sizeof(struct ffa_address_range);
871 	struct ffa_address_range *arange = NULL;
872 	unsigned int n = 0;
873 
874 	if (region_count > s->region_count)
875 		region_count = s->region_count;
876 
877 	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range))
878 		return FFA_INVALID_PARAMETERS;
879 	arange = buf;
880 
881 	for (n = 0; n < region_count; n++) {
882 		unsigned int page_count = READ_ONCE(arange[n].page_count);
883 		uint64_t addr = READ_ONCE(arange[n].address);
884 
885 		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
886 					  addr, page_count))
887 			return FFA_INVALID_PARAMETERS;
888 	}
889 
890 	s->region_count -= region_count;
891 	if (s->region_count)
892 		return region_count * sizeof(*arange);
893 
894 	if (s->current_page_idx != s->page_count)
895 		return FFA_INVALID_PARAMETERS;
896 
897 	return 0;
898 }
899 
900 static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
901 {
902 	int rc = 0;
903 
904 	rc = add_mem_share_helper(&s->share, buf, flen);
905 	if (rc >= 0) {
906 		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
907 			/* We're not at the end of the descriptor yet */
908 			if (s->share.region_count)
909 				return s->frag_offset;
910 
911 			/* We're done */
912 			rc = 0;
913 		} else {
914 			rc = FFA_INVALID_PARAMETERS;
915 		}
916 	}
917 
918 	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
919 	if (rc < 0)
920 		mobj_ffa_sel1_spmc_delete(s->share.mf);
921 	else
922 		mobj_ffa_push_to_inactive(s->share.mf);
923 	free(s);
924 
925 	return rc;
926 }
927 
928 static bool is_sp_share(struct ffa_mem_transaction_x *mem_trans,
929 			void *buf)
930 {
931 	struct ffa_mem_access_perm *perm = NULL;
932 	struct ffa_mem_access *mem_acc = NULL;
933 
934 	if (!IS_ENABLED(CFG_SECURE_PARTITION))
935 		return false;
936 
937 	if (mem_trans->mem_access_count < 1)
938 		return false;
939 
940 	mem_acc = (void *)((vaddr_t)buf + mem_trans->mem_access_offs);
941 	perm = &mem_acc->access_perm;
942 
943 	/*
944 	 * perm->endpoint_id is read here only to check if the endpoint is
945 	 * OP-TEE. We do read it again later, but there are some additional
946 	 * checks there to make sure that the data is correct.
947 	 */
948 	return READ_ONCE(perm->endpoint_id) != my_endpoint_id;
949 }
950 
951 static int add_mem_share(struct ffa_mem_transaction_x *mem_trans,
952 			 tee_mm_entry_t *mm, void *buf, size_t blen,
953 			 size_t flen, uint64_t *global_handle)
954 {
955 	int rc = 0;
956 	struct mem_share_state share = { };
957 	size_t addr_range_offs = 0;
958 	uint64_t cookie = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
959 	size_t n = 0;
960 
961 	rc = mem_share_init(mem_trans, buf, flen, &share.page_count,
962 			    &share.region_count, &addr_range_offs);
963 	if (rc)
964 		return rc;
965 
966 	if (MUL_OVERFLOW(share.region_count,
967 			 sizeof(struct ffa_address_range), &n) ||
968 	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
969 		return FFA_INVALID_PARAMETERS;
970 
971 	if (mem_trans->global_handle)
972 		cookie = mem_trans->global_handle;
973 	share.mf = mobj_ffa_sel1_spmc_new(cookie, share.page_count);
974 	if (!share.mf)
975 		return FFA_NO_MEMORY;
976 
977 	if (flen != blen) {
978 		struct mem_frag_state *s = calloc(sizeof(*s), 1);
979 
980 		if (!s) {
981 			rc = FFA_NO_MEMORY;
982 			goto err;
983 		}
984 		s->share = share;
985 		s->mm = mm;
986 		s->frag_offset = addr_range_offs;
987 
988 		SLIST_INSERT_HEAD(&frag_state_head, s, link);
989 		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
990 					flen - addr_range_offs);
991 
992 		if (rc >= 0)
993 			*global_handle = mobj_ffa_get_cookie(share.mf);
994 
995 		return rc;
996 	}
997 
998 	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
999 				  flen - addr_range_offs);
1000 	if (rc) {
1001 		/*
1002 		 * A positive return (number of consumed bytes) instead of 0 means
1003 		 * the descriptor wasn't fully consumed, so treat that as an error.
1004 		 */
1005 		rc = FFA_INVALID_PARAMETERS;
1006 		goto err;
1007 	}
1008 
1009 	*global_handle = mobj_ffa_push_to_inactive(share.mf);
1010 
1011 	return 0;
1012 err:
1013 	mobj_ffa_sel1_spmc_delete(share.mf);
1014 	return rc;
1015 }
1016 
1017 static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
1018 				 unsigned int page_count,
1019 				 uint64_t *global_handle, struct ffa_rxtx *rxtx)
1020 {
1021 	struct ffa_mem_transaction_x mem_trans = { };
1022 	int rc = 0;
1023 	size_t len = 0;
1024 	void *buf = NULL;
1025 	tee_mm_entry_t *mm = NULL;
1026 	vaddr_t offs = pbuf & SMALL_PAGE_MASK;
1027 
1028 	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
1029 		return FFA_INVALID_PARAMETERS;
1030 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
1031 		return FFA_INVALID_PARAMETERS;
1032 
1033 	/*
1034 	 * Check that the length reported in flen is covered by len even
1035 	 * if the offset is taken into account.
1036 	 */
1037 	if (len < flen || len - offs < flen)
1038 		return FFA_INVALID_PARAMETERS;
1039 
1040 	mm = tee_mm_alloc(&tee_mm_shm, len);
1041 	if (!mm)
1042 		return FFA_NO_MEMORY;
1043 
1044 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
1045 					  page_count, MEM_AREA_NSEC_SHM)) {
1046 		rc = FFA_INVALID_PARAMETERS;
1047 		goto out;
1048 	}
1049 	buf = (void *)(tee_mm_get_smem(mm) + offs);
1050 
1051 	cpu_spin_lock(&rxtx->spinlock);
1052 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, buf, flen, &mem_trans);
1053 	if (!rc && IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1054 	    virt_set_guest(mem_trans.sender_id))
1055 		rc = FFA_DENIED;
1056 	if (!rc)
1057 		rc = add_mem_share(&mem_trans, mm, buf, blen, flen,
1058 				   global_handle);
1059 	virt_unset_guest();
1060 	cpu_spin_unlock(&rxtx->spinlock);
1061 	if (rc > 0)
1062 		return rc;
1063 
1064 	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1065 out:
1066 	tee_mm_free(mm);
1067 	return rc;
1068 }
1069 
1070 static int handle_mem_share_rxbuf(size_t blen, size_t flen,
1071 				  uint64_t *global_handle,
1072 				  struct ffa_rxtx *rxtx)
1073 {
1074 	struct ffa_mem_transaction_x mem_trans = { };
1075 	int rc = FFA_DENIED;
1076 
1077 	cpu_spin_lock(&rxtx->spinlock);
1078 
1079 	if (!rxtx->rx || flen > rxtx->size)
1080 		goto out;
1081 
1082 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, flen,
1083 				       &mem_trans);
1084 	if (rc)
1085 		goto out;
1086 	if (is_sp_share(&mem_trans, rxtx->rx)) {
1087 		rc = spmc_sp_add_share(&mem_trans, rxtx, blen,
1088 				       global_handle, NULL);
1089 		goto out;
1090 	}
1091 
1092 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1093 	    virt_set_guest(mem_trans.sender_id))
1094 		goto out;
1095 
1096 	rc = add_mem_share(&mem_trans, NULL, rxtx->rx, blen, flen,
1097 			   global_handle);
1098 
1099 	virt_unset_guest();
1100 
1101 out:
1102 	cpu_spin_unlock(&rxtx->spinlock);
1103 
1104 	return rc;
1105 }
1106 
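/*
 * Handle FFA_MEM_SHARE_32/64: the transaction descriptor is passed either in
 * a buffer supplied by the caller (address and page count in w3/w4) or via
 * the RX buffer. Responds with FFA_MEM_FRAG_RX if more fragments are needed.
 */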
1107 static void handle_mem_share(struct thread_smc_args *args,
1108 			     struct ffa_rxtx *rxtx)
1109 {
1110 	uint32_t tot_len = args->a1;
1111 	uint32_t frag_len = args->a2;
1112 	uint64_t addr = args->a3;
1113 	uint32_t page_count = args->a4;
1114 	uint32_t ret_w1 = 0;
1115 	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
1116 	uint32_t ret_w3 = 0;
1117 	uint32_t ret_fid = FFA_ERROR;
1118 	uint64_t global_handle = 0;
1119 	int rc = 0;
1120 
1121 	/* Check that the MBZs are indeed 0 */
1122 	if (args->a5 || args->a6 || args->a7)
1123 		goto out;
1124 
1125 	/* Check that fragment length doesn't exceed total length */
1126 	if (frag_len > tot_len)
1127 		goto out;
1128 
1129 	/* Check for 32-bit calling convention */
1130 	if (args->a0 == FFA_MEM_SHARE_32)
1131 		addr &= UINT32_MAX;
1132 
1133 	if (!addr) {
1134 		/*
1135 		 * The memory transaction descriptor is passed via our rx
1136 		 * buffer.
1137 		 */
1138 		if (page_count)
1139 			goto out;
1140 		rc = handle_mem_share_rxbuf(tot_len, frag_len, &global_handle,
1141 					    rxtx);
1142 	} else {
1143 		rc = handle_mem_share_tmem(addr, tot_len, frag_len, page_count,
1144 					   &global_handle, rxtx);
1145 	}
1146 	if (rc < 0) {
1147 		ret_w2 = rc;
1148 	} else if (rc > 0) {
1149 		ret_fid = FFA_MEM_FRAG_RX;
1150 		ret_w3 = rc;
1151 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1152 	} else {
1153 		ret_fid = FFA_SUCCESS_32;
1154 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1155 	}
1156 out:
1157 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1158 }
1159 
1160 static struct mem_frag_state *get_frag_state(uint64_t global_handle)
1161 {
1162 	struct mem_frag_state *s = NULL;
1163 
1164 	SLIST_FOREACH(s, &frag_state_head, link)
1165 		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
1166 			return s;
1167 
1168 	return NULL;
1169 }
1170 
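/*
 * Handle FFA_MEM_FRAG_TX: process the next fragment of an ongoing fragmented
 * FFA_MEM_SHARE transaction identified by the global handle in w1/w2.
 */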
1171 static void handle_mem_frag_tx(struct thread_smc_args *args,
1172 			       struct ffa_rxtx *rxtx)
1173 {
1174 	uint64_t global_handle = reg_pair_to_64(args->a2, args->a1);
1175 	size_t flen = args->a3;
1176 	uint32_t endpoint_id = args->a4;
1177 	struct mem_frag_state *s = NULL;
1178 	tee_mm_entry_t *mm = NULL;
1179 	unsigned int page_count = 0;
1180 	void *buf = NULL;
1181 	uint32_t ret_w1 = 0;
1182 	uint32_t ret_w2 = 0;
1183 	uint32_t ret_w3 = 0;
1184 	uint32_t ret_fid = 0;
1185 	int rc = 0;
1186 
1187 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1188 		uint16_t guest_id = endpoint_id >> 16;
1189 
1190 		if (!guest_id || virt_set_guest(guest_id)) {
1191 			rc = FFA_INVALID_PARAMETERS;
1192 			goto out_set_rc;
1193 		}
1194 	}
1195 
1196 	/*
1197 	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
1198 	 * requests.
1199 	 */
1200 
1201 	cpu_spin_lock(&rxtx->spinlock);
1202 
1203 	s = get_frag_state(global_handle);
1204 	if (!s) {
1205 		rc = FFA_INVALID_PARAMETERS;
1206 		goto out;
1207 	}
1208 
1209 	mm = s->mm;
1210 	if (mm) {
1211 		if (flen > tee_mm_get_bytes(mm)) {
1212 			rc = FFA_INVALID_PARAMETERS;
1213 			goto out;
1214 		}
1215 		page_count = s->share.page_count;
1216 		buf = (void *)tee_mm_get_smem(mm);
1217 	} else {
1218 		if (flen > rxtx->size) {
1219 			rc = FFA_INVALID_PARAMETERS;
1220 			goto out;
1221 		}
1222 		buf = rxtx->rx;
1223 	}
1224 
1225 	rc = add_mem_share_frag(s, buf, flen);
1226 out:
1227 	virt_unset_guest();
1228 	cpu_spin_unlock(&rxtx->spinlock);
1229 
1230 	if (rc <= 0 && mm) {
1231 		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1232 		tee_mm_free(mm);
1233 	}
1234 
1235 out_set_rc:
1236 	if (rc < 0) {
1237 		ret_fid = FFA_ERROR;
1238 		ret_w2 = rc;
1239 	} else if (rc > 0) {
1240 		ret_fid = FFA_MEM_FRAG_RX;
1241 		ret_w3 = rc;
1242 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1243 	} else {
1244 		ret_fid = FFA_SUCCESS_32;
1245 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1246 	}
1247 
1248 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1249 }
1250 
1251 static void handle_mem_reclaim(struct thread_smc_args *args)
1252 {
1253 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1254 	uint32_t ret_fid = FFA_ERROR;
1255 	uint64_t cookie = 0;
1256 
1257 	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
1258 		goto out;
1259 
1260 	cookie = reg_pair_to_64(args->a2, args->a1);
1261 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1262 		uint16_t guest_id = 0;
1263 
1264 		if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
1265 			guest_id = virt_find_guest_by_cookie(cookie);
1266 		} else {
1267 			guest_id = (cookie >> FFA_MEMORY_HANDLE_PRTN_SHIFT) &
1268 				   FFA_MEMORY_HANDLE_PRTN_MASK;
1269 		}
1270 		if (!guest_id || virt_set_guest(guest_id))
1271 			goto out;
1272 	}
1273 
1274 	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
1275 	case TEE_SUCCESS:
1276 		ret_fid = FFA_SUCCESS_32;
1277 		ret_val = 0;
1278 		break;
1279 	case TEE_ERROR_ITEM_NOT_FOUND:
1280 		DMSG("cookie %#"PRIx64" not found", cookie);
1281 		ret_val = FFA_INVALID_PARAMETERS;
1282 		break;
1283 	default:
1284 		DMSG("cookie %#"PRIx64" busy", cookie);
1285 		ret_val = FFA_DENIED;
1286 		break;
1287 	}
1288 
1289 	virt_unset_guest();
1290 
1291 out:
1292 	spmc_set_args(args, ret_fid, ret_val, 0, 0, 0, 0);
1293 }
1294 #endif
1295 
1296 /* Only called from assembly */
1297 void thread_spmc_msg_recv(struct thread_smc_args *args);
1298 void thread_spmc_msg_recv(struct thread_smc_args *args)
1299 {
1300 	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
1301 	switch (args->a0) {
1302 #if defined(CFG_CORE_SEL1_SPMC)
1303 	case FFA_FEATURES:
1304 		handle_features(args);
1305 		break;
1306 	case FFA_SPM_ID_GET:
1307 		handle_spm_id_get(args);
1308 		break;
1309 #ifdef ARM64
1310 	case FFA_RXTX_MAP_64:
1311 #endif
1312 	case FFA_RXTX_MAP_32:
1313 		spmc_handle_rxtx_map(args, &my_rxtx);
1314 		break;
1315 	case FFA_RXTX_UNMAP:
1316 		spmc_handle_rxtx_unmap(args, &my_rxtx);
1317 		break;
1318 	case FFA_RX_RELEASE:
1319 		spmc_handle_rx_release(args, &my_rxtx);
1320 		break;
1321 	case FFA_PARTITION_INFO_GET:
1322 		spmc_handle_partition_info_get(args, &my_rxtx);
1323 		break;
1324 	case FFA_RUN:
1325 		spmc_handle_run(args);
1326 		break;
1327 #endif /*CFG_CORE_SEL1_SPMC*/
1328 	case FFA_INTERRUPT:
1329 		interrupt_main_handler();
1330 		spmc_set_args(args, FFA_MSG_WAIT, 0, 0, 0, 0, 0);
1331 		break;
1332 #ifdef ARM64
1333 	case FFA_MSG_SEND_DIRECT_REQ_64:
1334 #endif
1335 	case FFA_MSG_SEND_DIRECT_REQ_32:
1336 		handle_direct_request(args, &my_rxtx);
1337 		break;
1338 #if defined(CFG_CORE_SEL1_SPMC)
1339 #ifdef ARM64
1340 	case FFA_MEM_SHARE_64:
1341 #endif
1342 	case FFA_MEM_SHARE_32:
1343 		handle_mem_share(args, &my_rxtx);
1344 		break;
1345 	case FFA_MEM_RECLAIM:
1346 		if (!IS_ENABLED(CFG_SECURE_PARTITION) ||
1347 		    !ffa_mem_reclaim(args, NULL))
1348 			handle_mem_reclaim(args);
1349 		break;
1350 	case FFA_MEM_FRAG_TX:
1351 		handle_mem_frag_tx(args, &my_rxtx);
1352 		break;
1353 #endif /*CFG_CORE_SEL1_SPMC*/
1354 	default:
1355 		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
1356 		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
1357 			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1358 	}
1359 }
1360 
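/*
 * Serve OPTEE_FFA_YIELDING_CALL_WITH_ARG: map the shared memory object
 * identified by @cookie, dispatch the OPTEE_MSG request at @offset to
 * tee_entry_std() and use the area following the arguments for RPC
 * parameters while the call is being processed.
 */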
1361 static TEE_Result yielding_call_with_arg(uint64_t cookie, uint32_t offset)
1362 {
1363 	size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1364 	struct thread_ctx *thr = threads + thread_get_id();
1365 	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
1366 	struct optee_msg_arg *arg = NULL;
1367 	struct mobj *mobj = NULL;
1368 	uint32_t num_params = 0;
1369 	size_t sz = 0;
1370 
1371 	mobj = mobj_ffa_get_by_cookie(cookie, 0);
1372 	if (!mobj) {
1373 		EMSG("Can't find cookie %#"PRIx64, cookie);
1374 		return TEE_ERROR_BAD_PARAMETERS;
1375 	}
1376 
1377 	res = mobj_inc_map(mobj);
1378 	if (res)
1379 		goto out_put_mobj;
1380 
1381 	res = TEE_ERROR_BAD_PARAMETERS;
1382 	arg = mobj_get_va(mobj, offset, sizeof(*arg));
1383 	if (!arg)
1384 		goto out_dec_map;
1385 
1386 	num_params = READ_ONCE(arg->num_params);
1387 	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
1388 		goto out_dec_map;
1389 
1390 	sz = OPTEE_MSG_GET_ARG_SIZE(num_params);
1391 
1392 	thr->rpc_arg = mobj_get_va(mobj, offset + sz, sz_rpc);
1393 	if (!thr->rpc_arg)
1394 		goto out_dec_map;
1395 
1396 	virt_on_stdcall();
1397 	res = tee_entry_std(arg, num_params);
1398 
1399 	thread_rpc_shm_cache_clear(&thr->shm_cache);
1400 	thr->rpc_arg = NULL;
1401 
1402 out_dec_map:
1403 	mobj_dec_map(mobj);
1404 out_put_mobj:
1405 	mobj_put(mobj);
1406 	return res;
1407 }
1408 
1409 /*
1410  * Helper routine for the assembly function thread_std_smc_entry()
1411  *
1412  * Note: this function is weak just to make link_dummies_paged.c happy.
1413  */
1414 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
1415 				       uint32_t a2, uint32_t a3,
1416 				       uint32_t a4, uint32_t a5 __unused)
1417 {
1418 	/*
1419 	 * Arguments are supplied from handle_yielding_call() as:
1420 	 * a0 <- w1
1421 	 * a1 <- w3
1422 	 * a2 <- w4
1423 	 * a3 <- w5
1424 	 * a4 <- w6
1425 	 * a5 <- w7
1426 	 */
1427 	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
1428 	if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG)
1429 		return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4);
1430 	return FFA_DENIED;
1431 }
1432 
1433 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
1434 {
1435 	uint64_t offs = tpm->u.memref.offs;
1436 
1437 	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
1438 		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
1439 
1440 	param->u.fmem.offs_low = offs;
1441 	param->u.fmem.offs_high = offs >> 32;
1442 	if (param->u.fmem.offs_high != offs >> 32)
1443 		return false;
1444 
1445 	param->u.fmem.size = tpm->u.memref.size;
1446 	if (tpm->u.memref.mobj) {
1447 		uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj);
1448 
1449 		/* If a mobj is passed it had better be one with a valid cookie. */
1450 		if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
1451 			return false;
1452 		param->u.fmem.global_id = cookie;
1453 	} else {
1454 		param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
1455 	}
1456 
1457 	return true;
1458 }
1459 
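/*
 * Fill this thread's preallocated RPC argument buffer with @cmd and the
 * given parameters converted to OPTEE_MSG format, returning it in @arg_ret.
 */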
1460 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
1461 			    struct thread_param *params,
1462 			    struct optee_msg_arg **arg_ret)
1463 {
1464 	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1465 	struct thread_ctx *thr = threads + thread_get_id();
1466 	struct optee_msg_arg *arg = thr->rpc_arg;
1467 
1468 	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
1469 		return TEE_ERROR_BAD_PARAMETERS;
1470 
1471 	if (!arg) {
1472 		EMSG("rpc_arg not set");
1473 		return TEE_ERROR_GENERIC;
1474 	}
1475 
1476 	memset(arg, 0, sz);
1477 	arg->cmd = cmd;
1478 	arg->num_params = num_params;
1479 	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
1480 
1481 	for (size_t n = 0; n < num_params; n++) {
1482 		switch (params[n].attr) {
1483 		case THREAD_PARAM_ATTR_NONE:
1484 			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
1485 			break;
1486 		case THREAD_PARAM_ATTR_VALUE_IN:
1487 		case THREAD_PARAM_ATTR_VALUE_OUT:
1488 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1489 			arg->params[n].attr = params[n].attr -
1490 					      THREAD_PARAM_ATTR_VALUE_IN +
1491 					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
1492 			arg->params[n].u.value.a = params[n].u.value.a;
1493 			arg->params[n].u.value.b = params[n].u.value.b;
1494 			arg->params[n].u.value.c = params[n].u.value.c;
1495 			break;
1496 		case THREAD_PARAM_ATTR_MEMREF_IN:
1497 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1498 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1499 			if (!set_fmem(arg->params + n, params + n))
1500 				return TEE_ERROR_BAD_PARAMETERS;
1501 			break;
1502 		default:
1503 			return TEE_ERROR_BAD_PARAMETERS;
1504 		}
1505 	}
1506 
1507 	if (arg_ret)
1508 		*arg_ret = arg;
1509 
1510 	return TEE_SUCCESS;
1511 }
1512 
1513 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
1514 				struct thread_param *params)
1515 {
1516 	for (size_t n = 0; n < num_params; n++) {
1517 		switch (params[n].attr) {
1518 		case THREAD_PARAM_ATTR_VALUE_OUT:
1519 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1520 			params[n].u.value.a = arg->params[n].u.value.a;
1521 			params[n].u.value.b = arg->params[n].u.value.b;
1522 			params[n].u.value.c = arg->params[n].u.value.c;
1523 			break;
1524 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1525 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1526 			params[n].u.memref.size = arg->params[n].u.fmem.size;
1527 			break;
1528 		default:
1529 			break;
1530 		}
1531 	}
1532 
1533 	return arg->ret;
1534 }
1535 
1536 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
1537 			struct thread_param *params)
1538 {
1539 	struct thread_rpc_arg rpc_arg = { .call = {
1540 			.w1 = thread_get_tsd()->rpc_target_info,
1541 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1542 		},
1543 	};
1544 	struct optee_msg_arg *arg = NULL;
1545 	uint32_t ret = 0;
1546 
1547 	ret = get_rpc_arg(cmd, num_params, params, &arg);
1548 	if (ret)
1549 		return ret;
1550 
1551 	thread_rpc(&rpc_arg);
1552 
1553 	return get_rpc_arg_res(arg, num_params, params);
1554 }
1555 
1556 static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
1557 {
1558 	struct thread_rpc_arg rpc_arg = { .call = {
1559 			.w1 = thread_get_tsd()->rpc_target_info,
1560 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1561 		},
1562 	};
1563 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
1564 	uint32_t res2 = 0;
1565 	uint32_t res = 0;
1566 
1567 	DMSG("freeing cookie %#"PRIx64, cookie);
1568 
1569 	res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);
1570 
1571 	mobj_put(mobj);
1572 	res2 = mobj_ffa_unregister_by_cookie(cookie);
1573 	if (res2)
1574 		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
1575 		     cookie, res2);
1576 	if (!res)
1577 		thread_rpc(&rpc_arg);
1578 }
1579 
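/*
 * Ask normal world via RPC to allocate shared memory of the given size and
 * type, then look up the resulting FF-A memory object by cookie and map it.
 */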
1580 static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
1581 {
1582 	struct thread_rpc_arg rpc_arg = { .call = {
1583 			.w1 = thread_get_tsd()->rpc_target_info,
1584 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1585 		},
1586 	};
1587 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
1588 	struct optee_msg_arg *arg = NULL;
1589 	unsigned int internal_offset = 0;
1590 	struct mobj *mobj = NULL;
1591 	uint64_t cookie = 0;
1592 
1593 	if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
1594 		return NULL;
1595 
1596 	thread_rpc(&rpc_arg);
1597 
1598 	if (arg->num_params != 1 ||
1599 	    arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
1600 		return NULL;
1601 
1602 	internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs);
1603 	cookie = READ_ONCE(arg->params->u.fmem.global_id);
1604 	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
1605 	if (!mobj) {
1606 		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
1607 		     cookie, internal_offset);
1608 		return NULL;
1609 	}
1610 
1611 	assert(mobj_is_nonsec(mobj));
1612 
1613 	if (mobj->size < size) {
1614 		DMSG("Mobj %#"PRIx64": wrong size", cookie);
1615 		mobj_put(mobj);
1616 		return NULL;
1617 	}
1618 
1619 	if (mobj_inc_map(mobj)) {
1620 		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
1621 		mobj_put(mobj);
1622 		return NULL;
1623 	}
1624 
1625 	return mobj;
1626 }
1627 
1628 struct mobj *thread_rpc_alloc_payload(size_t size)
1629 {
1630 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
1631 }
1632 
1633 struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
1634 {
1635 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
1636 }
1637 
1638 void thread_rpc_free_kernel_payload(struct mobj *mobj)
1639 {
1640 	thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL, mobj_get_cookie(mobj), mobj);
1641 }
1642 
1643 void thread_rpc_free_payload(struct mobj *mobj)
1644 {
1645 	thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
1646 			mobj);
1647 }
1648 
1649 struct mobj *thread_rpc_alloc_global_payload(size_t size)
1650 {
1651 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
1652 }
1653 
1654 void thread_rpc_free_global_payload(struct mobj *mobj)
1655 {
1656 	thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL, mobj_get_cookie(mobj),
1657 			mobj);
1658 }
1659 
1660 void thread_spmc_register_secondary_ep(vaddr_t ep)
1661 {
1662 	unsigned long ret = 0;
1663 
1664 	/* Let the SPM know the entry point for secondary CPUs */
1665 	ret = thread_smc(FFA_SECONDARY_EP_REGISTER_64, ep, 0, 0);
1666 
1667 	if (ret != FFA_SUCCESS_32 && ret != FFA_SUCCESS_64)
1668 		EMSG("FFA_SECONDARY_EP_REGISTER_64 ret %#lx", ret);
1669 }
1670 
1671 #if defined(CFG_CORE_SEL1_SPMC)
1672 static TEE_Result spmc_init(void)
1673 {
1674 	my_endpoint_id = SPMC_ENDPOINT_ID;
1675 	DMSG("My endpoint ID %#x", my_endpoint_id);
1676 
1677 	/*
1678 	 * If the SPMD thinks we are version 1.0 it will report version 1.0 to
1679 	 * normal world regardless of what version we query the SPM with.
1680 	 * However, if the SPMD thinks we are version 1.1 it will forward
1681 	 * queries from normal world to let us negotiate the version. So by
1682 	 * setting version 1.0 here we should be compatible.
1683 	 *
1684 	 * Note that disagreement on negotiated version means that we'll
1685 	 * have communication problems with normal world.
1686 	 */
1687 	my_rxtx.ffa_vers = FFA_VERSION_1_0;
1688 
1689 	return TEE_SUCCESS;
1690 }
1691 #else /* !defined(CFG_CORE_SEL1_SPMC) */
1692 static bool is_ffa_success(uint32_t fid)
1693 {
1694 #ifdef ARM64
1695 	if (fid == FFA_SUCCESS_64)
1696 		return true;
1697 #endif
1698 	return fid == FFA_SUCCESS_32;
1699 }
1700 
1701 static void spmc_rxtx_map(struct ffa_rxtx *rxtx)
1702 {
1703 	struct thread_smc_args args = {
1704 #ifdef ARM64
1705 		.a0 = FFA_RXTX_MAP_64,
1706 #else
1707 		.a0 = FFA_RXTX_MAP_32,
1708 #endif
1709 		.a1 = virt_to_phys(rxtx->tx),
1710 		.a2 = virt_to_phys(rxtx->rx),
1711 		.a3 = 1,
1712 	};
1713 
1714 	thread_smccc(&args);
1715 	if (!is_ffa_success(args.a0)) {
1716 		if (args.a0 == FFA_ERROR)
1717 			EMSG("rxtx map failed with error %ld", args.a2);
1718 		else
1719 			EMSG("rxtx map failed");
1720 		panic();
1721 	}
1722 }
1723 
1724 static uint16_t get_my_id(void)
1725 {
1726 	struct thread_smc_args args = {
1727 		.a0 = FFA_ID_GET,
1728 	};
1729 
1730 	thread_smccc(&args);
1731 	if (!is_ffa_success(args.a0)) {
1732 		if (args.a0 == FFA_ERROR)
1733 			EMSG("Get id failed with error %ld", args.a2);
1734 		else
1735 			EMSG("Get id failed");
1736 		panic();
1737 	}
1738 
1739 	return args.a2;
1740 }
1741 
1742 static uint32_t get_ffa_version(uint32_t my_version)
1743 {
1744 	struct thread_smc_args args = {
1745 		.a0 = FFA_VERSION,
1746 		.a1 = my_version,
1747 	};
1748 
1749 	thread_smccc(&args);
1750 	if (args.a0 & BIT(31)) {
1751 		EMSG("FF-A version failed with error %ld", args.a0);
1752 		panic();
1753 	}
1754 
1755 	return args.a0;
1756 }
1757 
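/*
 * Issue FFA_MEM_RETRIEVE_REQ for @cookie using the TX buffer and parse the
 * response into @trans. Returns a pointer to the RX buffer holding the
 * retrieved descriptor; the caller releases the RX buffer when done.
 */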
1758 static void *spmc_retrieve_req(uint64_t cookie,
1759 			       struct ffa_mem_transaction_x *trans)
1760 {
1761 	struct ffa_mem_access *acc_descr_array = NULL;
1762 	struct ffa_mem_access_perm *perm_descr = NULL;
1763 	struct thread_smc_args args = {
1764 		.a0 = FFA_MEM_RETRIEVE_REQ_32,
1765 		.a3 =	0,	/* Address, Using TX -> MBZ */
1766 		.a4 =   0,	/* Using TX -> MBZ */
1767 	};
1768 	size_t size = 0;
1769 	int rc = 0;
1770 
1771 	if (my_rxtx.ffa_vers == FFA_VERSION_1_0) {
1772 		struct ffa_mem_transaction_1_0 *trans_descr = my_rxtx.tx;
1773 
1774 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
1775 		memset(trans_descr, 0, size);
1776 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
1777 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
1778 		trans_descr->global_handle = cookie;
1779 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
1780 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
1781 		trans_descr->mem_access_count = 1;
1782 		acc_descr_array = trans_descr->mem_access_array;
1783 	} else {
1784 		struct ffa_mem_transaction_1_1 *trans_descr = my_rxtx.tx;
1785 
1786 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
1787 		memset(trans_descr, 0, size);
1788 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
1789 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
1790 		trans_descr->global_handle = cookie;
1791 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
1792 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
1793 		trans_descr->mem_access_count = 1;
1794 		trans_descr->mem_access_offs = sizeof(*trans_descr);
1795 		trans_descr->mem_access_size = sizeof(struct ffa_mem_access);
1796 		acc_descr_array = (void *)((vaddr_t)my_rxtx.tx +
1797 					   sizeof(*trans_descr));
1798 	}
1799 	acc_descr_array->region_offs = 0;
1800 	acc_descr_array->reserved = 0;
1801 	perm_descr = &acc_descr_array->access_perm;
1802 	perm_descr->endpoint_id = my_endpoint_id;
1803 	perm_descr->perm = FFA_MEM_ACC_RW;
1804 	perm_descr->flags = 0;
1805 
1806 	args.a1 = size; /* Total Length */
1807 	args.a2 = size; /* Frag Length == Total length */
1808 	thread_smccc(&args);
1809 	if (args.a0 != FFA_MEM_RETRIEVE_RESP) {
1810 		if (args.a0 == FFA_ERROR)
1811 			EMSG("Failed to fetch cookie %#"PRIx64" error code %d",
1812 			     cookie, (int)args.a2);
1813 		else
1814 			EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64,
1815 			     cookie, args.a0);
1816 		return NULL;
1817 	}
1818 	rc = spmc_read_mem_transaction(my_rxtx.ffa_vers, my_rxtx.tx,
1819 				       my_rxtx.size, trans);
1820 	if (rc) {
1821 		EMSG("Memory transaction failure for cookie %#"PRIx64" rc %d",
1822 		     cookie, rc);
1823 		return NULL;
1824 	}
1825 
1826 	return my_rxtx.rx;
1827 }
1828 
1829 void thread_spmc_relinquish(uint64_t cookie)
1830 {
1831 	struct ffa_mem_relinquish *relinquish_desc = my_rxtx.tx;
1832 	struct thread_smc_args args = {
1833 		.a0 = FFA_MEM_RELINQUISH,
1834 	};
1835 
1836 	memset(relinquish_desc, 0, sizeof(*relinquish_desc));
1837 	relinquish_desc->handle = cookie;
1838 	relinquish_desc->flags = 0;
1839 	relinquish_desc->endpoint_count = 1;
1840 	relinquish_desc->endpoint_id_array[0] = my_endpoint_id;
1841 	thread_smccc(&args);
1842 	if (!is_ffa_success(args.a0))
1843 		EMSG("Failed to relinquish cookie %#"PRIx64, cookie);
1844 }
1845 
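/* Register the retrieved address ranges with the mobj and check the page count */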
1846 static int set_pages(struct ffa_address_range *regions,
1847 		     unsigned int num_regions, unsigned int num_pages,
1848 		     struct mobj_ffa *mf)
1849 {
1850 	unsigned int n = 0;
1851 	unsigned int idx = 0;
1852 
1853 	for (n = 0; n < num_regions; n++) {
1854 		unsigned int page_count = READ_ONCE(regions[n].page_count);
1855 		uint64_t addr = READ_ONCE(regions[n].address);
1856 
1857 		if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count))
1858 			return FFA_INVALID_PARAMETERS;
1859 	}
1860 
1861 	if (idx != num_pages)
1862 		return FFA_INVALID_PARAMETERS;
1863 
1864 	return 0;
1865 }
1866 
1867 struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie)
1868 {
1869 	struct mobj_ffa *ret = NULL;
1870 	struct ffa_mem_transaction_x retrieve_desc = { };
1871 	struct ffa_mem_access *descr_array = NULL;
1872 	struct ffa_mem_region *descr = NULL;
1873 	struct mobj_ffa *mf = NULL;
1874 	unsigned int num_pages = 0;
1875 	unsigned int offs = 0;
1876 	void *buf = NULL;
1877 	struct thread_smc_args ffa_rx_release_args = {
1878 		.a0 = FFA_RX_RELEASE
1879 	};
1880 
1881 	/*
1882 	 * OP-TEE only supports a single mem_region while the
1883 	 * specification allows for more than one.
1884 	 */
1885 	buf = spmc_retrieve_req(cookie, &retrieve_desc);
1886 	if (!buf) {
1887 		EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64,
1888 		     cookie);
1889 		return NULL;
1890 	}
1891 
1892 	descr_array = (void *)((vaddr_t)buf + retrieve_desc.mem_access_offs);
1893 	offs = READ_ONCE(descr_array->region_offs);
1894 	descr = (struct ffa_mem_region *)((vaddr_t)buf + offs);
1895 
1896 	num_pages = READ_ONCE(descr->total_page_count);
1897 	mf = mobj_ffa_spmc_new(cookie, num_pages);
1898 	if (!mf)
1899 		goto out;
1900 
1901 	if (set_pages(descr->address_range_array,
1902 		      READ_ONCE(descr->address_range_count), num_pages, mf)) {
1903 		mobj_ffa_spmc_delete(mf);
1904 		goto out;
1905 	}
1906 
1907 	ret = mf;
1908 
1909 out:
1910 	/* Release RX buffer after the mem retrieve request. */
1911 	thread_smccc(&ffa_rx_release_args);
1912 
1913 	return ret;
1914 }
1915 
1916 static TEE_Result spmc_init(void)
1917 {
1918 	unsigned int major = 0;
1919 	unsigned int minor __maybe_unused = 0;
1920 	uint32_t my_vers = 0;
1921 	uint32_t vers = 0;
1922 
1923 	my_vers = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR);
1924 	vers = get_ffa_version(my_vers);
1925 	major = (vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK;
1926 	minor = (vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK;
1927 	DMSG("SPMC reported version %u.%u", major, minor);
1928 	if (major != FFA_VERSION_MAJOR) {
1929 		EMSG("Incompatible major version %u, expected %u",
1930 		     major, FFA_VERSION_MAJOR);
1931 		panic();
1932 	}
1933 	if (vers < my_vers)
1934 		my_vers = vers;
1935 	DMSG("Using version %u.%u",
1936 	     (my_vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK,
1937 	     (my_vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK);
1938 	my_rxtx.ffa_vers = my_vers;
1939 
1940 	spmc_rxtx_map(&my_rxtx);
1941 	my_endpoint_id = get_my_id();
1942 	DMSG("My endpoint ID %#x", my_endpoint_id);
1943 
1944 	return TEE_SUCCESS;
1945 }
1946 #endif /* !defined(CFG_CORE_SEL1_SPMC) */
1947 
1948 /*
1949  * boot_final() is always run before exiting at the end of boot
1950  * initialization.  In the case of virtualization the init-calls are done only
1951  * once an OP-TEE partition has been created. So with virtualization we have
1952  * to initialize via boot_final() to make sure we have a value assigned
1953  * before it's used the first time.
1954  */
1955 #ifdef CFG_NS_VIRTUALIZATION
1956 boot_final(spmc_init);
1957 #else
1958 service_init(spmc_init);
1959 #endif
1960