xref: /optee_os/core/arch/arm/kernel/thread_spmc.c (revision 412d46f668c2b95b76bc528c63a8ed27db6ac553)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2020-2021, Linaro Limited.
4  * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
5  */
6 
7 #include <assert.h>
8 #include <ffa.h>
9 #include <initcall.h>
10 #include <io.h>
11 #include <kernel/interrupt.h>
12 #include <kernel/panic.h>
13 #include <kernel/secure_partition.h>
14 #include <kernel/spinlock.h>
15 #include <kernel/spmc_sp_handler.h>
16 #include <kernel/tee_misc.h>
17 #include <kernel/thread.h>
18 #include <kernel/thread_private.h>
19 #include <kernel/thread_spmc.h>
20 #include <kernel/virtualization.h>
21 #include <mm/core_mmu.h>
22 #include <mm/mobj.h>
23 #include <optee_ffa.h>
24 #include <optee_msg.h>
25 #include <optee_rpc_cmd.h>
26 #include <string.h>
27 #include <sys/queue.h>
28 #include <tee/entry_std.h>
29 #include <tee/uuid.h>
30 #include <util.h>
31 
32 #if defined(CFG_CORE_SEL1_SPMC)
33 struct mem_share_state {
34 	struct mobj_ffa *mf;
35 	unsigned int page_count;
36 	unsigned int region_count;
37 	unsigned int current_page_idx;
38 };
39 
40 struct mem_frag_state {
41 	struct mem_share_state share;
42 	tee_mm_entry_t *mm;
43 	unsigned int frag_offset;
44 	SLIST_ENTRY(mem_frag_state) link;
45 };
46 #endif
47 
48 /* Initialized in spmc_init() below */
49 static uint16_t my_endpoint_id __nex_bss;
50 #ifdef CFG_CORE_SEL1_SPMC
51 static const uint32_t my_part_props = FFA_PART_PROP_DIRECT_REQ_RECV |
52 				      FFA_PART_PROP_DIRECT_REQ_SEND |
53 #ifdef CFG_NS_VIRTUALIZATION
54 				      FFA_PART_PROP_NOTIF_CREATED |
55 				      FFA_PART_PROP_NOTIF_DESTROYED |
56 #endif
57 #ifdef ARM64
58 				      FFA_PART_PROP_AARCH64_STATE |
59 #endif
60 				      FFA_PART_PROP_IS_PE_ID;
61 
62 static uint32_t my_uuid_words[] = {
63 	/*
64 	 * - if the SPMC is in S-EL2 this UUID describes OP-TEE as an S-EL1
65 	 *   SP, or
66 	 * - if the SPMC is in S-EL1 then this UUID is for OP-TEE as a
67 	 *   logical partition, residing in the same exception level as the
68 	 *   SPMC
69 	 * UUID 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
70 	 */
71 	0xe0786148, 0xe311f8e7, 0x02005ebc, 0x1bc5d5a5,
72 };
73 
74 /*
75  * If struct ffa_rxtx::size is 0 RX/TX buffers are not mapped or initialized.
76  *
77  * struct ffa_rxtx::spinlock protects the variables below from concurrent
78  * access; this includes the use of the content of struct ffa_rxtx::rx and
79  * @frag_state_head.
80  *
81  * struct ffa_rxtx::tx_is_mine is true when we may write to struct
82  * ffa_rxtx::tx and false when it is owned by normal world.
83  *
84  * Note that we can't prevent normal world from updating the content of
85  * these buffers so we must always be careful when reading, even while we
86  * hold the lock.
87  */
88 
89 static struct ffa_rxtx nw_rxtx __nex_bss;
90 
91 static bool is_nw_buf(struct ffa_rxtx *rxtx)
92 {
93 	return rxtx == &nw_rxtx;
94 }
95 
96 static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
97 	SLIST_HEAD_INITIALIZER(&frag_state_head);
98 #else
99 static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
100 static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
101 static struct ffa_rxtx nw_rxtx = { .rx = __rx_buf, .tx = __tx_buf };
102 #endif
103 
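/*
 * FF-A direct requests and responses carry the sender endpoint ID in
 * bits [31:16] and the receiver endpoint ID in bits [15:0] of w1.
 * Swapping the two halves of w1 gives the value to use in the reply.
 */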
104 static uint32_t swap_src_dst(uint32_t src_dst)
105 {
106 	return (src_dst >> 16) | (src_dst << 16);
107 }
108 
109 static uint16_t get_sender_id(uint32_t src_dst)
110 {
111 	return src_dst >> 16;
112 }
113 
114 void spmc_set_args(struct thread_smc_args *args, uint32_t fid, uint32_t src_dst,
115 		   uint32_t w2, uint32_t w3, uint32_t w4, uint32_t w5)
116 {
117 	*args = (struct thread_smc_args){ .a0 = fid,
118 					  .a1 = src_dst,
119 					  .a2 = w2,
120 					  .a3 = w3,
121 					  .a4 = w4,
122 					  .a5 = w5, };
123 }
124 
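/*
 * This SPMC implements FF-A versions 1.0 and 1.1 only: a caller asking
 * for anything older than 1.1 gets 1.0, everything else is clamped to
 * 1.1. The negotiated version is stored per RX/TX buffer pair.
 */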
125 static uint32_t exchange_version(uint32_t vers, struct ffa_rxtx *rxtx)
126 {
127 	/*
128 	 * No locking. If the caller makes concurrent calls to this it's
129 	 * only making a mess for itself. We must be able to renegotiate
130 	 * the FF-A version in order to support differing versions between
131 	 * the loader and the driver.
132 	 */
133 	if (vers < FFA_VERSION_1_1)
134 		rxtx->ffa_vers = FFA_VERSION_1_0;
135 	else
136 		rxtx->ffa_vers = FFA_VERSION_1_1;
137 
138 	return rxtx->ffa_vers;
139 }
140 
141 #if defined(CFG_CORE_SEL1_SPMC)
142 void spmc_handle_version(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
143 {
144 	spmc_set_args(args, exchange_version(args->a1, rxtx), FFA_PARAM_MBZ,
145 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
146 		      FFA_PARAM_MBZ);
147 }
148 
149 static void handle_features(struct thread_smc_args *args)
150 {
151 	uint32_t ret_fid = 0;
152 	uint32_t ret_w2 = FFA_PARAM_MBZ;
153 
154 	switch (args->a1) {
155 #ifdef ARM64
156 	case FFA_RXTX_MAP_64:
157 #endif
158 	case FFA_RXTX_MAP_32:
159 		ret_fid = FFA_SUCCESS_32;
160 		ret_w2 = 0; /* 4kB Minimum buffer size and alignment boundary */
161 		break;
162 #ifdef ARM64
163 	case FFA_MEM_SHARE_64:
164 #endif
165 	case FFA_MEM_SHARE_32:
166 		ret_fid = FFA_SUCCESS_32;
167 		/*
168 		 * Partition manager supports transmission of a memory
169 		 * transaction descriptor in a buffer dynamically allocated
170 		 * by the endpoint.
171 		 */
172 		ret_w2 = BIT(0);
173 		break;
174 
175 	case FFA_ERROR:
176 	case FFA_VERSION:
177 	case FFA_SUCCESS_32:
178 #ifdef ARM64
179 	case FFA_SUCCESS_64:
180 #endif
181 	case FFA_FEATURES:
182 	case FFA_SPM_ID_GET:
183 	case FFA_MEM_FRAG_TX:
184 	case FFA_MEM_RECLAIM:
185 	case FFA_MSG_SEND_DIRECT_REQ_32:
186 	case FFA_INTERRUPT:
187 	case FFA_PARTITION_INFO_GET:
188 	case FFA_RXTX_UNMAP:
189 	case FFA_RX_RELEASE:
190 	case FFA_FEATURE_MANAGED_EXIT_INTR:
191 		ret_fid = FFA_SUCCESS_32;
192 		break;
193 	default:
194 		ret_fid = FFA_ERROR;
195 		ret_w2 = FFA_NOT_SUPPORTED;
196 		break;
197 	}
198 
199 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
200 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
201 }
202 
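/*
 * Map a physically contiguous non-secure buffer into the NSEC_SHM
 * virtual range. Returns 0 on success or an FFA_* error code; the
 * mapping is undone again with unmap_buf() below.
 */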
203 static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
204 {
205 	tee_mm_entry_t *mm = NULL;
206 
207 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
208 		return FFA_INVALID_PARAMETERS;
209 
210 	mm = tee_mm_alloc(&tee_mm_shm, sz);
211 	if (!mm)
212 		return FFA_NO_MEMORY;
213 
214 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
215 					  sz / SMALL_PAGE_SIZE,
216 					  MEM_AREA_NSEC_SHM)) {
217 		tee_mm_free(mm);
218 		return FFA_INVALID_PARAMETERS;
219 	}
220 
221 	*va_ret = (void *)tee_mm_get_smem(mm);
222 	return 0;
223 }
224 
225 static void handle_spm_id_get(struct thread_smc_args *args)
226 {
227 	spmc_set_args(args, FFA_SUCCESS_32, FFA_PARAM_MBZ, my_endpoint_id,
228 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
229 }
230 
231 static void unmap_buf(void *va, size_t sz)
232 {
233 	tee_mm_entry_t *mm = tee_mm_find(&tee_mm_shm, (vaddr_t)va);
234 
235 	assert(mm);
236 	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
237 	tee_mm_free(mm);
238 }
239 
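/*
 * FFA_RXTX_MAP: w1 holds the caller's TX buffer address, w2 the
 * caller's RX buffer address and w3 the buffer size in 4kB pages
 * (bits [5:0], all other bits MBZ). The caller's TX buffer becomes our
 * RX buffer and vice versa, hence the swap below.
 */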
240 void spmc_handle_rxtx_map(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
241 {
242 	int rc = 0;
243 	uint32_t ret_fid = FFA_ERROR;
244 	unsigned int sz = 0;
245 	paddr_t rx_pa = 0;
246 	paddr_t tx_pa = 0;
247 	void *rx = NULL;
248 	void *tx = NULL;
249 
250 	cpu_spin_lock(&rxtx->spinlock);
251 
252 	if (args->a3 & GENMASK_64(63, 6)) {
253 		rc = FFA_INVALID_PARAMETERS;
254 		goto out;
255 	}
256 
257 	sz = args->a3 * SMALL_PAGE_SIZE;
258 	if (!sz) {
259 		rc = FFA_INVALID_PARAMETERS;
260 		goto out;
261 	}
262 	/* TX/RX are swapped compared to the caller */
263 	tx_pa = args->a2;
264 	rx_pa = args->a1;
265 
266 	if (rxtx->size) {
267 		rc = FFA_DENIED;
268 		goto out;
269 	}
270 
271 	/*
272 	 * If the buffer comes from an SP the address is virtual and already
273 	 * mapped.
274 	 */
275 	if (is_nw_buf(rxtx)) {
276 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
277 			enum teecore_memtypes mt = MEM_AREA_NEX_NSEC_SHM;
278 			bool tx_alloced = false;
279 
280 			/*
281 			 * With virtualization we establish this mapping in
282 			 * the nexus mapping which is then replicated to
283 			 * each partition.
284 			 *
285 			 * This means that this mapping must be done before
286 			 * any partition is created and then must not be
287 			 * changed.
288 			 */
289 
290 			/*
291 			 * core_mmu_add_mapping() may reuse previous
292 			 * mappings. First check if there are any mappings to
293 			 * reuse so we know how to clean up in case of
294 			 * failure.
295 			 */
296 			tx = phys_to_virt(tx_pa, mt, sz);
297 			rx = phys_to_virt(rx_pa, mt, sz);
298 			if (!tx) {
299 				tx = core_mmu_add_mapping(mt, tx_pa, sz);
300 				if (!tx) {
301 					rc = FFA_NO_MEMORY;
302 					goto out;
303 				}
304 				tx_alloced = true;
305 			}
306 			if (!rx)
307 				rx = core_mmu_add_mapping(mt, rx_pa, sz);
308 
309 			if (!rx) {
310 				if (tx_alloced && tx)
311 					core_mmu_remove_mapping(mt, tx, sz);
312 				rc = FFA_NO_MEMORY;
313 				goto out;
314 			}
315 		} else {
316 			rc = map_buf(tx_pa, sz, &tx);
317 			if (rc)
318 				goto out;
319 			rc = map_buf(rx_pa, sz, &rx);
320 			if (rc) {
321 				unmap_buf(tx, sz);
322 				goto out;
323 			}
324 		}
325 		rxtx->tx = tx;
326 		rxtx->rx = rx;
327 	} else {
328 		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
329 			rc = FFA_INVALID_PARAMETERS;
330 			goto out;
331 		}
332 
333 		if (!virt_to_phys((void *)tx_pa) ||
334 		    !virt_to_phys((void *)rx_pa)) {
335 			rc = FFA_INVALID_PARAMETERS;
336 			goto out;
337 		}
338 
339 		rxtx->tx = (void *)tx_pa;
340 		rxtx->rx = (void *)rx_pa;
341 	}
342 
343 	rxtx->size = sz;
344 	rxtx->tx_is_mine = true;
345 	ret_fid = FFA_SUCCESS_32;
346 	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
347 	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
348 out:
349 	cpu_spin_unlock(&rxtx->spinlock);
350 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
351 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
352 }
353 
354 void spmc_handle_rxtx_unmap(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
355 {
356 	uint32_t ret_fid = FFA_ERROR;
357 	int rc = FFA_INVALID_PARAMETERS;
358 
359 	cpu_spin_lock(&rxtx->spinlock);
360 
361 	if (!rxtx->size)
362 		goto out;
363 
364 	/* We don't unmap the SP memory as the SP might still use it */
365 	if (is_nw_buf(rxtx)) {
366 		unmap_buf(rxtx->rx, rxtx->size);
367 		unmap_buf(rxtx->tx, rxtx->size);
368 	}
369 	rxtx->size = 0;
370 	rxtx->rx = NULL;
371 	rxtx->tx = NULL;
372 	ret_fid = FFA_SUCCESS_32;
373 	rc = 0;
374 out:
375 	cpu_spin_unlock(&rxtx->spinlock);
376 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
377 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
378 }
379 
380 void spmc_handle_rx_release(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
381 {
382 	uint32_t ret_fid = 0;
383 	int rc = 0;
384 
385 	cpu_spin_lock(&rxtx->spinlock);
386 	/* The sender's RX is our TX */
387 	if (!rxtx->size || rxtx->tx_is_mine) {
388 		ret_fid = FFA_ERROR;
389 		rc = FFA_DENIED;
390 	} else {
391 		ret_fid = FFA_SUCCESS_32;
392 		rc = 0;
393 		rxtx->tx_is_mine = true;
394 	}
395 	cpu_spin_unlock(&rxtx->spinlock);
396 
397 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
398 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
399 }
400 
401 static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
402 {
403 	return !w0 && !w1 && !w2 && !w3;
404 }
405 
406 static bool is_my_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
407 {
408 	/*
409 	 * This depends on which UUID we have been assigned.
410 	 * TODO add a generic mechanism to obtain our UUID.
411 	 *
412 	 * The test below is for the hard-coded UUID
413 	 * 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
414 	 */
415 	return w0 == my_uuid_words[0] && w1 == my_uuid_words[1] &&
416 	       w2 == my_uuid_words[2] && w3 == my_uuid_words[3];
417 }
418 
419 TEE_Result spmc_fill_partition_entry(uint32_t ffa_vers, void *buf, size_t blen,
420 				     size_t idx, uint16_t endpoint_id,
421 				     uint16_t execution_context,
422 				     uint32_t part_props,
423 				     const uint32_t uuid_words[4])
424 {
425 	struct ffa_partition_info_x *fpi = NULL;
426 	size_t fpi_size = sizeof(*fpi);
427 
428 	if (ffa_vers >= FFA_VERSION_1_1)
429 		fpi_size += FFA_UUID_SIZE;
430 
431 	if ((idx + 1) * fpi_size > blen)
432 		return TEE_ERROR_OUT_OF_MEMORY;
433 
434 	fpi = (void *)((vaddr_t)buf + idx * fpi_size);
435 	fpi->id = endpoint_id;
436 	/* Number of execution contexts implemented by this partition */
437 	fpi->execution_context = execution_context;
438 
439 	fpi->partition_properties = part_props;
440 
441 	if (ffa_vers >= FFA_VERSION_1_1) {
442 		if (uuid_words)
443 			memcpy(fpi->uuid, uuid_words, FFA_UUID_SIZE);
444 		else
445 			memset(fpi->uuid, 0, FFA_UUID_SIZE);
446 	}
447 
448 	return TEE_SUCCESS;
449 }
450 
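/*
 * Fill the caller's RX buffer with one partition info entry per
 * discoverable partition: OP-TEE itself first, then any secure
 * partitions. With the count-only flag set only the number of entries
 * is reported and the RX buffer is left untouched.
 */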
451 static int handle_partition_info_get_all(size_t *elem_count,
452 					 struct ffa_rxtx *rxtx, bool count_only)
453 {
454 	if (!count_only) {
455 		/* Add OP-TEE SP */
456 		if (spmc_fill_partition_entry(rxtx->ffa_vers, rxtx->tx,
457 					      rxtx->size, 0, my_endpoint_id,
458 					      CFG_TEE_CORE_NB_CORE,
459 					      my_part_props, my_uuid_words))
460 			return FFA_NO_MEMORY;
461 	}
462 	*elem_count = 1;
463 
464 	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
465 		if (sp_partition_info_get(rxtx->ffa_vers, rxtx->tx, rxtx->size,
466 					  NULL, elem_count, count_only))
467 			return FFA_NO_MEMORY;
468 	}
469 
470 	return FFA_OK;
471 }
472 
473 void spmc_handle_partition_info_get(struct thread_smc_args *args,
474 				    struct ffa_rxtx *rxtx)
475 {
476 	TEE_Result res = TEE_SUCCESS;
477 	uint32_t ret_fid = FFA_ERROR;
478 	uint32_t rc = 0;
479 	bool count_only = args->a5 & FFA_PARTITION_INFO_GET_COUNT_FLAG;
480 
481 	if (!count_only) {
482 		cpu_spin_lock(&rxtx->spinlock);
483 
484 		if (!rxtx->size || !rxtx->tx_is_mine) {
485 			rc = FFA_BUSY;
486 			goto out;
487 		}
488 	}
489 
490 	if (is_nil_uuid(args->a1, args->a2, args->a3, args->a4)) {
491 		size_t elem_count = 0;
492 
493 		ret_fid = handle_partition_info_get_all(&elem_count, rxtx,
494 							count_only);
495 
496 		if (ret_fid) {
497 			rc = ret_fid;
498 			ret_fid = FFA_ERROR;
499 		} else {
500 			ret_fid = FFA_SUCCESS_32;
501 			rc = elem_count;
502 		}
503 
504 		goto out;
505 	}
506 
507 	if (is_my_uuid(args->a1, args->a2, args->a3, args->a4)) {
508 		if (!count_only) {
509 			res = spmc_fill_partition_entry(rxtx->ffa_vers,
510 							rxtx->tx, rxtx->size, 0,
511 							my_endpoint_id,
512 							CFG_TEE_CORE_NB_CORE,
513 							my_part_props,
514 							my_uuid_words);
515 			if (res) {
516 				ret_fid = FFA_ERROR;
517 				rc = FFA_INVALID_PARAMETERS;
518 				goto out;
519 			}
520 		}
521 		rc = 1;
522 	} else if (IS_ENABLED(CFG_SECURE_PARTITION)) {
523 		uint32_t uuid_array[4] = { 0 };
524 		TEE_UUID uuid = { };
525 		size_t count = 0;
526 
527 		uuid_array[0] = args->a1;
528 		uuid_array[1] = args->a2;
529 		uuid_array[2] = args->a3;
530 		uuid_array[3] = args->a4;
531 		tee_uuid_from_octets(&uuid, (uint8_t *)uuid_array);
532 
533 		res = sp_partition_info_get(rxtx->ffa_vers, rxtx->tx,
534 					    rxtx->size, &uuid, &count,
535 					    count_only);
536 		if (res != TEE_SUCCESS) {
537 			ret_fid = FFA_ERROR;
538 			rc = FFA_INVALID_PARAMETERS;
539 			goto out;
540 		}
541 		rc = count;
542 	} else {
543 		ret_fid = FFA_ERROR;
544 		rc = FFA_INVALID_PARAMETERS;
545 		goto out;
546 	}
547 
548 	ret_fid = FFA_SUCCESS_32;
549 
550 out:
551 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
552 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
553 	if (!count_only) {
554 		rxtx->tx_is_mine = false;
555 		cpu_spin_unlock(&rxtx->spinlock);
556 	}
557 }
558 #endif /*CFG_CORE_SEL1_SPMC*/
559 
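/*
 * OP-TEE specific direct requests from the normal world driver: w3
 * selects the service. A yielding call allocates or resumes a std call
 * thread and normally doesn't return here; the direct response built
 * below is only delivered if that fails.
 */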
560 static void handle_yielding_call(struct thread_smc_args *args)
561 {
562 	TEE_Result res = 0;
563 
564 	thread_check_canaries();
565 
566 	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
567 		/* Note connection to struct thread_rpc_arg::ret */
568 		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
569 				       0);
570 		res = TEE_ERROR_BAD_PARAMETERS;
571 	} else {
572 		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
573 				     args->a6, args->a7);
574 		res = TEE_ERROR_BUSY;
575 	}
576 	spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
577 		      swap_src_dst(args->a1), 0, res, 0, 0);
578 }
579 
580 static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
581 {
582 	uint64_t cookie = reg_pair_to_64(a5, a4);
583 	uint32_t res = 0;
584 
585 	res = mobj_ffa_unregister_by_cookie(cookie);
586 	switch (res) {
587 	case TEE_SUCCESS:
588 	case TEE_ERROR_ITEM_NOT_FOUND:
589 		return 0;
590 	case TEE_ERROR_BUSY:
591 		EMSG("res %#"PRIx32, res);
592 		return FFA_BUSY;
593 	default:
594 		EMSG("res %#"PRIx32, res);
595 		return FFA_INVALID_PARAMETERS;
596 	}
597 }
598 
599 static void handle_blocking_call(struct thread_smc_args *args)
600 {
601 	switch (args->a3) {
602 	case OPTEE_FFA_GET_API_VERSION:
603 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
604 			      swap_src_dst(args->a1), 0,
605 			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
606 			      0);
607 		break;
608 	case OPTEE_FFA_GET_OS_VERSION:
609 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
610 			      swap_src_dst(args->a1), 0,
611 			      CFG_OPTEE_REVISION_MAJOR,
612 			      CFG_OPTEE_REVISION_MINOR, TEE_IMPL_GIT_SHA1);
613 		break;
614 	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
615 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
616 			      swap_src_dst(args->a1), 0, 0,
617 			      THREAD_RPC_MAX_NUM_PARAMS,
618 			      OPTEE_FFA_SEC_CAP_ARG_OFFSET);
619 		break;
620 	case OPTEE_FFA_UNREGISTER_SHM:
621 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
622 			      swap_src_dst(args->a1), 0,
623 			      handle_unregister_shm(args->a4, args->a5), 0, 0);
624 		break;
625 	default:
626 		EMSG("Unhandled blocking service ID %#"PRIx32,
627 		     (uint32_t)args->a3);
628 		panic();
629 	}
630 }
631 
632 static void handle_framework_direct_request(struct thread_smc_args *args,
633 					    struct ffa_rxtx *rxtx)
634 {
635 	uint32_t w0 = FFA_ERROR;
636 	uint32_t w1 = FFA_PARAM_MBZ;
637 	uint32_t w2 = FFA_NOT_SUPPORTED;
638 	uint32_t w3 = FFA_PARAM_MBZ;
639 
640 	switch (args->a2 & FFA_MSG_TYPE_MASK) {
641 	case FFA_MSG_SEND_VM_CREATED:
642 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
643 			uint16_t guest_id = args->a5;
644 			TEE_Result res = virt_guest_created(guest_id);
645 
646 			w0 = FFA_MSG_SEND_DIRECT_RESP_32;
647 			w1 = swap_src_dst(args->a1);
648 			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_CREATED;
649 			if (res == TEE_SUCCESS)
650 				w3 = FFA_OK;
651 			else if (res == TEE_ERROR_OUT_OF_MEMORY)
652 				w3 = FFA_NO_MEMORY;
653 			else
654 				w3 = FFA_INVALID_PARAMETERS;
655 		}
656 		break;
657 	case FFA_MSG_VERSION_REQ:
658 		w0 = FFA_MSG_SEND_DIRECT_RESP_32;
659 		w1 = swap_src_dst(args->a1);
660 		w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_VERSION_RESP;
661 		w3 = exchange_version(args->a3, rxtx);
662 		break;
663 	default:
664 		break;
665 	}
666 	spmc_set_args(args, w0, w1, w2, w3, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
667 }
668 
669 static void handle_direct_request(struct thread_smc_args *args,
670 				  struct ffa_rxtx *rxtx)
671 {
672 	if (IS_ENABLED(CFG_SECURE_PARTITION) &&
673 	    FFA_DST(args->a1) != my_endpoint_id) {
674 		spmc_sp_start_thread(args);
675 		return;
676 	}
677 
678 	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
679 		handle_framework_direct_request(args, rxtx);
680 		return;
681 	}
682 
683 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
684 	    virt_set_guest(get_sender_id(args->a1))) {
685 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
686 			      swap_src_dst(args->a1), 0,
687 			      TEE_ERROR_ITEM_NOT_FOUND, 0, 0);
688 		return;
689 	}
690 
691 	if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
692 		handle_yielding_call(args);
693 	else
694 		handle_blocking_call(args);
695 
696 	/*
697 	 * Note that handle_yielding_call() typically only returns if a
698 	 * thread cannot be allocated or found. virt_unset_guest() is also
699 	 * called from thread_state_suspend() and thread_state_free().
700 	 */
701 	virt_unset_guest();
702 }
703 
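/*
 * Parse a v1.0 or v1.1 memory transaction descriptor from an untrusted
 * buffer into struct ffa_mem_transaction_x. Fields are read with
 * READ_ONCE() since the buffer may be shared with normal world, and the
 * endpoint memory access descriptor array is checked to fit within blen.
 */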
704 int spmc_read_mem_transaction(uint32_t ffa_vers, void *buf, size_t blen,
705 			      struct ffa_mem_transaction_x *trans)
706 {
707 	uint16_t mem_reg_attr = 0;
708 	uint32_t flags = 0;
709 	uint32_t count = 0;
710 	uint32_t offs = 0;
711 	uint32_t size = 0;
712 	size_t n = 0;
713 
714 	if (!IS_ALIGNED_WITH_TYPE(buf, uint64_t))
715 		return FFA_INVALID_PARAMETERS;
716 
717 	if (ffa_vers >= FFA_VERSION_1_1) {
718 		struct ffa_mem_transaction_1_1 *descr = NULL;
719 
720 		if (blen < sizeof(*descr))
721 			return FFA_INVALID_PARAMETERS;
722 
723 		descr = buf;
724 		trans->sender_id = READ_ONCE(descr->sender_id);
725 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
726 		flags = READ_ONCE(descr->flags);
727 		trans->global_handle = READ_ONCE(descr->global_handle);
728 		trans->tag = READ_ONCE(descr->tag);
729 
730 		count = READ_ONCE(descr->mem_access_count);
731 		size = READ_ONCE(descr->mem_access_size);
732 		offs = READ_ONCE(descr->mem_access_offs);
733 	} else {
734 		struct ffa_mem_transaction_1_0 *descr = NULL;
735 
736 		if (blen < sizeof(*descr))
737 			return FFA_INVALID_PARAMETERS;
738 
739 		descr = buf;
740 		trans->sender_id = READ_ONCE(descr->sender_id);
741 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
742 		flags = READ_ONCE(descr->flags);
743 		trans->global_handle = READ_ONCE(descr->global_handle);
744 		trans->tag = READ_ONCE(descr->tag);
745 
746 		count = READ_ONCE(descr->mem_access_count);
747 		size = sizeof(struct ffa_mem_access);
748 		offs = offsetof(struct ffa_mem_transaction_1_0,
749 				mem_access_array);
750 	}
751 
752 	if (mem_reg_attr > UINT8_MAX || flags > UINT8_MAX ||
753 	    size > UINT8_MAX || count > UINT8_MAX || offs > UINT16_MAX)
754 		return FFA_INVALID_PARAMETERS;
755 
756 	/* Check that the endpoint memory access descriptor array fits */
757 	if (MUL_OVERFLOW(size, count, &n) || ADD_OVERFLOW(offs, n, &n) ||
758 	    n > blen)
759 		return FFA_INVALID_PARAMETERS;
760 
761 	trans->mem_reg_attr = mem_reg_attr;
762 	trans->flags = flags;
763 	trans->mem_access_size = size;
764 	trans->mem_access_count = count;
765 	trans->mem_access_offs = offs;
766 	return 0;
767 }
768 
769 #if defined(CFG_CORE_SEL1_SPMC)
770 static int get_acc_perms(vaddr_t mem_acc_base, unsigned int mem_access_size,
771 			 unsigned int mem_access_count, uint8_t *acc_perms,
772 			 unsigned int *region_offs)
773 {
774 	struct ffa_mem_access_perm *descr = NULL;
775 	struct ffa_mem_access *mem_acc = NULL;
776 	unsigned int n = 0;
777 
778 	for (n = 0; n < mem_access_count; n++) {
779 		mem_acc = (void *)(mem_acc_base + mem_access_size * n);
780 		descr = &mem_acc->access_perm;
781 		if (READ_ONCE(descr->endpoint_id) == my_endpoint_id) {
782 			*acc_perms = READ_ONCE(descr->perm);
783 			*region_offs = READ_ONCE(mem_acc->region_offs);
784 			return 0;
785 		}
786 	}
787 
788 	return FFA_INVALID_PARAMETERS;
789 }
790 
791 static int mem_share_init(struct ffa_mem_transaction_x *mem_trans, void *buf,
792 			  size_t blen, unsigned int *page_count,
793 			  unsigned int *region_count, size_t *addr_range_offs)
794 {
795 	const uint16_t exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
796 	const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
797 	struct ffa_mem_region *region_descr = NULL;
798 	unsigned int region_descr_offs = 0;
799 	uint8_t mem_acc_perm = 0;
800 	size_t n = 0;
801 
802 	if (mem_trans->mem_reg_attr != exp_mem_reg_attr)
803 		return FFA_INVALID_PARAMETERS;
804 
805 	/* Check that the access permissions match what's expected */
806 	if (get_acc_perms((vaddr_t)buf + mem_trans->mem_access_offs,
807 			  mem_trans->mem_access_size,
808 			  mem_trans->mem_access_count,
809 			  &mem_acc_perm, &region_descr_offs) ||
810 	    mem_acc_perm != exp_mem_acc_perm)
811 		return FFA_INVALID_PARAMETERS;
812 
813 	/* Check that the Composite memory region descriptor fits */
814 	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
815 	    n > blen)
816 		return FFA_INVALID_PARAMETERS;
817 
818 	if (!IS_ALIGNED_WITH_TYPE((vaddr_t)buf + region_descr_offs,
819 				  struct ffa_mem_region))
820 		return FFA_INVALID_PARAMETERS;
821 
822 	region_descr = (struct ffa_mem_region *)((vaddr_t)buf +
823 						 region_descr_offs);
824 	*page_count = READ_ONCE(region_descr->total_page_count);
825 	*region_count = READ_ONCE(region_descr->address_range_count);
826 	*addr_range_offs = n;
827 	return 0;
828 }
829 
830 static int add_mem_share_helper(struct mem_share_state *s, void *buf,
831 				size_t flen)
832 {
833 	unsigned int region_count = flen / sizeof(struct ffa_address_range);
834 	struct ffa_address_range *arange = NULL;
835 	unsigned int n = 0;
836 
837 	if (region_count > s->region_count)
838 		region_count = s->region_count;
839 
840 	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range))
841 		return FFA_INVALID_PARAMETERS;
842 	arange = buf;
843 
844 	for (n = 0; n < region_count; n++) {
845 		unsigned int page_count = READ_ONCE(arange[n].page_count);
846 		uint64_t addr = READ_ONCE(arange[n].address);
847 
848 		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
849 					  addr, page_count))
850 			return FFA_INVALID_PARAMETERS;
851 	}
852 
853 	s->region_count -= region_count;
854 	if (s->region_count)
855 		return region_count * sizeof(*arange);
856 
857 	if (s->current_page_idx != s->page_count)
858 		return FFA_INVALID_PARAMETERS;
859 
860 	return 0;
861 }
862 
863 static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
864 {
865 	int rc = 0;
866 
867 	rc = add_mem_share_helper(&s->share, buf, flen);
868 	if (rc >= 0) {
869 		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
870 			/* We're not at the end of the descriptor yet */
871 			if (s->share.region_count)
872 				return s->frag_offset;
873 
874 			/* We're done */
875 			rc = 0;
876 		} else {
877 			rc = FFA_INVALID_PARAMETERS;
878 		}
879 	}
880 
881 	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
882 	if (rc < 0)
883 		mobj_ffa_sel1_spmc_delete(s->share.mf);
884 	else
885 		mobj_ffa_push_to_inactive(s->share.mf);
886 	free(s);
887 
888 	return rc;
889 }
890 
891 static bool is_sp_share(struct ffa_mem_transaction_x *mem_trans,
892 			void *buf)
893 {
894 	struct ffa_mem_access_perm *perm = NULL;
895 	struct ffa_mem_access *mem_acc = NULL;
896 
897 	if (!IS_ENABLED(CFG_SECURE_PARTITION))
898 		return false;
899 
900 	if (mem_trans->mem_access_count < 1)
901 		return false;
902 
903 	mem_acc = (void *)((vaddr_t)buf + mem_trans->mem_access_offs);
904 	perm = &mem_acc->access_perm;
905 
906 	/*
907 	 * perm->endpoint_id is read here only to check if the endpoint is
908 	 * OP-TEE. We read it again later on, but there are some additional
909 	 * checks there to make sure that the data is correct.
910 	 */
911 	return READ_ONCE(perm->endpoint_id) != my_endpoint_id;
912 }
913 
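/*
 * Register a new memory share described by mem_trans. If the whole
 * descriptor fits in this fragment (flen == blen) the share is completed
 * here, otherwise a struct mem_frag_state is queued and the remaining
 * address ranges are expected via FFA_MEM_FRAG_TX.
 */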
914 static int add_mem_share(struct ffa_mem_transaction_x *mem_trans,
915 			 tee_mm_entry_t *mm, void *buf, size_t blen,
916 			 size_t flen, uint64_t *global_handle)
917 {
918 	int rc = 0;
919 	struct mem_share_state share = { };
920 	size_t addr_range_offs = 0;
921 	uint64_t cookie = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
922 	size_t n = 0;
923 
924 	rc = mem_share_init(mem_trans, buf, flen, &share.page_count,
925 			    &share.region_count, &addr_range_offs);
926 	if (rc)
927 		return rc;
928 
929 	if (MUL_OVERFLOW(share.region_count,
930 			 sizeof(struct ffa_address_range), &n) ||
931 	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
932 		return FFA_INVALID_PARAMETERS;
933 
934 	if (mem_trans->global_handle)
935 		cookie = mem_trans->global_handle;
936 	share.mf = mobj_ffa_sel1_spmc_new(cookie, share.page_count);
937 	if (!share.mf)
938 		return FFA_NO_MEMORY;
939 
940 	if (flen != blen) {
941 		struct mem_frag_state *s = calloc(sizeof(*s), 1);
942 
943 		if (!s) {
944 			rc = FFA_NO_MEMORY;
945 			goto err;
946 		}
947 		s->share = share;
948 		s->mm = mm;
949 		s->frag_offset = addr_range_offs;
950 
951 		SLIST_INSERT_HEAD(&frag_state_head, s, link);
952 		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
953 					flen - addr_range_offs);
954 
955 		if (rc >= 0)
956 			*global_handle = mobj_ffa_get_cookie(share.mf);
957 
958 		return rc;
959 	}
960 
961 	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
962 				  flen - addr_range_offs);
963 	if (rc) {
964 		/*
965 		 * A byte count may be returned instead of 0 (done), which is
966 		 * an error here since the whole descriptor was supplied.
967 		 */
968 		rc = FFA_INVALID_PARAMETERS;
969 		goto err;
970 	}
971 
972 	*global_handle = mobj_ffa_push_to_inactive(share.mf);
973 
974 	return 0;
975 err:
976 	mobj_ffa_sel1_spmc_delete(share.mf);
977 	return rc;
978 }
979 
980 static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
981 				 unsigned int page_count,
982 				 uint64_t *global_handle, struct ffa_rxtx *rxtx)
983 {
984 	struct ffa_mem_transaction_x mem_trans = { };
985 	int rc = 0;
986 	size_t len = 0;
987 	void *buf = NULL;
988 	tee_mm_entry_t *mm = NULL;
989 	vaddr_t offs = pbuf & SMALL_PAGE_MASK;
990 
991 	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
992 		return FFA_INVALID_PARAMETERS;
993 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
994 		return FFA_INVALID_PARAMETERS;
995 
996 	/*
997 	 * Check that the length reported in flen is covered by len even
998 	 * if the offset is taken into account.
999 	 */
1000 	if (len < flen || len - offs < flen)
1001 		return FFA_INVALID_PARAMETERS;
1002 
1003 	mm = tee_mm_alloc(&tee_mm_shm, len);
1004 	if (!mm)
1005 		return FFA_NO_MEMORY;
1006 
1007 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
1008 					  page_count, MEM_AREA_NSEC_SHM)) {
1009 		rc = FFA_INVALID_PARAMETERS;
1010 		goto out;
1011 	}
1012 	buf = (void *)(tee_mm_get_smem(mm) + offs);
1013 
1014 	cpu_spin_lock(&rxtx->spinlock);
1015 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, buf, flen, &mem_trans);
1016 	if (!rc && IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1017 	    virt_set_guest(mem_trans.sender_id))
1018 		rc = FFA_DENIED;
1019 	if (!rc)
1020 		rc = add_mem_share(&mem_trans, mm, buf, blen, flen,
1021 				   global_handle);
1022 	virt_unset_guest();
1023 	cpu_spin_unlock(&rxtx->spinlock);
1024 	if (rc > 0)
1025 		return rc;
1026 
1027 	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1028 out:
1029 	tee_mm_free(mm);
1030 	return rc;
1031 }
1032 
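/*
 * FFA_MEM_SHARE with the transaction descriptor passed via the caller's
 * TX buffer (our RX buffer). Shares destined for a secure partition are
 * forwarded to the SP handler instead of being turned into a mobj here.
 */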
1033 static int handle_mem_share_rxbuf(size_t blen, size_t flen,
1034 				  uint64_t *global_handle,
1035 				  struct ffa_rxtx *rxtx)
1036 {
1037 	struct ffa_mem_transaction_x mem_trans = { };
1038 	int rc = FFA_DENIED;
1039 
1040 	cpu_spin_lock(&rxtx->spinlock);
1041 
1042 	if (!rxtx->rx || flen > rxtx->size)
1043 		goto out;
1044 
1045 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, flen,
1046 				       &mem_trans);
1047 	if (rc)
1048 		goto out;
1049 	if (is_sp_share(&mem_trans, rxtx->rx)) {
1050 		rc = spmc_sp_add_share(&mem_trans, rxtx, blen,
1051 				       global_handle, NULL);
1052 		goto out;
1053 	}
1054 
1055 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1056 	    virt_set_guest(mem_trans.sender_id))
1057 		goto out;
1058 
1059 	rc = add_mem_share(&mem_trans, NULL, rxtx->rx, blen, flen,
1060 			   global_handle);
1061 
1062 	virt_unset_guest();
1063 
1064 out:
1065 	cpu_spin_unlock(&rxtx->spinlock);
1066 
1067 	return rc;
1068 }
1069 
1070 static void handle_mem_share(struct thread_smc_args *args,
1071 			     struct ffa_rxtx *rxtx)
1072 {
1073 	uint32_t tot_len = args->a1;
1074 	uint32_t frag_len = args->a2;
1075 	uint64_t addr = args->a3;
1076 	uint32_t page_count = args->a4;
1077 	uint32_t ret_w1 = 0;
1078 	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
1079 	uint32_t ret_w3 = 0;
1080 	uint32_t ret_fid = FFA_ERROR;
1081 	uint64_t global_handle = 0;
1082 	int rc = 0;
1083 
1084 	/* Check that the MBZs are indeed 0 */
1085 	if (args->a5 || args->a6 || args->a7)
1086 		goto out;
1087 
1088 	/* Check that fragment length doesn't exceed total length */
1089 	if (frag_len > tot_len)
1090 		goto out;
1091 
1092 	/* Check for 32-bit calling convention */
1093 	if (args->a0 == FFA_MEM_SHARE_32)
1094 		addr &= UINT32_MAX;
1095 
1096 	if (!addr) {
1097 		/*
1098 		 * The memory transaction descriptor is passed via our rx
1099 		 * buffer.
1100 		 */
1101 		if (page_count)
1102 			goto out;
1103 		rc = handle_mem_share_rxbuf(tot_len, frag_len, &global_handle,
1104 					    rxtx);
1105 	} else {
1106 		rc = handle_mem_share_tmem(addr, tot_len, frag_len, page_count,
1107 					   &global_handle, rxtx);
1108 	}
1109 	if (rc < 0) {
1110 		ret_w2 = rc;
1111 	} else if (rc > 0) {
1112 		ret_fid = FFA_MEM_FRAG_RX;
1113 		ret_w3 = rc;
1114 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1115 	} else {
1116 		ret_fid = FFA_SUCCESS_32;
1117 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1118 	}
1119 out:
1120 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1121 }
1122 
1123 static struct mem_frag_state *get_frag_state(uint64_t global_handle)
1124 {
1125 	struct mem_frag_state *s = NULL;
1126 
1127 	SLIST_FOREACH(s, &frag_state_head, link)
1128 		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
1129 			return s;
1130 
1131 	return NULL;
1132 }
1133 
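/*
 * FFA_MEM_FRAG_TX carries the next fragment of a previously started
 * FFA_MEM_SHARE: w1/w2 hold the handle (cookie), w3 the fragment length
 * and w4 the sender endpoint ID in bits [31:16].
 */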
1134 static void handle_mem_frag_tx(struct thread_smc_args *args,
1135 			       struct ffa_rxtx *rxtx)
1136 {
1137 	uint64_t global_handle = reg_pair_to_64(args->a2, args->a1);
1138 	size_t flen = args->a3;
1139 	uint32_t endpoint_id = args->a4;
1140 	struct mem_frag_state *s = NULL;
1141 	tee_mm_entry_t *mm = NULL;
1142 	unsigned int page_count = 0;
1143 	void *buf = NULL;
1144 	uint32_t ret_w1 = 0;
1145 	uint32_t ret_w2 = 0;
1146 	uint32_t ret_w3 = 0;
1147 	uint32_t ret_fid = 0;
1148 	int rc = 0;
1149 
1150 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1151 		uint16_t guest_id = endpoint_id >> 16;
1152 
1153 		if (!guest_id || virt_set_guest(guest_id)) {
1154 			rc = FFA_INVALID_PARAMETERS;
1155 			goto out_set_rc;
1156 		}
1157 	}
1158 
1159 	/*
1160 	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
1161 	 * requests.
1162 	 */
1163 
1164 	cpu_spin_lock(&rxtx->spinlock);
1165 
1166 	s = get_frag_state(global_handle);
1167 	if (!s) {
1168 		rc = FFA_INVALID_PARAMETERS;
1169 		goto out;
1170 	}
1171 
1172 	mm = s->mm;
1173 	if (mm) {
1174 		if (flen > tee_mm_get_bytes(mm)) {
1175 			rc = FFA_INVALID_PARAMETERS;
1176 			goto out;
1177 		}
1178 		page_count = s->share.page_count;
1179 		buf = (void *)tee_mm_get_smem(mm);
1180 	} else {
1181 		if (flen > rxtx->size) {
1182 			rc = FFA_INVALID_PARAMETERS;
1183 			goto out;
1184 		}
1185 		buf = rxtx->rx;
1186 	}
1187 
1188 	rc = add_mem_share_frag(s, buf, flen);
1189 out:
1190 	virt_unset_guest();
1191 	cpu_spin_unlock(&rxtx->spinlock);
1192 
1193 	if (rc <= 0 && mm) {
1194 		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1195 		tee_mm_free(mm);
1196 	}
1197 
1198 out_set_rc:
1199 	if (rc < 0) {
1200 		ret_fid = FFA_ERROR;
1201 		ret_w2 = rc;
1202 	} else if (rc > 0) {
1203 		ret_fid = FFA_MEM_FRAG_RX;
1204 		ret_w3 = rc;
1205 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1206 	} else {
1207 		ret_fid = FFA_SUCCESS_32;
1208 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1209 	}
1210 
1211 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1212 }
1213 
1214 static void handle_mem_reclaim(struct thread_smc_args *args)
1215 {
1216 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1217 	uint32_t ret_fid = FFA_ERROR;
1218 	uint64_t cookie = 0;
1219 
1220 	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
1221 		goto out;
1222 
1223 	cookie = reg_pair_to_64(args->a2, args->a1);
1224 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1225 		uint16_t guest_id = 0;
1226 
1227 		if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
1228 			guest_id = virt_find_guest_by_cookie(cookie);
1229 		} else {
1230 			guest_id = (cookie >> FFA_MEMORY_HANDLE_PRTN_SHIFT) &
1231 				   FFA_MEMORY_HANDLE_PRTN_MASK;
1232 		}
1233 		if (!guest_id || virt_set_guest(guest_id))
1234 			goto out;
1235 	}
1236 
1237 	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
1238 	case TEE_SUCCESS:
1239 		ret_fid = FFA_SUCCESS_32;
1240 		ret_val = 0;
1241 		break;
1242 	case TEE_ERROR_ITEM_NOT_FOUND:
1243 		DMSG("cookie %#"PRIx64" not found", cookie);
1244 		ret_val = FFA_INVALID_PARAMETERS;
1245 		break;
1246 	default:
1247 		DMSG("cookie %#"PRIx64" busy", cookie);
1248 		ret_val = FFA_DENIED;
1249 		break;
1250 	}
1251 
1252 	virt_unset_guest();
1253 
1254 out:
1255 	spmc_set_args(args, ret_fid, ret_val, 0, 0, 0, 0);
1256 }
1257 #endif
1258 
1259 /* Only called from assembly */
1260 void thread_spmc_msg_recv(struct thread_smc_args *args);
1261 void thread_spmc_msg_recv(struct thread_smc_args *args)
1262 {
1263 	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
1264 	switch (args->a0) {
1265 #if defined(CFG_CORE_SEL1_SPMC)
1266 	case FFA_VERSION:
1267 		spmc_handle_version(args, &nw_rxtx);
1268 		break;
1269 	case FFA_FEATURES:
1270 		handle_features(args);
1271 		break;
1272 	case FFA_SPM_ID_GET:
1273 		handle_spm_id_get(args);
1274 		break;
1275 #ifdef ARM64
1276 	case FFA_RXTX_MAP_64:
1277 #endif
1278 	case FFA_RXTX_MAP_32:
1279 		spmc_handle_rxtx_map(args, &nw_rxtx);
1280 		break;
1281 	case FFA_RXTX_UNMAP:
1282 		spmc_handle_rxtx_unmap(args, &nw_rxtx);
1283 		break;
1284 	case FFA_RX_RELEASE:
1285 		spmc_handle_rx_release(args, &nw_rxtx);
1286 		break;
1287 	case FFA_PARTITION_INFO_GET:
1288 		spmc_handle_partition_info_get(args, &nw_rxtx);
1289 		break;
1290 #endif /*CFG_CORE_SEL1_SPMC*/
1291 	case FFA_INTERRUPT:
1292 		interrupt_main_handler();
1293 		spmc_set_args(args, FFA_MSG_WAIT, 0, 0, 0, 0, 0);
1294 		break;
1295 #ifdef ARM64
1296 	case FFA_MSG_SEND_DIRECT_REQ_64:
1297 #endif
1298 	case FFA_MSG_SEND_DIRECT_REQ_32:
1299 		handle_direct_request(args, &nw_rxtx);
1300 		break;
1301 #if defined(CFG_CORE_SEL1_SPMC)
1302 #ifdef ARM64
1303 	case FFA_MEM_SHARE_64:
1304 #endif
1305 	case FFA_MEM_SHARE_32:
1306 		handle_mem_share(args, &nw_rxtx);
1307 		break;
1308 	case FFA_MEM_RECLAIM:
1309 		if (!IS_ENABLED(CFG_SECURE_PARTITION) ||
1310 		    !ffa_mem_reclaim(args, NULL))
1311 			handle_mem_reclaim(args);
1312 		break;
1313 	case FFA_MEM_FRAG_TX:
1314 		handle_mem_frag_tx(args, &nw_rxtx);
1315 		break;
1316 #endif /*CFG_CORE_SEL1_SPMC*/
1317 	default:
1318 		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
1319 		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
1320 			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1321 	}
1322 }
1323 
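/*
 * Handle OPTEE_FFA_YIELDING_CALL_WITH_ARG: the shared memory object
 * identified by the cookie holds a struct optee_msg_arg at @offset,
 * immediately followed by a second argument struct reserved for RPC.
 */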
1324 static TEE_Result yielding_call_with_arg(uint64_t cookie, uint32_t offset)
1325 {
1326 	size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1327 	struct thread_ctx *thr = threads + thread_get_id();
1328 	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
1329 	struct optee_msg_arg *arg = NULL;
1330 	struct mobj *mobj = NULL;
1331 	uint32_t num_params = 0;
1332 	size_t sz = 0;
1333 
1334 	mobj = mobj_ffa_get_by_cookie(cookie, 0);
1335 	if (!mobj) {
1336 		EMSG("Can't find cookie %#"PRIx64, cookie);
1337 		return TEE_ERROR_BAD_PARAMETERS;
1338 	}
1339 
1340 	res = mobj_inc_map(mobj);
1341 	if (res)
1342 		goto out_put_mobj;
1343 
1344 	res = TEE_ERROR_BAD_PARAMETERS;
1345 	arg = mobj_get_va(mobj, offset, sizeof(*arg));
1346 	if (!arg)
1347 		goto out_dec_map;
1348 
1349 	num_params = READ_ONCE(arg->num_params);
1350 	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
1351 		goto out_dec_map;
1352 
1353 	sz = OPTEE_MSG_GET_ARG_SIZE(num_params);
1354 
1355 	thr->rpc_arg = mobj_get_va(mobj, offset + sz, sz_rpc);
1356 	if (!thr->rpc_arg)
1357 		goto out_dec_map;
1358 
1359 	virt_on_stdcall();
1360 	res = tee_entry_std(arg, num_params);
1361 
1362 	thread_rpc_shm_cache_clear(&thr->shm_cache);
1363 	thr->rpc_arg = NULL;
1364 
1365 out_dec_map:
1366 	mobj_dec_map(mobj);
1367 out_put_mobj:
1368 	mobj_put(mobj);
1369 	return res;
1370 }
1371 
1372 /*
1373  * Helper routine for the assembly function thread_std_smc_entry()
1374  *
1375  * Note: this function is weak just to make link_dummies_paged.c happy.
1376  */
1377 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
1378 				       uint32_t a2, uint32_t a3,
1379 				       uint32_t a4, uint32_t a5 __unused)
1380 {
1381 	/*
1382 	 * Arguments are supplied from handle_yielding_call() as:
1383 	 * a0 <- w1
1384 	 * a1 <- w3
1385 	 * a2 <- w4
1386 	 * a3 <- w5
1387 	 * a4 <- w6
1388 	 * a5 <- w7
1389 	 */
1390 	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
1391 	if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG)
1392 		return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4);
1393 	return FFA_DENIED;
1394 }
1395 
1396 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
1397 {
1398 	uint64_t offs = tpm->u.memref.offs;
1399 
1400 	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
1401 		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
1402 
1403 	param->u.fmem.offs_low = offs;
1404 	param->u.fmem.offs_high = offs >> 32;
1405 	if (param->u.fmem.offs_high != offs >> 32)
1406 		return false;
1407 
1408 	param->u.fmem.size = tpm->u.memref.size;
1409 	if (tpm->u.memref.mobj) {
1410 		uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj);
1411 
1412 		/* If a mobj is passed it better be one with a valid cookie. */
1413 		if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
1414 			return false;
1415 		param->u.fmem.global_id = cookie;
1416 	} else {
1417 		param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
1418 	}
1419 
1420 	return true;
1421 }
1422 
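/*
 * Build an OPTEE_MSG RPC argument structure in the per-thread RPC
 * buffer that was set up by yielding_call_with_arg().
 */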
1423 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
1424 			    struct thread_param *params,
1425 			    struct optee_msg_arg **arg_ret)
1426 {
1427 	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1428 	struct thread_ctx *thr = threads + thread_get_id();
1429 	struct optee_msg_arg *arg = thr->rpc_arg;
1430 
1431 	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
1432 		return TEE_ERROR_BAD_PARAMETERS;
1433 
1434 	if (!arg) {
1435 		EMSG("rpc_arg not set");
1436 		return TEE_ERROR_GENERIC;
1437 	}
1438 
1439 	memset(arg, 0, sz);
1440 	arg->cmd = cmd;
1441 	arg->num_params = num_params;
1442 	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
1443 
1444 	for (size_t n = 0; n < num_params; n++) {
1445 		switch (params[n].attr) {
1446 		case THREAD_PARAM_ATTR_NONE:
1447 			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
1448 			break;
1449 		case THREAD_PARAM_ATTR_VALUE_IN:
1450 		case THREAD_PARAM_ATTR_VALUE_OUT:
1451 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1452 			arg->params[n].attr = params[n].attr -
1453 					      THREAD_PARAM_ATTR_VALUE_IN +
1454 					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
1455 			arg->params[n].u.value.a = params[n].u.value.a;
1456 			arg->params[n].u.value.b = params[n].u.value.b;
1457 			arg->params[n].u.value.c = params[n].u.value.c;
1458 			break;
1459 		case THREAD_PARAM_ATTR_MEMREF_IN:
1460 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1461 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1462 			if (!set_fmem(arg->params + n, params + n))
1463 				return TEE_ERROR_BAD_PARAMETERS;
1464 			break;
1465 		default:
1466 			return TEE_ERROR_BAD_PARAMETERS;
1467 		}
1468 	}
1469 
1470 	if (arg_ret)
1471 		*arg_ret = arg;
1472 
1473 	return TEE_SUCCESS;
1474 }
1475 
1476 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
1477 				struct thread_param *params)
1478 {
1479 	for (size_t n = 0; n < num_params; n++) {
1480 		switch (params[n].attr) {
1481 		case THREAD_PARAM_ATTR_VALUE_OUT:
1482 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1483 			params[n].u.value.a = arg->params[n].u.value.a;
1484 			params[n].u.value.b = arg->params[n].u.value.b;
1485 			params[n].u.value.c = arg->params[n].u.value.c;
1486 			break;
1487 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1488 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1489 			params[n].u.memref.size = arg->params[n].u.fmem.size;
1490 			break;
1491 		default:
1492 			break;
1493 		}
1494 	}
1495 
1496 	return arg->ret;
1497 }
1498 
1499 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
1500 			struct thread_param *params)
1501 {
1502 	struct thread_rpc_arg rpc_arg = { .call = {
1503 			.w1 = thread_get_tsd()->rpc_target_info,
1504 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1505 		},
1506 	};
1507 	struct optee_msg_arg *arg = NULL;
1508 	uint32_t ret = 0;
1509 
1510 	ret = get_rpc_arg(cmd, num_params, params, &arg);
1511 	if (ret)
1512 		return ret;
1513 
1514 	thread_rpc(&rpc_arg);
1515 
1516 	return get_rpc_arg_res(arg, num_params, params);
1517 }
1518 
1519 static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
1520 {
1521 	struct thread_rpc_arg rpc_arg = { .call = {
1522 			.w1 = thread_get_tsd()->rpc_target_info,
1523 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1524 		},
1525 	};
1526 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
1527 	uint32_t res2 = 0;
1528 	uint32_t res = 0;
1529 
1530 	DMSG("freeing cookie %#"PRIx64, cookie);
1531 
1532 	res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);
1533 
1534 	mobj_put(mobj);
1535 	res2 = mobj_ffa_unregister_by_cookie(cookie);
1536 	if (res2)
1537 		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
1538 		     cookie, res2);
1539 	if (!res)
1540 		thread_rpc(&rpc_arg);
1541 }
1542 
1543 static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
1544 {
1545 	struct thread_rpc_arg rpc_arg = { .call = {
1546 			.w1 = thread_get_tsd()->rpc_target_info,
1547 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1548 		},
1549 	};
1550 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
1551 	struct optee_msg_arg *arg = NULL;
1552 	unsigned int internal_offset = 0;
1553 	struct mobj *mobj = NULL;
1554 	uint64_t cookie = 0;
1555 
1556 	if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
1557 		return NULL;
1558 
1559 	thread_rpc(&rpc_arg);
1560 
1561 	if (arg->num_params != 1 ||
1562 	    arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
1563 		return NULL;
1564 
1565 	internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs);
1566 	cookie = READ_ONCE(arg->params->u.fmem.global_id);
1567 	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
1568 	if (!mobj) {
1569 		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
1570 		     cookie, internal_offset);
1571 		return NULL;
1572 	}
1573 
1574 	assert(mobj_is_nonsec(mobj));
1575 
1576 	if (mobj->size < size) {
1577 		DMSG("Mobj %#"PRIx64": wrong size", cookie);
1578 		mobj_put(mobj);
1579 		return NULL;
1580 	}
1581 
1582 	if (mobj_inc_map(mobj)) {
1583 		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
1584 		mobj_put(mobj);
1585 		return NULL;
1586 	}
1587 
1588 	return mobj;
1589 }
1590 
1591 struct mobj *thread_rpc_alloc_payload(size_t size)
1592 {
1593 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
1594 }
1595 
1596 struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
1597 {
1598 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
1599 }
1600 
1601 void thread_rpc_free_kernel_payload(struct mobj *mobj)
1602 {
1603 	thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL, mobj_get_cookie(mobj), mobj);
1604 }
1605 
1606 void thread_rpc_free_payload(struct mobj *mobj)
1607 {
1608 	thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
1609 			mobj);
1610 }
1611 
1612 struct mobj *thread_rpc_alloc_global_payload(size_t size)
1613 {
1614 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
1615 }
1616 
1617 void thread_rpc_free_global_payload(struct mobj *mobj)
1618 {
1619 	thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL, mobj_get_cookie(mobj),
1620 			mobj);
1621 }
1622 
1623 void thread_spmc_register_secondary_ep(vaddr_t ep)
1624 {
1625 	unsigned long ret = 0;
1626 
1627 	/* Let the SPM know the entry point for secondary CPUs */
1628 	ret = thread_smc(FFA_SECONDARY_EP_REGISTER_64, ep, 0, 0);
1629 
1630 	if (ret != FFA_SUCCESS_32 && ret != FFA_SUCCESS_64)
1631 		EMSG("FFA_SECONDARY_EP_REGISTER_64 ret %#lx", ret);
1632 }
1633 
1634 #if defined(CFG_CORE_SEL1_SPMC)
1635 static TEE_Result spmc_init(void)
1636 {
1637 	my_endpoint_id = SPMC_ENDPOINT_ID;
1638 	DMSG("My endpoint ID %#x", my_endpoint_id);
1639 
1640 	/*
1641 	 * If the SPMD thinks we are version 1.0 it will report version 1.0
1642 	 * to normal world regardless of what version we query the SPM with.
1643 	 * However, if the SPMD thinks we are version 1.1 it will forward
1644 	 * queries from normal world to let us negotiate the version. So by
1645 	 * setting version 1.0 here we should be compatible.
1646 	 *
1647 	 * Note that disagreement on negotiated version means that we'll
1648 	 * have communication problems with normal world.
1649 	 */
1650 	nw_rxtx.ffa_vers = FFA_VERSION_1_0;
1651 
1652 	return TEE_SUCCESS;
1653 }
1654 #else /* !defined(CFG_CORE_SEL1_SPMC) */
1655 static bool is_ffa_success(uint32_t fid)
1656 {
1657 #ifdef ARM64
1658 	if (fid == FFA_SUCCESS_64)
1659 		return true;
1660 #endif
1661 	return fid == FFA_SUCCESS_32;
1662 }
1663 
1664 static void spmc_rxtx_map(struct ffa_rxtx *rxtx)
1665 {
1666 	struct thread_smc_args args = {
1667 #ifdef ARM64
1668 		.a0 = FFA_RXTX_MAP_64,
1669 #else
1670 		.a0 = FFA_RXTX_MAP_32,
1671 #endif
1672 		.a1 = virt_to_phys(rxtx->tx),
1673 		.a2 = virt_to_phys(rxtx->rx),
1674 		.a3 = 1,
1675 	};
1676 
1677 	thread_smccc(&args);
1678 	if (!is_ffa_success(args.a0)) {
1679 		if (args.a0 == FFA_ERROR)
1680 			EMSG("rxtx map failed with error %ld", args.a2);
1681 		else
1682 			EMSG("rxtx map failed");
1683 		panic();
1684 	}
1685 }
1686 
1687 static uint16_t get_my_id(void)
1688 {
1689 	struct thread_smc_args args = {
1690 		.a0 = FFA_ID_GET,
1691 	};
1692 
1693 	thread_smccc(&args);
1694 	if (!is_ffa_success(args.a0)) {
1695 		if (args.a0 == FFA_ERROR)
1696 			EMSG("Get id failed with error %ld", args.a2);
1697 		else
1698 			EMSG("Get id failed");
1699 		panic();
1700 	}
1701 
1702 	return args.a2;
1703 }
1704 
1705 static uint32_t get_ffa_version(uint32_t my_version)
1706 {
1707 	struct thread_smc_args args = {
1708 		.a0 = FFA_VERSION,
1709 		.a1 = my_version,
1710 	};
1711 
1712 	thread_smccc(&args);
1713 	if (args.a0 & BIT(31)) {
1714 		EMSG("FF-A version failed with error %ld", args.a0);
1715 		panic();
1716 	}
1717 
1718 	return args.a0;
1719 }
1720 
1721 static void *spmc_retrieve_req(uint64_t cookie,
1722 			       struct ffa_mem_transaction_x *trans)
1723 {
1724 	struct ffa_mem_access *acc_descr_array = NULL;
1725 	struct ffa_mem_access_perm *perm_descr = NULL;
1726 	struct thread_smc_args args = {
1727 		.a0 = FFA_MEM_RETRIEVE_REQ_32,
1728 		.a3 =	0,	/* Address, Using TX -> MBZ */
1729 		.a4 =   0,	/* Using TX -> MBZ */
1730 	};
1731 	size_t size = 0;
1732 	int rc = 0;
1733 
1734 	if (nw_rxtx.ffa_vers == FFA_VERSION_1_0) {
1735 		struct ffa_mem_transaction_1_0 *trans_descr = nw_rxtx.tx;
1736 
1737 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
1738 		memset(trans_descr, 0, size);
1739 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
1740 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
1741 		trans_descr->global_handle = cookie;
1742 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
1743 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
1744 		trans_descr->mem_access_count = 1;
1745 		acc_descr_array = trans_descr->mem_access_array;
1746 	} else {
1747 		struct ffa_mem_transaction_1_1 *trans_descr = nw_rxtx.tx;
1748 
1749 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
1750 		memset(trans_descr, 0, size);
1751 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
1752 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
1753 		trans_descr->global_handle = cookie;
1754 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
1755 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
1756 		trans_descr->mem_access_count = 1;
1757 		trans_descr->mem_access_offs = sizeof(*trans_descr);
1758 		trans_descr->mem_access_size = sizeof(struct ffa_mem_access);
1759 		acc_descr_array = (void *)((vaddr_t)nw_rxtx.tx +
1760 					   sizeof(*trans_descr));
1761 	}
1762 	acc_descr_array->region_offs = 0;
1763 	acc_descr_array->reserved = 0;
1764 	perm_descr = &acc_descr_array->access_perm;
1765 	perm_descr->endpoint_id = my_endpoint_id;
1766 	perm_descr->perm = FFA_MEM_ACC_RW;
1767 	perm_descr->flags = 0;
1768 
1769 	args.a1 = size; /* Total Length */
1770 	args.a2 = size; /* Frag Length == Total length */
1771 	thread_smccc(&args);
1772 	if (args.a0 != FFA_MEM_RETRIEVE_RESP) {
1773 		if (args.a0 == FFA_ERROR)
1774 			EMSG("Failed to fetch cookie %#"PRIx64" error code %d",
1775 			     cookie, (int)args.a2);
1776 		else
1777 			EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64,
1778 			     cookie, args.a0);
1779 		return NULL;
1780 	}
1781 	rc = spmc_read_mem_transaction(nw_rxtx.ffa_vers, nw_rxtx.tx,
1782 				       nw_rxtx.size, trans);
1783 	if (rc) {
1784 		EMSG("Memory transaction failure for cookie %#"PRIx64" rc %d",
1785 		     cookie, rc);
1786 		return NULL;
1787 	}
1788 
1789 	return nw_rxtx.rx;
1790 }
1791 
1792 void thread_spmc_relinquish(uint64_t cookie)
1793 {
1794 	struct ffa_mem_relinquish *relinquish_desc = nw_rxtx.tx;
1795 	struct thread_smc_args args = {
1796 		.a0 = FFA_MEM_RELINQUISH,
1797 	};
1798 
1799 	memset(relinquish_desc, 0, sizeof(*relinquish_desc));
1800 	relinquish_desc->handle = cookie;
1801 	relinquish_desc->flags = 0;
1802 	relinquish_desc->endpoint_count = 1;
1803 	relinquish_desc->endpoint_id_array[0] = my_endpoint_id;
1804 	thread_smccc(&args);
1805 	if (!is_ffa_success(args.a0))
1806 		EMSG("Failed to relinquish cookie %#"PRIx64, cookie);
1807 }
1808 
1809 static int set_pages(struct ffa_address_range *regions,
1810 		     unsigned int num_regions, unsigned int num_pages,
1811 		     struct mobj_ffa *mf)
1812 {
1813 	unsigned int n = 0;
1814 	unsigned int idx = 0;
1815 
1816 	for (n = 0; n < num_regions; n++) {
1817 		unsigned int page_count = READ_ONCE(regions[n].page_count);
1818 		uint64_t addr = READ_ONCE(regions[n].address);
1819 
1820 		if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count))
1821 			return FFA_INVALID_PARAMETERS;
1822 	}
1823 
1824 	if (idx != num_pages)
1825 		return FFA_INVALID_PARAMETERS;
1826 
1827 	return 0;
1828 }
1829 
1830 struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie)
1831 {
1832 	struct mobj_ffa *ret = NULL;
1833 	struct ffa_mem_transaction_x retrieve_desc = { };
1834 	struct ffa_mem_access *descr_array = NULL;
1835 	struct ffa_mem_region *descr = NULL;
1836 	struct mobj_ffa *mf = NULL;
1837 	unsigned int num_pages = 0;
1838 	unsigned int offs = 0;
1839 	void *buf = NULL;
1840 	struct thread_smc_args ffa_rx_release_args = {
1841 		.a0 = FFA_RX_RELEASE
1842 	};
1843 
1844 	/*
1845 	 * OP-TEE only supports a single mem_region while the
1846 	 * specification allows for more than one.
1847 	 */
1848 	buf = spmc_retrieve_req(cookie, &retrieve_desc);
1849 	if (!buf) {
1850 		EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64,
1851 		     cookie);
1852 		return NULL;
1853 	}
1854 
1855 	descr_array = (void *)((vaddr_t)buf + retrieve_desc.mem_access_offs);
1856 	offs = READ_ONCE(descr_array->region_offs);
1857 	descr = (struct ffa_mem_region *)((vaddr_t)buf + offs);
1858 
1859 	num_pages = READ_ONCE(descr->total_page_count);
1860 	mf = mobj_ffa_spmc_new(cookie, num_pages);
1861 	if (!mf)
1862 		goto out;
1863 
1864 	if (set_pages(descr->address_range_array,
1865 		      READ_ONCE(descr->address_range_count), num_pages, mf)) {
1866 		mobj_ffa_spmc_delete(mf);
1867 		goto out;
1868 	}
1869 
1870 	ret = mf;
1871 
1872 out:
1873 	/* Release RX buffer after the mem retrieve request. */
1874 	thread_smccc(&ffa_rx_release_args);
1875 
1876 	return ret;
1877 }
1878 
1879 static TEE_Result spmc_init(void)
1880 {
1881 	unsigned int major = 0;
1882 	unsigned int minor __maybe_unused = 0;
1883 	uint32_t my_vers = 0;
1884 	uint32_t vers = 0;
1885 
1886 	my_vers = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR);
1887 	vers = get_ffa_version(my_vers);
1888 	major = (vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK;
1889 	minor = (vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK;
1890 	DMSG("SPMC reported version %u.%u", major, minor);
1891 	if (major != FFA_VERSION_MAJOR) {
1892 		EMSG("Incompatible major version %u, expected %u",
1893 		     major, FFA_VERSION_MAJOR);
1894 		panic();
1895 	}
1896 	if (vers < my_vers)
1897 		my_vers = vers;
1898 	DMSG("Using version %u.%u",
1899 	     (my_vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK,
1900 	     (my_vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK);
1901 	nw_rxtx.ffa_vers = my_vers;
1902 
1903 	spmc_rxtx_map(&nw_rxtx);
1904 	my_endpoint_id = get_my_id();
1905 	DMSG("My endpoint ID %#x", my_endpoint_id);
1906 
1907 	return TEE_SUCCESS;
1908 }
1909 #endif /* !defined(CFG_CORE_SEL1_SPMC) */
1910 
1911 /*
1912  * boot_final() is always done before exiting at the end of boot
1913  * initialization. With virtualization the init-calls are only done once
1914  * an OP-TEE partition has been created, so in that case we have to
1915  * initialize via boot_final() to make sure a value has been assigned
1916  * before it's used the first time.
1917  */
1918 #ifdef CFG_NS_VIRTUALIZATION
1919 boot_final(spmc_init);
1920 #else
1921 service_init(spmc_init);
1922 #endif
1923