xref: /optee_os/core/arch/arm/kernel/thread_spmc.c (revision 4af447d4084e293800d4e463d65003c016b91f29)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2020-2021, Linaro Limited.
4  * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
5  */
6 
7 #include <assert.h>
8 #include <ffa.h>
9 #include <io.h>
10 #include <initcall.h>
11 #include <kernel/interrupt.h>
12 #include <kernel/panic.h>
13 #include <kernel/secure_partition.h>
14 #include <kernel/spinlock.h>
15 #include <kernel/spmc_sp_handler.h>
16 #include <kernel/tee_misc.h>
17 #include <kernel/thread.h>
18 #include <kernel/thread_spmc.h>
19 #include <mm/core_mmu.h>
20 #include <mm/mobj.h>
21 #include <optee_ffa.h>
22 #include <optee_msg.h>
23 #include <optee_rpc_cmd.h>
24 #include <string.h>
25 #include <sys/queue.h>
26 #include <tee/entry_std.h>
27 #include <util.h>
28 
29 #include "thread_private.h"
30 
31 #if defined(CFG_CORE_SEL1_SPMC)
32 struct mem_share_state {
33 	struct mobj_ffa *mf;
34 	unsigned int page_count;
35 	unsigned int region_count;
36 	unsigned int current_page_idx;
37 };
38 
39 struct mem_frag_state {
40 	struct mem_share_state share;
41 	tee_mm_entry_t *mm;
42 	unsigned int frag_offset;
43 	SLIST_ENTRY(mem_frag_state) link;
44 };
45 #endif
46 
47 /* Initialized in spmc_init() below */
48 static uint16_t my_endpoint_id;
49 
50 /*
51  * If struct ffa_rxtx::size is 0 RX/TX buffers are not mapped or initialized.
52  *
53  * struct ffa_rxtx::spinlock protects the variables below from concurrent
54  * access. This includes the use of the content of struct ffa_rxtx::rx and
55  * @frag_state_head.
56  *
57  * struct ffa_rxtx::tx_is_mine is true when we may write to struct
58  * ffa_rxtx::tx and false when it is owned by normal world.
59  *
60  * Note that we can't prevent normal world from updating the content of
61  * these buffers so we must always be careful when reading, even while we
62  * hold the lock.
63  */
64 
65 #ifdef CFG_CORE_SEL2_SPMC
66 static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
67 static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
68 static struct ffa_rxtx nw_rxtx = { .rx = __rx_buf, .tx = __tx_buf };
69 #else
70 static struct ffa_rxtx nw_rxtx;
71 
72 static bool is_nw_buf(struct ffa_rxtx *rxtx)
73 {
74 	return rxtx == &nw_rxtx;
75 }
76 
77 static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
78 	SLIST_HEAD_INITIALIZER(&frag_state_head);
79 #endif
80 
81 static uint32_t swap_src_dst(uint32_t src_dst)
82 {
83 	return (src_dst >> 16) | (src_dst << 16);
84 }
85 
86 void spmc_set_args(struct thread_smc_args *args, uint32_t fid, uint32_t src_dst,
87 		   uint32_t w2, uint32_t w3, uint32_t w4, uint32_t w5)
88 {
89 	*args = (struct thread_smc_args){ .a0 = fid,
90 					  .a1 = src_dst,
91 					  .a2 = w2,
92 					  .a3 = w3,
93 					  .a4 = w4,
94 					  .a5 = w5, };
95 }
96 
97 #if defined(CFG_CORE_SEL1_SPMC)
98 void spmc_handle_version(struct thread_smc_args *args)
99 {
100 	/*
101 	 * We currently only support one version, 1.0, so let's keep it
102 	 * simple.
103 	 */
104 	spmc_set_args(args,
105 		      MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR),
106 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
107 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
108 }
109 
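/*
 * Handle FFA_FEATURES: report whether the FF-A function ID in w1 is
 * implemented and, where defined, its interface properties in w2 (for
 * instance the minimum buffer size and alignment for FFA_RXTX_MAP).
 */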
110 static void handle_features(struct thread_smc_args *args)
111 {
112 	uint32_t ret_fid = 0;
113 	uint32_t ret_w2 = FFA_PARAM_MBZ;
114 
115 	switch (args->a1) {
116 #ifdef ARM64
117 	case FFA_RXTX_MAP_64:
118 #endif
119 	case FFA_RXTX_MAP_32:
120 		ret_fid = FFA_SUCCESS_32;
121 		ret_w2 = 0; /* 4kB Minimum buffer size and alignment boundary */
122 		break;
123 #ifdef ARM64
124 	case FFA_MEM_SHARE_64:
125 #endif
126 	case FFA_MEM_SHARE_32:
127 		ret_fid = FFA_SUCCESS_32;
128 		/*
129 		 * Partition manager supports transmission of a memory
130 		 * transaction descriptor in a buffer dynamically allocated
131 		 * by the endpoint.
132 		 */
133 		ret_w2 = BIT(0);
134 		break;
135 
136 	case FFA_ERROR:
137 	case FFA_VERSION:
138 	case FFA_SUCCESS_32:
139 #ifdef ARM64
140 	case FFA_SUCCESS_64:
141 #endif
142 	case FFA_MEM_FRAG_TX:
143 	case FFA_MEM_RECLAIM:
144 	case FFA_MSG_SEND_DIRECT_REQ_32:
145 	case FFA_INTERRUPT:
146 	case FFA_PARTITION_INFO_GET:
147 	case FFA_RX_RELEASE:
148 		ret_fid = FFA_SUCCESS_32;
149 		break;
150 	default:
151 		ret_fid = FFA_ERROR;
152 		ret_w2 = FFA_NOT_SUPPORTED;
153 		break;
154 	}
155 
156 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
157 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
158 }
159 
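/*
 * Map a physically contiguous non-secure buffer into the non-secure
 * shared memory VA space. Returns 0 and the virtual address in @va_ret
 * on success, or an FFA_* error code on failure.
 */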
160 static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
161 {
162 	tee_mm_entry_t *mm = NULL;
163 
164 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
165 		return FFA_INVALID_PARAMETERS;
166 
167 	mm = tee_mm_alloc(&tee_mm_shm, sz);
168 	if (!mm)
169 		return FFA_NO_MEMORY;
170 
171 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
172 					  sz / SMALL_PAGE_SIZE,
173 					  MEM_AREA_NSEC_SHM)) {
174 		tee_mm_free(mm);
175 		return FFA_INVALID_PARAMETERS;
176 	}
177 
178 	*va_ret = (void *)tee_mm_get_smem(mm);
179 	return 0;
180 }
181 
182 static void unmap_buf(void *va, size_t sz)
183 {
184 	tee_mm_entry_t *mm = tee_mm_find(&tee_mm_shm, (vaddr_t)va);
185 
186 	assert(mm);
187 	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
188 	tee_mm_free(mm);
189 }
190 
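/*
 * Handle FFA_RXTX_MAP: register the caller's RX/TX buffer pair of
 * w3 * 4kB pages. The caller's TX buffer becomes our RX buffer and vice
 * versa. Buffers from normal world are physical addresses that are mapped
 * here, while buffers from an SP are already mapped virtual addresses.
 */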
191 void spmc_handle_rxtx_map(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
192 {
193 	int rc = 0;
194 	uint32_t ret_fid = FFA_ERROR;
195 	unsigned int sz = 0;
196 	paddr_t rx_pa = 0;
197 	paddr_t tx_pa = 0;
198 	void *rx = NULL;
199 	void *tx = NULL;
200 
201 	cpu_spin_lock(&rxtx->spinlock);
202 
203 	if (args->a3 & GENMASK_64(63, 6)) {
204 		rc = FFA_INVALID_PARAMETERS;
205 		goto out;
206 	}
207 
208 	sz = args->a3 * SMALL_PAGE_SIZE;
209 	if (!sz) {
210 		rc = FFA_INVALID_PARAMETERS;
211 		goto out;
212 	}
213 	/* TX/RX are swapped compared to the caller */
214 	tx_pa = args->a2;
215 	rx_pa = args->a1;
216 
217 	if (rxtx->size) {
218 		rc = FFA_DENIED;
219 		goto out;
220 	}
221 
222 	/*
223 	 * If the buffer comes from an SP the address is virtual and already
224 	 * mapped.
225 	 */
226 	if (is_nw_buf(rxtx)) {
227 		rc = map_buf(tx_pa, sz, &tx);
228 		if (rc)
229 			goto out;
230 		rc = map_buf(rx_pa, sz, &rx);
231 		if (rc) {
232 			unmap_buf(tx, sz);
233 			goto out;
234 		}
235 		rxtx->tx = tx;
236 		rxtx->rx = rx;
237 	} else {
238 		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
239 			rc = FFA_INVALID_PARAMETERS;
240 			goto out;
241 		}
242 
243 		if (!virt_to_phys((void *)tx_pa) ||
244 		    !virt_to_phys((void *)rx_pa)) {
245 			rc = FFA_INVALID_PARAMETERS;
246 			goto out;
247 		}
248 
249 		rxtx->tx = (void *)tx_pa;
250 		rxtx->rx = (void *)rx_pa;
251 	}
252 
253 	rxtx->size = sz;
254 	rxtx->tx_is_mine = true;
255 	ret_fid = FFA_SUCCESS_32;
256 	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
257 	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
258 out:
259 	cpu_spin_unlock(&rxtx->spinlock);
260 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
261 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
262 }
263 
264 void spmc_handle_rxtx_unmap(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
265 {
266 	uint32_t ret_fid = FFA_ERROR;
267 	int rc = FFA_INVALID_PARAMETERS;
268 
269 	cpu_spin_lock(&rxtx->spinlock);
270 
271 	if (!rxtx->size)
272 		goto out;
273 
274 	/* We don't unmap the SP memory as the SP might still use it */
275 	if (is_nw_buf(rxtx)) {
276 		unmap_buf(rxtx->rx, rxtx->size);
277 		unmap_buf(rxtx->tx, rxtx->size);
278 	}
279 	rxtx->size = 0;
280 	rxtx->rx = NULL;
281 	rxtx->tx = NULL;
282 	ret_fid = FFA_SUCCESS_32;
283 	rc = 0;
284 out:
285 	cpu_spin_unlock(&rxtx->spinlock);
286 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
287 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
288 }
289 
290 void spmc_handle_rx_release(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
291 {
292 	uint32_t ret_fid = 0;
293 	int rc = 0;
294 
295 	cpu_spin_lock(&rxtx->spinlock);
296 	/* The sender's RX is our TX */
297 	if (!rxtx->size || rxtx->tx_is_mine) {
298 		ret_fid = FFA_ERROR;
299 		rc = FFA_DENIED;
300 	} else {
301 		ret_fid = FFA_SUCCESS_32;
302 		rc = 0;
303 		rxtx->tx_is_mine = true;
304 	}
305 	cpu_spin_unlock(&rxtx->spinlock);
306 
307 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
308 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
309 }
310 
311 static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
312 {
313 	return !w0 && !w1 && !w2 && !w3;
314 }
315 
316 static bool is_my_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
317 {
318 	/*
319 	 * This depends on which UUID we have been assigned.
320 	 * TODO add a generic mechanism to obtain our UUID.
321 	 *
322 	 * The test below is for the hard-coded UUID
323 	 * 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
324 	 */
325 	return w0 == 0xe0786148 && w1 == 0xe311f8e7 &&
326 	       w2 == 0x02005ebc && w3 == 0x1bc5d5a5;
327 }
328 
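/*
 * Handle FFA_PARTITION_INFO_GET: for the nil UUID or our own UUID,
 * return a single struct ffa_partition_info describing this SPMC in the
 * caller's RX buffer (our TX buffer) and the count (1) in w2.
 */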
329 static void handle_partition_info_get(struct thread_smc_args *args,
330 				      struct ffa_rxtx *rxtx)
331 {
332 	uint32_t ret_fid = 0;
333 	int rc = 0;
334 
335 	if (!is_nil_uuid(args->a1, args->a2, args->a3, args->a4) &&
336 	    !is_my_uuid(args->a1, args->a2, args->a3, args->a4)) {
337 		ret_fid = FFA_ERROR;
338 		rc = FFA_INVALID_PARAMETERS;
339 		goto out;
340 	}
341 
342 	cpu_spin_lock(&rxtx->spinlock);
343 	if (rxtx->size && rxtx->tx_is_mine) {
344 		struct ffa_partition_info *fpi = rxtx->tx;
345 
346 		fpi->id = my_endpoint_id;
347 		fpi->execution_context = CFG_TEE_CORE_NB_CORE;
348 		/*
349 		 * Supports receipt of direct requests.
350 		 * Can send direct requests.
351 		 */
352 		fpi->partition_properties = BIT(0) | BIT(1);
353 
354 		ret_fid = FFA_SUCCESS_32;
355 		rc = 1;
356 		rxtx->tx_is_mine = false;
357 	} else {
358 		ret_fid = FFA_ERROR;
359 		if (rxtx->size)
360 			rc = FFA_BUSY;
361 		else
362 			rc = FFA_DENIED; /* TX buffer not set up yet */
363 	}
364 	cpu_spin_unlock(&rxtx->spinlock);
365 
366 out:
367 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
368 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
369 }
370 #endif /*CFG_CORE_SEL1_SPMC*/
371 
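/*
 * Handle a yielding OPTEE_FFA_YIELDING_CALL_* request carried in an
 * FFA_MSG_SEND_DIRECT_REQ: either resume a thread suspended on RPC or
 * allocate a new thread for the call. thread_resume_from_rpc() and
 * thread_alloc_and_run() only return on failure, in which case the error
 * is reported back in the direct response.
 */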
372 static void handle_yielding_call(struct thread_smc_args *args)
373 {
374 	TEE_Result res = 0;
375 
376 	thread_check_canaries();
377 
378 	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
379 		/* Note connection to struct thread_rpc_arg::ret */
380 		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
381 				       0);
382 		res = TEE_ERROR_BAD_PARAMETERS;
383 	} else {
384 		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
385 				     args->a6, args->a7);
386 		res = TEE_ERROR_BUSY;
387 	}
388 	spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
389 		      swap_src_dst(args->a1), 0, res, 0, 0);
390 }
391 
392 static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
393 {
394 	uint64_t cookie = reg_pair_to_64(a5, a4);
395 	uint32_t res = 0;
396 
397 	res = mobj_ffa_unregister_by_cookie(cookie);
398 	switch (res) {
399 	case TEE_SUCCESS:
400 	case TEE_ERROR_ITEM_NOT_FOUND:
401 		return 0;
402 	case TEE_ERROR_BUSY:
403 		EMSG("res %#"PRIx32, res);
404 		return FFA_BUSY;
405 	default:
406 		EMSG("res %#"PRIx32, res);
407 		return FFA_INVALID_PARAMETERS;
408 	}
409 }
410 
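/*
 * Handle a blocking OPTEE_FFA_* service request. These are lightweight
 * queries served directly on the current core without allocating a
 * thread.
 */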
411 static void handle_blocking_call(struct thread_smc_args *args)
412 {
413 	switch (args->a3) {
414 	case OPTEE_FFA_GET_API_VERSION:
415 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
416 			      swap_src_dst(args->a1), 0,
417 			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
418 			      0);
419 		break;
420 	case OPTEE_FFA_GET_OS_VERSION:
421 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
422 			      swap_src_dst(args->a1), 0,
423 			      CFG_OPTEE_REVISION_MAJOR,
424 			      CFG_OPTEE_REVISION_MINOR, TEE_IMPL_GIT_SHA1);
425 		break;
426 	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
427 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
428 			      swap_src_dst(args->a1), 0, 0,
429 			      THREAD_RPC_MAX_NUM_PARAMS, 0);
430 		break;
431 	case OPTEE_FFA_UNREGISTER_SHM:
432 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
433 			      swap_src_dst(args->a1), 0,
434 			      handle_unregister_shm(args->a4, args->a5), 0, 0);
435 		break;
436 	default:
437 		EMSG("Unhandled blocking service ID %#"PRIx32,
438 		     (uint32_t)args->a3);
439 		panic();
440 	}
441 }
442 
443 #if defined(CFG_CORE_SEL1_SPMC)
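/*
 * Find the memory access permissions and region offset assigned to our
 * endpoint in the endpoint memory access descriptor array of a memory
 * transaction descriptor.
 */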
444 static int get_acc_perms(struct ffa_mem_access *mem_acc,
445 			 unsigned int num_mem_accs, uint8_t *acc_perms,
446 			 unsigned int *region_offs)
447 {
448 	unsigned int n = 0;
449 
450 	for (n = 0; n < num_mem_accs; n++) {
451 		struct ffa_mem_access_perm *descr = &mem_acc[n].access_perm;
452 
453 		if (READ_ONCE(descr->endpoint_id) == my_endpoint_id) {
454 			*acc_perms = READ_ONCE(descr->perm);
455 			*region_offs = READ_ONCE(mem_acc[n].region_offs);
456 			return 0;
457 		}
458 	}
459 
460 	return FFA_INVALID_PARAMETERS;
461 }
462 
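/*
 * Validate the memory transaction descriptor in @buf for FFA_MEM_SHARE
 * and extract the total page count, the number of address ranges and the
 * offset of the first address range. Only normal memory shared with RW
 * access for our endpoint is accepted.
 */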
463 static int mem_share_init(void *buf, size_t blen, unsigned int *page_count,
464 			  unsigned int *region_count, size_t *addr_range_offs)
465 {
466 	const uint8_t exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
467 	const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
468 	struct ffa_mem_region *region_descr = NULL;
469 	struct ffa_mem_transaction *descr = NULL;
470 	unsigned int num_mem_accs = 0;
471 	uint8_t mem_acc_perm = 0;
472 	unsigned int region_descr_offs = 0;
473 	size_t n = 0;
474 
475 	if (!ALIGNMENT_IS_OK(buf, struct ffa_mem_transaction) ||
476 	    blen < sizeof(struct ffa_mem_transaction))
477 		return FFA_INVALID_PARAMETERS;
478 
479 	descr = buf;
480 
481 	/* Check that the endpoint memory access descriptor array fits */
482 	num_mem_accs = READ_ONCE(descr->mem_access_count);
483 	if (MUL_OVERFLOW(sizeof(struct ffa_mem_access), num_mem_accs, &n) ||
484 	    ADD_OVERFLOW(sizeof(*descr), n, &n) || n > blen)
485 		return FFA_INVALID_PARAMETERS;
486 
487 	if (READ_ONCE(descr->mem_reg_attr) != exp_mem_reg_attr)
488 		return FFA_INVALID_PARAMETERS;
489 
490 	/* Check that the access permissions match what's expected */
491 	if (get_acc_perms(descr->mem_access_array,
492 			  num_mem_accs, &mem_acc_perm, &region_descr_offs) ||
493 	    mem_acc_perm != exp_mem_acc_perm)
494 		return FFA_INVALID_PARAMETERS;
495 
496 	/* Check that the Composite memory region descriptor fits */
497 	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
498 	    n > blen)
499 		return FFA_INVALID_PARAMETERS;
500 
501 	if (!ALIGNMENT_IS_OK((vaddr_t)descr + region_descr_offs,
502 			     struct ffa_mem_region))
503 		return FFA_INVALID_PARAMETERS;
504 
505 	region_descr = (struct ffa_mem_region *)((vaddr_t)descr +
506 						 region_descr_offs);
507 	*page_count = READ_ONCE(region_descr->total_page_count);
508 	*region_count = READ_ONCE(region_descr->address_range_count);
509 	*addr_range_offs = n;
510 	return 0;
511 }
512 
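/*
 * Add as many address ranges from @buf (at most @flen bytes) as are
 * still expected to the mobj in @s. Returns the number of bytes consumed
 * when more ranges remain, 0 when all pages have been added, or a
 * negative FFA_* error code.
 */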
513 static int add_mem_share_helper(struct mem_share_state *s, void *buf,
514 				size_t flen)
515 {
516 	unsigned int region_count = flen / sizeof(struct ffa_address_range);
517 	struct ffa_address_range *arange = NULL;
518 	unsigned int n = 0;
519 
520 	if (region_count > s->region_count)
521 		region_count = s->region_count;
522 
523 	if (!ALIGNMENT_IS_OK(buf, struct ffa_address_range))
524 		return FFA_INVALID_PARAMETERS;
525 	arange = buf;
526 
527 	for (n = 0; n < region_count; n++) {
528 		unsigned int page_count = READ_ONCE(arange[n].page_count);
529 		uint64_t addr = READ_ONCE(arange[n].address);
530 
531 		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
532 					  addr, page_count))
533 			return FFA_INVALID_PARAMETERS;
534 	}
535 
536 	s->region_count -= region_count;
537 	if (s->region_count)
538 		return region_count * sizeof(*arange);
539 
540 	if (s->current_page_idx != s->page_count)
541 		return FFA_INVALID_PARAMETERS;
542 
543 	return 0;
544 }
545 
546 static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
547 {
548 	int rc = 0;
549 
550 	rc = add_mem_share_helper(&s->share, buf, flen);
551 	if (rc >= 0) {
552 		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
553 			if (s->share.region_count)
554 				return s->frag_offset;
555 			/* We're done, return the number of consumed bytes */
556 			rc = s->frag_offset;
557 		} else {
558 			rc = FFA_INVALID_PARAMETERS;
559 		}
560 	}
561 
562 	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
563 	if (rc < 0)
564 		mobj_ffa_sel1_spmc_delete(s->share.mf);
565 	else
566 		mobj_ffa_push_to_inactive(s->share.mf);
567 	free(s);
568 
569 	return rc;
570 }
571 
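/*
 * Register a new memory share from the transaction descriptor in @buf.
 * When the complete descriptor is present (@flen == @blen) the mobj is
 * pushed to the inactive list, its cookie is returned in @global_handle
 * and 0 is returned. Otherwise a struct mem_frag_state is queued to
 * receive the remaining fragments and the number of bytes consumed so
 * far is returned.
 */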
572 static int add_mem_share(tee_mm_entry_t *mm, void *buf, size_t blen,
573 			 size_t flen, uint64_t *global_handle)
574 {
575 	int rc = 0;
576 	struct mem_share_state share = { };
577 	size_t addr_range_offs = 0;
578 	size_t n = 0;
579 
580 	if (flen > blen)
581 		return FFA_INVALID_PARAMETERS;
582 
583 	rc = mem_share_init(buf, flen, &share.page_count, &share.region_count,
584 			    &addr_range_offs);
585 	if (rc)
586 		return rc;
587 
588 	if (MUL_OVERFLOW(share.region_count,
589 			 sizeof(struct ffa_address_range), &n) ||
590 	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
591 		return FFA_INVALID_PARAMETERS;
592 
593 	share.mf = mobj_ffa_sel1_spmc_new(share.page_count);
594 	if (!share.mf)
595 		return FFA_NO_MEMORY;
596 
597 	if (flen != blen) {
598 		struct mem_frag_state *s = calloc(1, sizeof(*s));
599 
600 		if (!s) {
601 			rc = FFA_NO_MEMORY;
602 			goto err;
603 		}
604 		s->share = share;
605 		s->mm = mm;
606 		s->frag_offset = addr_range_offs;
607 
608 		SLIST_INSERT_HEAD(&frag_state_head, s, link);
609 		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
610 					flen - addr_range_offs);
611 
612 		if (rc >= 0)
613 			*global_handle = mobj_ffa_get_cookie(share.mf);
614 
615 		return rc;
616 	}
617 
618 	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
619 				  flen - addr_range_offs);
620 	if (rc) {
621 		/*
622 		 * A positive number of consumed bytes may be returned
623 		 * instead of 0 for done; treat that as invalid here too.
624 		 */
625 		rc = FFA_INVALID_PARAMETERS;
626 		goto err;
627 	}
628 
629 	*global_handle = mobj_ffa_push_to_inactive(share.mf);
630 
631 	return 0;
632 err:
633 	mobj_ffa_sel1_spmc_delete(share.mf);
634 	return rc;
635 }
636 
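/*
 * Handle FFA_MEM_SHARE where the transaction descriptor is in a buffer
 * dynamically allocated by the caller (address in w3, page count in w4)
 * rather than in the registered TX buffer. The descriptor buffer is
 * temporarily mapped as non-secure shared memory while it's parsed.
 */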
637 static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
638 				 unsigned int page_count,
639 				 uint64_t *global_handle, struct ffa_rxtx *rxtx)
640 {
641 	int rc = 0;
642 	size_t len = 0;
643 	tee_mm_entry_t *mm = NULL;
644 	vaddr_t offs = pbuf & SMALL_PAGE_MASK;
645 
646 	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
647 		return FFA_INVALID_PARAMETERS;
648 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
649 		return FFA_INVALID_PARAMETERS;
650 
651 	/*
652 	 * Check that the length reported in blen is covered by len even
653 	 * when the offset is taken into account.
654 	 */
655 	if (len < blen || len - offs < blen)
656 		return FFA_INVALID_PARAMETERS;
657 
658 	mm = tee_mm_alloc(&tee_mm_shm, len);
659 	if (!mm)
660 		return FFA_NO_MEMORY;
661 
662 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
663 					  page_count, MEM_AREA_NSEC_SHM)) {
664 		rc = FFA_INVALID_PARAMETERS;
665 		goto out;
666 	}
667 
668 	cpu_spin_lock(&rxtx->spinlock);
669 	rc = add_mem_share(mm, (void *)(tee_mm_get_smem(mm) + offs), blen, flen,
670 			   global_handle);
671 	cpu_spin_unlock(&rxtx->spinlock);
672 	if (rc > 0)
673 		return rc;
674 
675 	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
676 out:
677 	tee_mm_free(mm);
678 	return rc;
679 }
680 
681 static int handle_mem_share_rxbuf(size_t blen, size_t flen,
682 				  uint64_t *global_handle,
683 				  struct ffa_rxtx *rxtx)
684 {
685 	int rc = FFA_DENIED;
686 
687 	cpu_spin_lock(&rxtx->spinlock);
688 
689 	if (rxtx->rx && flen <= rxtx->size)
690 		rc = add_mem_share(NULL, rxtx->rx, blen, flen, global_handle);
691 
692 	cpu_spin_unlock(&rxtx->spinlock);
693 
694 	return rc;
695 }
696 
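/*
 * Handle FFA_MEM_SHARE: the transaction descriptor is read either from
 * the registered RX buffer (w3 == 0) or from a dynamically allocated
 * buffer at the address in w3. A complete transaction is answered with
 * FFA_SUCCESS and the global handle in w2/w3, a fragmented one with
 * FFA_MEM_FRAG_RX to request the next fragment.
 */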
697 static void handle_mem_share(struct thread_smc_args *args,
698 			     struct ffa_rxtx *rxtx)
699 {
700 	uint32_t ret_w1 = 0;
701 	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
702 	uint32_t ret_w3 = 0;
703 	uint32_t ret_fid = FFA_ERROR;
704 	uint64_t global_handle = 0;
705 	int rc = 0;
706 
707 	/* Check that the MBZs are indeed 0 */
708 	if (args->a5 || args->a6 || args->a7)
709 		goto out;
710 
711 	if (!args->a3) {
712 		/*
713 		 * The memory transaction descriptor is passed via our rx
714 		 * buffer.
715 		 */
716 		if (args->a4)
717 			goto out;
718 		rc = handle_mem_share_rxbuf(args->a1, args->a2, &global_handle,
719 					    rxtx);
720 	} else {
721 		rc = handle_mem_share_tmem(args->a3, args->a1, args->a2,
722 					   args->a4, &global_handle, rxtx);
723 	}
724 	if (rc < 0) {
725 		ret_w2 = rc;
726 		goto out;
727 	}
728 	if (rc > 0) {
729 		ret_fid = FFA_MEM_FRAG_RX;
730 		ret_w3 = rc;
731 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
		goto out;
732 	}
733 	ret_fid = FFA_SUCCESS_32;
734 	reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
735 out:
736 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
737 }
738 
739 static struct mem_frag_state *get_frag_state(uint64_t global_handle)
740 {
741 	struct mem_frag_state *s = NULL;
742 
743 	SLIST_FOREACH(s, &frag_state_head, link)
744 		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
745 			return s;
746 
747 	return NULL;
748 }
749 
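/*
 * Handle FFA_MEM_FRAG_TX: receive the next fragment of a fragmented
 * FFA_MEM_SHARE transaction identified by the handle in w1/w2. The
 * fragment is read from the temporarily mapped buffer (dynamic buffer
 * case) or from the registered RX buffer.
 */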
750 static void handle_mem_frag_tx(struct thread_smc_args *args,
751 			       struct ffa_rxtx *rxtx)
752 {
753 	int rc = 0;
754 	uint64_t global_handle = reg_pair_to_64(READ_ONCE(args->a2),
755 						READ_ONCE(args->a1));
756 	size_t flen = READ_ONCE(args->a3);
757 	struct mem_frag_state *s = NULL;
758 	tee_mm_entry_t *mm = NULL;
759 	unsigned int page_count = 0;
760 	void *buf = NULL;
761 	uint32_t ret_w1 = 0;
762 	uint32_t ret_w2 = 0;
763 	uint32_t ret_w3 = 0;
764 	uint32_t ret_fid = 0;
765 
766 	/*
767 	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
768 	 * requests.
769 	 */
770 
771 	cpu_spin_lock(&rxtx->spinlock);
772 
773 	s = get_frag_state(global_handle);
774 	if (!s) {
775 		rc = FFA_INVALID_PARAMETERS;
776 		goto out;
777 	}
778 
779 	mm = s->mm;
780 	if (mm) {
781 		if (flen > tee_mm_get_bytes(mm)) {
782 			rc = FFA_INVALID_PARAMETERS;
783 			goto out;
784 		}
785 		page_count = s->share.page_count;
786 		buf = (void *)tee_mm_get_smem(mm);
787 	} else {
788 		if (flen > rxtx->size) {
789 			rc = FFA_INVALID_PARAMETERS;
790 			goto out;
791 		}
792 		buf = rxtx->rx;
793 	}
794 
795 	rc = add_mem_share_frag(s, buf, flen);
796 out:
797 	cpu_spin_unlock(&rxtx->spinlock);
798 
799 	if (rc <= 0 && mm) {
800 		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
801 		tee_mm_free(mm);
802 	}
803 
804 	if (rc < 0) {
805 		ret_fid = FFA_ERROR;
806 		ret_w2 = rc;
807 	} else if (rc > 0) {
808 		ret_fid = FFA_MEM_FRAG_RX;
809 		ret_w3 = rc;
810 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
811 	} else {
812 		ret_fid = FFA_SUCCESS_32;
813 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
814 	}
815 
816 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
817 }
818 
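/*
 * Handle FFA_MEM_RECLAIM: return the memory region identified by the
 * cookie in w1/w2 to the owner, provided it's no longer in use.
 */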
819 static void handle_mem_reclaim(struct thread_smc_args *args)
820 {
821 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
822 	uint32_t ret_fid = FFA_ERROR;
823 	uint64_t cookie = 0;
824 
825 	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
826 		goto out;
827 
828 	cookie = reg_pair_to_64(args->a2, args->a1);
829 	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
830 	case TEE_SUCCESS:
831 		ret_fid = FFA_SUCCESS_32;
832 		ret_val = 0;
833 		break;
834 	case TEE_ERROR_ITEM_NOT_FOUND:
835 		DMSG("cookie %#"PRIx64" not found", cookie);
836 		ret_val = FFA_INVALID_PARAMETERS;
837 		break;
838 	default:
839 		DMSG("cookie %#"PRIx64" busy", cookie);
840 		ret_val = FFA_DENIED;
841 		break;
842 	}
843 out:
844 	spmc_set_args(args, ret_fid, ret_val, 0, 0, 0, 0);
845 }
846 #endif
847 
848 /* Only called from assembly */
849 void thread_spmc_msg_recv(struct thread_smc_args *args);
850 void thread_spmc_msg_recv(struct thread_smc_args *args)
851 {
852 	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
853 	switch (args->a0) {
854 #if defined(CFG_CORE_SEL1_SPMC)
855 	case FFA_VERSION:
856 		spmc_handle_version(args);
857 		break;
858 	case FFA_FEATURES:
859 		handle_features(args);
860 		break;
861 #ifdef ARM64
862 	case FFA_RXTX_MAP_64:
863 #endif
864 	case FFA_RXTX_MAP_32:
865 		spmc_handle_rxtx_map(args, &nw_rxtx);
866 		break;
867 	case FFA_RXTX_UNMAP:
868 		spmc_handle_rxtx_unmap(args, &nw_rxtx);
869 		break;
870 	case FFA_RX_RELEASE:
871 		spmc_handle_rx_release(args, &nw_rxtx);
872 		break;
873 	case FFA_PARTITION_INFO_GET:
874 		handle_partition_info_get(args, &nw_rxtx);
875 		break;
876 #endif /*CFG_CORE_SEL1_SPMC*/
877 	case FFA_INTERRUPT:
878 		itr_core_handler();
879 		spmc_set_args(args, FFA_SUCCESS_32, args->a1, 0, 0, 0, 0);
880 		break;
881 	case FFA_MSG_SEND_DIRECT_REQ_32:
882 		if (IS_ENABLED(CFG_SECURE_PARTITION) &&
883 		    FFA_DST(args->a1) != my_endpoint_id) {
884 			spmc_sp_start_thread(args);
885 			break;
886 		}
887 
888 		if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
889 			handle_yielding_call(args);
890 		else
891 			handle_blocking_call(args);
892 		break;
893 #if defined(CFG_CORE_SEL1_SPMC)
894 #ifdef ARM64
895 	case FFA_MEM_SHARE_64:
896 #endif
897 	case FFA_MEM_SHARE_32:
898 		handle_mem_share(args, &nw_rxtx);
899 		break;
900 	case FFA_MEM_RECLAIM:
901 		handle_mem_reclaim(args);
902 		break;
903 	case FFA_MEM_FRAG_TX:
904 		handle_mem_frag_tx(args, &nw_rxtx);
905 		break;
906 #endif /*CFG_CORE_SEL1_SPMC*/
907 	default:
908 		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
909 		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
910 			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
911 	}
912 }
913 
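/*
 * Handle OPTEE_FFA_YIELDING_CALL_WITH_ARG: look up the shared memory
 * object identified by @cookie, map it and process the struct
 * optee_msg_arg found at @offset. The area following the argument
 * struct is used for RPC arguments while the call is in progress.
 */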
914 static uint32_t yielding_call_with_arg(uint64_t cookie, uint32_t offset)
915 {
916 	size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
917 	struct thread_ctx *thr = threads + thread_get_id();
918 	uint32_t rv = TEE_ERROR_BAD_PARAMETERS;
919 	struct optee_msg_arg *arg = NULL;
920 	struct mobj *mobj = NULL;
921 	uint32_t num_params = 0;
922 	size_t sz = 0;
923 
924 	mobj = mobj_ffa_get_by_cookie(cookie, 0);
925 	if (!mobj) {
926 		EMSG("Can't find cookie %#"PRIx64, cookie);
927 		return TEE_ERROR_BAD_PARAMETERS;
928 	}
929 
930 	rv = mobj_inc_map(mobj);
931 	if (rv)
932 		goto out_put_mobj;
933 
934 	rv = TEE_ERROR_BAD_PARAMETERS;
935 	arg = mobj_get_va(mobj, offset);
936 	if (!arg)
937 		goto out_dec_map;
938 
939 	if (!mobj_get_va(mobj, offset + sizeof(*arg)))
940 		goto out_dec_map;
941 
942 	num_params = READ_ONCE(arg->num_params);
943 	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
944 		goto out_dec_map;
945 
946 	sz = OPTEE_MSG_GET_ARG_SIZE(num_params);
947 	thr->rpc_arg = mobj_get_va(mobj, offset + sz);
948 	if (!thr->rpc_arg || !mobj_get_va(mobj, offset + sz + sz_rpc))
949 		goto out_dec_map;
950 
951 	rv = tee_entry_std(arg, num_params);
952 
953 	thread_rpc_shm_cache_clear(&thr->shm_cache);
954 	thr->rpc_arg = NULL;
955 
956 out_dec_map:
957 	mobj_dec_map(mobj);
958 out_put_mobj:
959 	mobj_put(mobj);
960 	return rv;
961 }
962 
963 /*
964  * Helper routine for the assembly function thread_std_smc_entry()
965  *
966  * Note: this function is weak just to make it possible to exclude it from
967  * the unpaged area.
968  */
969 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
970 				       uint32_t a2, uint32_t a3,
971 				       uint32_t a4, uint32_t a5 __unused)
972 {
973 	/*
974 	 * Arguments are supplied from handle_yielding_call() as:
975 	 * a0 <- w1
976 	 * a1 <- w3
977 	 * a2 <- w4
978 	 * a3 <- w5
979 	 * a4 <- w6
980 	 * a5 <- w7
981 	 */
982 	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
983 	if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG)
984 		return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4);
985 	return FFA_DENIED;
986 }
987 
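/*
 * Translate a memref thread_param into an OPTEE_MSG_ATTR_TYPE_FMEM_*
 * parameter, using the mobj cookie as global identifier.
 */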
988 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
989 {
990 	uint64_t offs = tpm->u.memref.offs;
991 
992 	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
993 		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
994 
995 	param->u.fmem.offs_low = offs;
996 	param->u.fmem.offs_high = offs >> 32;
997 	if (param->u.fmem.offs_high != offs >> 32)
998 		return false;
999 
1000 	param->u.fmem.size = tpm->u.memref.size;
1001 	if (tpm->u.memref.mobj) {
1002 		uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj);
1003 
1004 		/* If a mobj is passed it better be one with a valid cookie. */
1005 		if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
1006 			return false;
1007 		param->u.fmem.global_id = cookie;
1008 	} else {
1009 		param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
1010 	}
1011 
1012 	return true;
1013 }
1014 
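/*
 * Fill in the preallocated RPC argument struct (thr->rpc_arg) with @cmd
 * and @num_params parameters translated from struct thread_param to
 * struct optee_msg_param representation.
 */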
1015 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
1016 			    struct thread_param *params,
1017 			    struct optee_msg_arg **arg_ret)
1018 {
1019 	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1020 	struct thread_ctx *thr = threads + thread_get_id();
1021 	struct optee_msg_arg *arg = thr->rpc_arg;
1022 
1023 	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
1024 		return TEE_ERROR_BAD_PARAMETERS;
1025 
1026 	if (!arg) {
1027 		EMSG("rpc_arg not set");
1028 		return TEE_ERROR_GENERIC;
1029 	}
1030 
1031 	memset(arg, 0, sz);
1032 	arg->cmd = cmd;
1033 	arg->num_params = num_params;
1034 	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
1035 
1036 	for (size_t n = 0; n < num_params; n++) {
1037 		switch (params[n].attr) {
1038 		case THREAD_PARAM_ATTR_NONE:
1039 			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
1040 			break;
1041 		case THREAD_PARAM_ATTR_VALUE_IN:
1042 		case THREAD_PARAM_ATTR_VALUE_OUT:
1043 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1044 			arg->params[n].attr = params[n].attr -
1045 					      THREAD_PARAM_ATTR_VALUE_IN +
1046 					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
1047 			arg->params[n].u.value.a = params[n].u.value.a;
1048 			arg->params[n].u.value.b = params[n].u.value.b;
1049 			arg->params[n].u.value.c = params[n].u.value.c;
1050 			break;
1051 		case THREAD_PARAM_ATTR_MEMREF_IN:
1052 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1053 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1054 			if (!set_fmem(arg->params + n, params + n))
1055 				return TEE_ERROR_BAD_PARAMETERS;
1056 			break;
1057 		default:
1058 			return TEE_ERROR_BAD_PARAMETERS;
1059 		}
1060 	}
1061 
1062 	if (arg_ret)
1063 		*arg_ret = arg;
1064 
1065 	return TEE_SUCCESS;
1066 }
1067 
1068 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
1069 				struct thread_param *params)
1070 {
1071 	for (size_t n = 0; n < num_params; n++) {
1072 		switch (params[n].attr) {
1073 		case THREAD_PARAM_ATTR_VALUE_OUT:
1074 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1075 			params[n].u.value.a = arg->params[n].u.value.a;
1076 			params[n].u.value.b = arg->params[n].u.value.b;
1077 			params[n].u.value.c = arg->params[n].u.value.c;
1078 			break;
1079 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1080 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1081 			params[n].u.memref.size = arg->params[n].u.fmem.size;
1082 			break;
1083 		default:
1084 			break;
1085 		}
1086 	}
1087 
1088 	return arg->ret;
1089 }
1090 
1091 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
1092 			struct thread_param *params)
1093 {
1094 	struct thread_rpc_arg rpc_arg = { .call = {
1095 			.w1 = thread_get_tsd()->rpc_target_info,
1096 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1097 		},
1098 	};
1099 	struct optee_msg_arg *arg = NULL;
1100 	uint32_t ret = 0;
1101 
1102 	ret = get_rpc_arg(cmd, num_params, params, &arg);
1103 	if (ret)
1104 		return ret;
1105 
1106 	thread_rpc(&rpc_arg);
1107 
1108 	return get_rpc_arg_res(arg, num_params, params);
1109 }
1110 
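/*
 * Tell normal world, via RPC, to free previously allocated shared memory
 * and unregister the cookie locally.
 */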
1111 static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
1112 {
1113 	struct thread_rpc_arg rpc_arg = { .call = {
1114 			.w1 = thread_get_tsd()->rpc_target_info,
1115 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1116 		},
1117 	};
1118 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
1119 	uint32_t res2 = 0;
1120 	uint32_t res = 0;
1121 
1122 	DMSG("freeing cookie %#"PRIx64, cookie);
1123 
1124 	res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);
1125 
1126 	mobj_put(mobj);
1127 	res2 = mobj_ffa_unregister_by_cookie(cookie);
1128 	if (res2)
1129 		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
1130 		     cookie, res2);
1131 	if (!res)
1132 		thread_rpc(&rpc_arg);
1133 }
1134 
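/*
 * Ask normal world, via RPC, to allocate @size bytes of shared memory of
 * buffer type @bt, aligned to @align, and return a mapped mobj for it or
 * NULL on failure.
 */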
1135 static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
1136 {
1137 	struct thread_rpc_arg rpc_arg = { .call = {
1138 			.w1 = thread_get_tsd()->rpc_target_info,
1139 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1140 		},
1141 	};
1142 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
1143 	struct optee_msg_arg *arg = NULL;
1144 	unsigned int internal_offset = 0;
1145 	struct mobj *mobj = NULL;
1146 	uint64_t cookie = 0;
1147 
1148 	if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
1149 		return NULL;
1150 
1151 	thread_rpc(&rpc_arg);
1152 
1153 	if (arg->num_params != 1 ||
1154 	    arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
1155 		return NULL;
1156 
1157 	internal_offset = arg->params->u.fmem.internal_offs;
1158 	cookie = arg->params->u.fmem.global_id;
1159 	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
1160 	if (!mobj) {
1161 		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
1162 		     cookie, internal_offset);
1163 		return NULL;
1164 	}
1165 
1166 	assert(mobj_is_nonsec(mobj));
1167 
1168 	if (mobj_inc_map(mobj)) {
1169 		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
1170 		mobj_put(mobj);
1171 		return NULL;
1172 	}
1173 
1174 	return mobj;
1175 }
1176 
1177 struct mobj *thread_rpc_alloc_payload(size_t size)
1178 {
1179 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
1180 }
1181 
1182 struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
1183 {
1184 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
1185 }
1186 
1187 void thread_rpc_free_kernel_payload(struct mobj *mobj)
1188 {
1189 	thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL, mobj_get_cookie(mobj), mobj);
1190 }
1191 
1192 void thread_rpc_free_payload(struct mobj *mobj)
1193 {
1194 	thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
1195 			mobj);
1196 }
1197 
1198 struct mobj *thread_rpc_alloc_global_payload(size_t size)
1199 {
1200 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
1201 }
1202 
1203 void thread_rpc_free_global_payload(struct mobj *mobj)
1204 {
1205 	thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL, mobj_get_cookie(mobj),
1206 			mobj);
1207 }
1208 
1209 #ifdef CFG_CORE_SEL2_SPMC
1210 static bool is_ffa_success(uint32_t fid)
1211 {
1212 #ifdef ARM64
1213 	if (fid == FFA_SUCCESS_64)
1214 		return true;
1215 #endif
1216 	return fid == FFA_SUCCESS_32;
1217 }
1218 
1219 static void spmc_rxtx_map(struct ffa_rxtx *rxtx)
1220 {
1221 	struct thread_smc_args args = {
1222 #ifdef ARM64
1223 		.a0 = FFA_RXTX_MAP_64,
1224 #else
1225 		.a0 = FFA_RXTX_MAP_32,
1226 #endif
1227 		.a1 = (vaddr_t)rxtx->tx,
1228 		.a2 = (vaddr_t)rxtx->rx,
1229 		.a3 = 1,
1230 	};
1231 
1232 	thread_smccc(&args);
1233 	if (!is_ffa_success(args.a0)) {
1234 		if (args.a0 == FFA_ERROR)
1235 			EMSG("rxtx map failed with error %ld", args.a2);
1236 		else
1237 			EMSG("rxtx map failed");
1238 		panic();
1239 	}
1240 }
1241 
1242 static uint16_t spmc_get_id(void)
1243 {
1244 	struct thread_smc_args args = {
1245 		.a0 = FFA_ID_GET,
1246 	};
1247 
1248 	thread_smccc(&args);
1249 	if (!is_ffa_success(args.a0)) {
1250 		if (args.a0 == FFA_ERROR)
1251 			EMSG("Get id failed with error %ld", args.a2);
1252 		else
1253 			EMSG("Get id failed");
1254 		panic();
1255 	}
1256 
1257 	return args.a2;
1258 }
1259 
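/*
 * Issue FFA_MEM_RETRIEVE_REQ to the SPMC for the memory region identified
 * by @cookie. The request descriptor is built in our TX buffer and the
 * response is delivered in our RX buffer. Returns NULL on failure.
 */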
1260 static struct ffa_mem_transaction *spmc_retrieve_req(uint64_t cookie)
1261 {
1262 	struct ffa_mem_transaction *trans_descr = nw_rxtx.tx;
1263 	struct ffa_mem_access *acc_descr_array = NULL;
1264 	struct ffa_mem_access_perm *perm_descr = NULL;
1265 	size_t size = sizeof(*trans_descr) +
1266 		      1 * sizeof(struct ffa_mem_access);
1267 	struct thread_smc_args args = {
1268 		.a0 = FFA_MEM_RETRIEVE_REQ_32,
1269 		.a1 =   size,	/* Total Length */
1270 		.a2 =	size,	/* Frag Length == Total length */
1271 		.a3 =	0,	/* Address, Using TX -> MBZ */
1272 		.a4 =   0,	/* Using TX -> MBZ */
1273 	};
1274 
1275 	memset(trans_descr, 0, size);
1276 	trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
1277 	trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
1278 	trans_descr->global_handle = cookie;
1279 	trans_descr->flags = FFA_MEMORY_REGION_FLAG_TIME_SLICE |
1280 			     FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
1281 			     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
1282 	trans_descr->mem_access_count = 1;
1283 	acc_descr_array = trans_descr->mem_access_array;
1284 	acc_descr_array->region_offs = 0;
1285 	acc_descr_array->reserved = 0;
1286 	perm_descr = &acc_descr_array->access_perm;
1287 	perm_descr->endpoint_id = my_endpoint_id;
1288 	perm_descr->perm = FFA_MEM_ACC_RW;
1289 	perm_descr->flags = FFA_MEMORY_REGION_FLAG_TIME_SLICE;
1290 
1291 	thread_smccc(&args);
1292 	if (args.a0 != FFA_MEM_RETRIEVE_RESP) {
1293 		if (args.a0 == FFA_ERROR)
1294 			EMSG("Failed to fetch cookie %#"PRIx64" error code %d",
1295 			     cookie, (int)args.a2);
1296 		else
1297 			EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64,
1298 			     cookie, args.a0);
1299 		return NULL;
1300 	}
1301 
1302 	return nw_rxtx.rx;
1303 }
1304 
1305 void thread_spmc_relinquish(uint64_t cookie)
1306 {
1307 	struct ffa_mem_relinquish *relinquish_desc = nw_rxtx.tx;
1308 	struct thread_smc_args args = {
1309 		.a0 = FFA_MEM_RELINQUISH,
1310 	};
1311 
1312 	memset(relinquish_desc, 0, sizeof(*relinquish_desc));
1313 	relinquish_desc->handle = cookie;
1314 	relinquish_desc->flags = 0;
1315 	relinquish_desc->endpoint_count = 1;
1316 	relinquish_desc->endpoint_id_array[0] = my_endpoint_id;
1317 	thread_smccc(&args);
1318 	if (!is_ffa_success(args.a0))
1319 		EMSG("Failed to relinquish cookie %#"PRIx64, cookie);
1320 }
1321 
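/*
 * Add the pages described by @regions to the mobj @mf and check that the
 * total number of pages matches @num_pages.
 */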
1322 static int set_pages(struct ffa_address_range *regions,
1323 		     unsigned int num_regions, unsigned int num_pages,
1324 		     struct mobj_ffa *mf)
1325 {
1326 	unsigned int n = 0;
1327 	unsigned int idx = 0;
1328 
1329 	for (n = 0; n < num_regions; n++) {
1330 		unsigned int page_count = READ_ONCE(regions[n].page_count);
1331 		uint64_t addr = READ_ONCE(regions[n].address);
1332 
1333 		if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count))
1334 			return FFA_INVALID_PARAMETERS;
1335 	}
1336 
1337 	if (idx != num_pages)
1338 		return FFA_INVALID_PARAMETERS;
1339 
1340 	return 0;
1341 }
1342 
1343 struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie)
1344 {
1345 	struct mobj_ffa *ret = NULL;
1346 	struct ffa_mem_transaction *retrieve_desc = NULL;
1347 	struct ffa_mem_access *descr_array = NULL;
1348 	struct ffa_mem_region *descr = NULL;
1349 	struct mobj_ffa *mf = NULL;
1350 	unsigned int num_pages = 0;
1351 	unsigned int offs = 0;
1352 	struct thread_smc_args ffa_rx_release_args = {
1353 		.a0 = FFA_RX_RELEASE
1354 	};
1355 
1356 	/*
1357 	 * OP-TEE only supports a single mem_region while the
1358 	 * specification allows for more than one.
1359 	 */
1360 	retrieve_desc = spmc_retrieve_req(cookie);
1361 	if (!retrieve_desc) {
1362 		EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64,
1363 		     cookie);
1364 		return NULL;
1365 	}
1366 
1367 	descr_array = retrieve_desc->mem_access_array;
1368 	offs = READ_ONCE(descr_array->region_offs);
1369 	descr = (struct ffa_mem_region *)((vaddr_t)retrieve_desc + offs);
1370 
1371 	num_pages = READ_ONCE(descr->total_page_count);
1372 	mf = mobj_ffa_sel2_spmc_new(cookie, num_pages);
1373 	if (!mf)
1374 		goto out;
1375 
1376 	if (set_pages(descr->address_range_array,
1377 		      READ_ONCE(descr->address_range_count), num_pages, mf)) {
1378 		mobj_ffa_sel2_spmc_delete(mf);
1379 		goto out;
1380 	}
1381 
1382 	ret = mf;
1383 
1384 out:
1385 	/* Release RX buffer after the mem retrieve request. */
1386 	thread_smccc(&ffa_rx_release_args);
1387 
1388 	return ret;
1389 }
1390 
1391 static TEE_Result spmc_init(void)
1392 {
1393 	spmc_rxtx_map(&nw_rxtx);
1394 	my_endpoint_id = spmc_get_id();
1395 	DMSG("My endpoint ID %#x", my_endpoint_id);
1396 
1397 	return TEE_SUCCESS;
1398 }
1399 #endif /*CFG_CORE_SEL2_SPMC*/
1400 
1401 #if defined(CFG_CORE_SEL1_SPMC)
1402 static TEE_Result spmc_init(void)
1403 {
1404 	my_endpoint_id = SPMC_ENDPOINT_ID;
1405 	DMSG("My endpoint ID %#x", my_endpoint_id);
1406 
1407 	return TEE_SUCCESS;
1408 }
1409 #endif /*CFG_CORE_SEL1_SPMC*/
1410 
1411 service_init(spmc_init);
1412