xref: /optee_os/core/arch/arm/kernel/thread_spmc.c (revision 3fb72c2260c0ef0bdf9267aea2df7b3f9b076465)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2020-2021, Linaro Limited.
4  * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
5  */
6 
7 #include <assert.h>
8 #include <ffa.h>
9 #include <initcall.h>
10 #include <io.h>
11 #include <kernel/interrupt.h>
12 #include <kernel/panic.h>
13 #include <kernel/secure_partition.h>
14 #include <kernel/spinlock.h>
15 #include <kernel/spmc_sp_handler.h>
16 #include <kernel/tee_misc.h>
17 #include <kernel/thread.h>
18 #include <kernel/thread_private.h>
19 #include <kernel/thread_spmc.h>
20 #include <mm/core_mmu.h>
21 #include <mm/mobj.h>
22 #include <optee_ffa.h>
23 #include <optee_msg.h>
24 #include <optee_rpc_cmd.h>
25 #include <string.h>
26 #include <sys/queue.h>
27 #include <tee/entry_std.h>
28 #include <tee/uuid.h>
29 #include <util.h>
30 
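/*
 * State for a memory share operation that may arrive in fragments
 * (FFA_MEM_SHARE followed by FFA_MEM_FRAG_TX calls). struct mem_share_state
 * tracks the mobj being populated together with the expected page and
 * address range counts, while struct mem_frag_state additionally records
 * the buffer the fragments are read from and how far into the descriptor
 * we have come. Pending states are kept in frag_state_head below until the
 * last fragment has been consumed.
 */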
31 #if defined(CFG_CORE_SEL1_SPMC)
32 struct mem_share_state {
33 	struct mobj_ffa *mf;
34 	unsigned int page_count;
35 	unsigned int region_count;
36 	unsigned int current_page_idx;
37 };
38 
39 struct mem_frag_state {
40 	struct mem_share_state share;
41 	tee_mm_entry_t *mm;
42 	unsigned int frag_offset;
43 	SLIST_ENTRY(mem_frag_state) link;
44 };
45 #endif
46 
47 /* Initialized in spmc_init() below */
48 static uint16_t my_endpoint_id;
49 
50 /*
51  * If struct ffa_rxtx::size is 0, RX/TX buffers are not mapped or initialized.
52  *
53  * struct ffa_rxtx::spinlock protects the variables below from concurrent
54  * access; this includes the contents of struct ffa_rxtx::rx and
55  * @frag_state_head.
56  *
57  * struct ffa_rxtx::tx_is_mine is true when we may write to struct
58  * ffa_rxtx::tx and false when it is owned by the normal world.
59  *
60  * Note that we can't prevent the normal world from updating the content
61  * of these buffers, so we must always be careful when reading, even
62  * while we hold the lock.
63  */
64 
65 #ifdef CFG_CORE_SEL1_SPMC
66 static struct ffa_rxtx nw_rxtx;
67 
68 static bool is_nw_buf(struct ffa_rxtx *rxtx)
69 {
70 	return rxtx == &nw_rxtx;
71 }
72 
73 static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
74 	SLIST_HEAD_INITIALIZER(&frag_state_head);
75 #else
76 static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
77 static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
78 static struct ffa_rxtx nw_rxtx = { .rx = __rx_buf, .tx = __tx_buf };
79 #endif
80 
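/*
 * In FFA_MSG_SEND_DIRECT_REQ/RESP the sender endpoint ID is carried in the
 * top 16 bits of w1 and the receiver endpoint ID in the bottom 16 bits, so
 * swapping the two halves of w1 from a request gives the w1 to use in the
 * response.
 */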
81 static uint32_t swap_src_dst(uint32_t src_dst)
82 {
83 	return (src_dst >> 16) | (src_dst << 16);
84 }
85 
86 void spmc_set_args(struct thread_smc_args *args, uint32_t fid, uint32_t src_dst,
87 		   uint32_t w2, uint32_t w3, uint32_t w4, uint32_t w5)
88 {
89 	*args = (struct thread_smc_args){ .a0 = fid,
90 					  .a1 = src_dst,
91 					  .a2 = w2,
92 					  .a3 = w3,
93 					  .a4 = w4,
94 					  .a5 = w5, };
95 }
96 
97 #if defined(CFG_CORE_SEL1_SPMC)
98 void spmc_handle_version(struct thread_smc_args *args)
99 {
100 	/*
101 	 * We currently only support one version, 1.0, so let's keep it
102 	 * simple.
103 	 */
104 	spmc_set_args(args,
105 		      MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR),
106 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
107 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
108 }
109 
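/*
 * FFA_FEATURES: w1 holds the function ID being queried. Functions
 * implemented here are answered with FFA_SUCCESS_32, optionally with
 * interface properties in w2, everything else with FFA_ERROR and
 * FFA_NOT_SUPPORTED.
 */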
110 static void handle_features(struct thread_smc_args *args)
111 {
112 	uint32_t ret_fid = 0;
113 	uint32_t ret_w2 = FFA_PARAM_MBZ;
114 
115 	switch (args->a1) {
116 #ifdef ARM64
117 	case FFA_RXTX_MAP_64:
118 #endif
119 	case FFA_RXTX_MAP_32:
120 		ret_fid = FFA_SUCCESS_32;
121 		ret_w2 = 0; /* 4 KiB minimum buffer size and alignment boundary */
122 		break;
123 #ifdef ARM64
124 	case FFA_MEM_SHARE_64:
125 #endif
126 	case FFA_MEM_SHARE_32:
127 		ret_fid = FFA_SUCCESS_32;
128 		/*
129 		 * Partition manager supports transmission of a memory
130 		 * transaction descriptor in a buffer dynamically allocated
131 		 * by the endpoint.
132 		 */
133 		ret_w2 = BIT(0);
134 		break;
135 
136 	case FFA_ERROR:
137 	case FFA_VERSION:
138 	case FFA_SUCCESS_32:
139 #ifdef ARM64
140 	case FFA_SUCCESS_64:
141 #endif
142 	case FFA_MEM_FRAG_TX:
143 	case FFA_MEM_RECLAIM:
144 	case FFA_MSG_SEND_DIRECT_REQ_32:
145 	case FFA_INTERRUPT:
146 	case FFA_PARTITION_INFO_GET:
147 	case FFA_RX_RELEASE:
148 		ret_fid = FFA_SUCCESS_32;
149 		break;
150 	default:
151 		ret_fid = FFA_ERROR;
152 		ret_w2 = FFA_NOT_SUPPORTED;
153 		break;
154 	}
155 
156 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
157 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
158 }
159 
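/*
 * Maps a normal world buffer into the nonsecure shared memory area. The
 * physical address must be nonsecure memory and the size is expected to
 * be a multiple of SMALL_PAGE_SIZE.
 */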
160 static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
161 {
162 	tee_mm_entry_t *mm = NULL;
163 
164 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
165 		return FFA_INVALID_PARAMETERS;
166 
167 	mm = tee_mm_alloc(&tee_mm_shm, sz);
168 	if (!mm)
169 		return FFA_NO_MEMORY;
170 
171 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
172 					  sz / SMALL_PAGE_SIZE,
173 					  MEM_AREA_NSEC_SHM)) {
174 		tee_mm_free(mm);
175 		return FFA_INVALID_PARAMETERS;
176 	}
177 
178 	*va_ret = (void *)tee_mm_get_smem(mm);
179 	return 0;
180 }
181 
182 static void unmap_buf(void *va, size_t sz)
183 {
184 	tee_mm_entry_t *mm = tee_mm_find(&tee_mm_shm, (vaddr_t)va);
185 
186 	assert(mm);
187 	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
188 	tee_mm_free(mm);
189 }
190 
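/*
 * FFA_RXTX_MAP: w1 holds the base address of the caller's TX buffer, w2
 * the caller's RX buffer and w3 the number of contiguous 4 KiB pages in
 * each buffer. What the caller transmits is what we receive, hence the
 * TX/RX swap below. Page counts that don't fit in the low six bits of w3
 * are rejected, which puts an upper bound on the size of the mapping.
 */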
191 void spmc_handle_rxtx_map(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
192 {
193 	int rc = 0;
194 	uint32_t ret_fid = FFA_ERROR;
195 	unsigned int sz = 0;
196 	paddr_t rx_pa = 0;
197 	paddr_t tx_pa = 0;
198 	void *rx = NULL;
199 	void *tx = NULL;
200 
201 	cpu_spin_lock(&rxtx->spinlock);
202 
203 	if (args->a3 & GENMASK_64(63, 6)) {
204 		rc = FFA_INVALID_PARAMETERS;
205 		goto out;
206 	}
207 
208 	sz = args->a3 * SMALL_PAGE_SIZE;
209 	if (!sz) {
210 		rc = FFA_INVALID_PARAMETERS;
211 		goto out;
212 	}
213 	/* TX/RX are swapped compared to the caller */
214 	tx_pa = args->a2;
215 	rx_pa = args->a1;
216 
217 	if (rxtx->size) {
218 		rc = FFA_DENIED;
219 		goto out;
220 	}
221 
222 	/*
223 	 * If the buffer comes from a SP the address is virtual and already
224 	 * mapped.
225 	 */
226 	if (is_nw_buf(rxtx)) {
227 		rc = map_buf(tx_pa, sz, &tx);
228 		if (rc)
229 			goto out;
230 		rc = map_buf(rx_pa, sz, &rx);
231 		if (rc) {
232 			unmap_buf(tx, sz);
233 			goto out;
234 		}
235 		rxtx->tx = tx;
236 		rxtx->rx = rx;
237 	} else {
238 		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
239 			rc = FFA_INVALID_PARAMETERS;
240 			goto out;
241 		}
242 
243 		if (!virt_to_phys((void *)tx_pa) ||
244 		    !virt_to_phys((void *)rx_pa)) {
245 			rc = FFA_INVALID_PARAMETERS;
246 			goto out;
247 		}
248 
249 		rxtx->tx = (void *)tx_pa;
250 		rxtx->rx = (void *)rx_pa;
251 	}
252 
253 	rxtx->size = sz;
254 	rxtx->tx_is_mine = true;
255 	ret_fid = FFA_SUCCESS_32;
256 	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
257 	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
258 out:
259 	cpu_spin_unlock(&rxtx->spinlock);
260 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
261 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
262 }
263 
264 void spmc_handle_rxtx_unmap(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
265 {
266 	uint32_t ret_fid = FFA_ERROR;
267 	int rc = FFA_INVALID_PARAMETERS;
268 
269 	cpu_spin_lock(&rxtx->spinlock);
270 
271 	if (!rxtx->size)
272 		goto out;
273 
274 	/* We don't unmap the SP memory as the SP might still use it */
275 	if (is_nw_buf(rxtx)) {
276 		unmap_buf(rxtx->rx, rxtx->size);
277 		unmap_buf(rxtx->tx, rxtx->size);
278 	}
279 	rxtx->size = 0;
280 	rxtx->rx = NULL;
281 	rxtx->tx = NULL;
282 	ret_fid = FFA_SUCCESS_32;
283 	rc = 0;
284 out:
285 	cpu_spin_unlock(&rxtx->spinlock);
286 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
287 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
288 }
289 
290 void spmc_handle_rx_release(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
291 {
292 	uint32_t ret_fid = 0;
293 	int rc = 0;
294 
295 	cpu_spin_lock(&rxtx->spinlock);
296 	/* The sender's RX is our TX */
297 	if (!rxtx->size || rxtx->tx_is_mine) {
298 		ret_fid = FFA_ERROR;
299 		rc = FFA_DENIED;
300 	} else {
301 		ret_fid = FFA_SUCCESS_32;
302 		rc = 0;
303 		rxtx->tx_is_mine = true;
304 	}
305 	cpu_spin_unlock(&rxtx->spinlock);
306 
307 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
308 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
309 }
310 
311 static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
312 {
313 	return !w0 && !w1 && !w2 && !w3;
314 }
315 
316 static bool is_my_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
317 {
318 	/*
319 	 * This depends on which UUID we have been assigned.
320 	 * TODO add a generic mechanism to obtain our UUID.
321 	 *
322 	 * The test below is for the hard coded UUID
323 	 * 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
324 	 */
325 	return w0 == 0xe0786148 && w1 == 0xe311f8e7 &&
326 	       w2 == 0x02005ebc && w3 == 0x1bc5d5a5;
327 }
328 
329 void spmc_fill_partition_entry(struct ffa_partition_info *fpi,
330 			       uint16_t endpoint_id, uint16_t execution_context)
331 {
332 	fpi->id = endpoint_id;
333 	/* Number of execution contexts implemented by this partition */
334 	fpi->execution_context = execution_context;
335 
336 	fpi->partition_properties = FFA_PARTITION_DIRECT_REQ_RECV_SUPPORT |
337 				    FFA_PARTITION_DIRECT_REQ_SEND_SUPPORT;
338 }
339 
340 static uint32_t handle_partition_info_get_all(size_t *elem_count,
341 					      struct ffa_rxtx *rxtx)
342 {
343 	struct ffa_partition_info *fpi = rxtx->tx;
344 
345 	/* Add OP-TEE SP */
346 	spmc_fill_partition_entry(fpi, my_endpoint_id, CFG_TEE_CORE_NB_CORE);
347 	rxtx->tx_is_mine = false;
348 	*elem_count = 1;
349 	fpi++;
350 
351 	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
352 		size_t count = (rxtx->size / sizeof(*fpi)) - 1;
353 
354 		if (sp_partition_info_get(fpi, NULL, &count))
355 			return FFA_NO_MEMORY;
356 		*elem_count += count;
357 	}
358 
359 	return FFA_OK;
360 }
361 
362 void spmc_handle_partition_info_get(struct thread_smc_args *args,
363 				    struct ffa_rxtx *rxtx)
364 {
365 	uint32_t ret_fid = FFA_ERROR;
366 	uint32_t rc = 0;
367 	uint32_t endpoint_id = my_endpoint_id;
368 	struct ffa_partition_info *fpi = NULL;
369 
370 	cpu_spin_lock(&rxtx->spinlock);
371 
372 	if (!rxtx->size || !rxtx->tx_is_mine) {
373 		if (rxtx->size)
374 			rc = FFA_BUSY;
375 		else
376 			rc = FFA_DENIED; /* TX buffer not setup yet */
377 		goto out;
378 	}
379 
380 	fpi = rxtx->tx;
381 
382 	if (rxtx->size < sizeof(*fpi)) {
383 		ret_fid = FFA_ERROR;
384 		rc = FFA_NO_MEMORY;
385 		goto out;
386 	}
387 
388 	if (is_nil_uuid(args->a1, args->a2, args->a3, args->a4)) {
389 		size_t elem_count = 0;
390 
391 		ret_fid = handle_partition_info_get_all(&elem_count, rxtx);
392 
393 		if (ret_fid) {
394 			rc = ret_fid;
395 			ret_fid = FFA_ERROR;
396 		} else {
397 			ret_fid = FFA_SUCCESS_32;
398 			rc = elem_count;
399 		}
400 
401 		goto out;
402 	}
403 
404 	if (is_my_uuid(args->a1, args->a2, args->a3, args->a4)) {
405 		spmc_fill_partition_entry(fpi, endpoint_id,
406 					  CFG_TEE_CORE_NB_CORE);
407 		rc = 1;
408 	} else if (IS_ENABLED(CFG_SECURE_PARTITION)) {
409 		uint32_t uuid_array[4] = { 0 };
410 		TEE_UUID uuid = { };
411 		TEE_Result res = TEE_SUCCESS;
412 		size_t count = (rxtx->size / sizeof(*fpi));
413 
414 		uuid_array[0] = args->a1;
415 		uuid_array[1] = args->a2;
416 		uuid_array[2] = args->a3;
417 		uuid_array[3] = args->a4;
418 		tee_uuid_from_octets(&uuid, (uint8_t *)uuid_array);
419 
420 		res = sp_partition_info_get(fpi, &uuid, &count);
421 		if (res != TEE_SUCCESS) {
422 			ret_fid = FFA_ERROR;
423 			rc = FFA_INVALID_PARAMETERS;
424 			goto out;
425 		}
426 		rc = count;
427 	} else {
428 		ret_fid = FFA_ERROR;
429 		rc = FFA_INVALID_PARAMETERS;
430 		goto out;
431 	}
432 
433 	ret_fid = FFA_SUCCESS_32;
434 	rxtx->tx_is_mine = false;
435 
436 out:
437 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
438 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
439 	cpu_spin_unlock(&rxtx->spinlock);
440 }
441 #endif /*CFG_CORE_SEL1_SPMC*/
442 
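/*
 * A yielding call either resumes a thread suspended waiting for an RPC
 * response (OPTEE_FFA_YIELDING_CALL_RESUME) or allocates a new thread for
 * a fresh call. thread_resume_from_rpc() and thread_alloc_and_run() only
 * return here when the thread couldn't be resumed or allocated, so the
 * direct response built below always carries an error status.
 */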
443 static void handle_yielding_call(struct thread_smc_args *args)
444 {
445 	TEE_Result res = 0;
446 
447 	thread_check_canaries();
448 
449 	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
450 		/* Note connection to struct thread_rpc_arg::ret */
451 		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
452 				       0);
453 		res = TEE_ERROR_BAD_PARAMETERS;
454 	} else {
455 		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
456 				     args->a6, args->a7);
457 		res = TEE_ERROR_BUSY;
458 	}
459 	spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
460 		      swap_src_dst(args->a1), 0, res, 0, 0);
461 }
462 
463 static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
464 {
465 	uint64_t cookie = reg_pair_to_64(a5, a4);
466 	uint32_t res = 0;
467 
468 	res = mobj_ffa_unregister_by_cookie(cookie);
469 	switch (res) {
470 	case TEE_SUCCESS:
471 	case TEE_ERROR_ITEM_NOT_FOUND:
472 		return 0;
473 	case TEE_ERROR_BUSY:
474 		EMSG("res %#"PRIx32, res);
475 		return FFA_BUSY;
476 	default:
477 		EMSG("res %#"PRIx32, res);
478 		return FFA_INVALID_PARAMETERS;
479 	}
480 }
481 
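/*
 * Blocking calls are serviced immediately on the calling CPU without
 * allocating a thread context. The service ID is passed in w3 of the
 * direct request.
 */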
482 static void handle_blocking_call(struct thread_smc_args *args)
483 {
484 	switch (args->a3) {
485 	case OPTEE_FFA_GET_API_VERSION:
486 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
487 			      swap_src_dst(args->a1), 0,
488 			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
489 			      0);
490 		break;
491 	case OPTEE_FFA_GET_OS_VERSION:
492 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
493 			      swap_src_dst(args->a1), 0,
494 			      CFG_OPTEE_REVISION_MAJOR,
495 			      CFG_OPTEE_REVISION_MINOR, TEE_IMPL_GIT_SHA1);
496 		break;
497 	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
498 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
499 			      swap_src_dst(args->a1), 0, 0,
500 			      THREAD_RPC_MAX_NUM_PARAMS,
501 			      OPTEE_FFA_SEC_CAP_ARG_OFFSET);
502 		break;
503 	case OPTEE_FFA_UNREGISTER_SHM:
504 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
505 			      swap_src_dst(args->a1), 0,
506 			      handle_unregister_shm(args->a4, args->a5), 0, 0);
507 		break;
508 	default:
509 		EMSG("Unhandled blocking service ID %#"PRIx32,
510 		     (uint32_t)args->a3);
511 		panic();
512 	}
513 }
514 
515 #if defined(CFG_CORE_SEL1_SPMC)
516 static int get_acc_perms(struct ffa_mem_access *mem_acc,
517 			 unsigned int num_mem_accs, uint8_t *acc_perms,
518 			 unsigned int *region_offs)
519 {
520 	unsigned int n = 0;
521 
522 	for (n = 0; n < num_mem_accs; n++) {
523 		struct ffa_mem_access_perm *descr = &mem_acc[n].access_perm;
524 
525 		if (READ_ONCE(descr->endpoint_id) == my_endpoint_id) {
526 			*acc_perms = READ_ONCE(descr->perm);
527 			*region_offs = READ_ONCE(mem_acc[n].region_offs);
528 			return 0;
529 		}
530 	}
531 
532 	return FFA_INVALID_PARAMETERS;
533 }
534 
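/*
 * mem_share_init() validates the start of an FFA_MEM_SHARE transaction
 * descriptor. The expected layout (FF-A v1.0) is:
 *
 *   struct ffa_mem_transaction    header
 *   struct ffa_mem_access[]       one entry per receiver endpoint
 *   struct ffa_mem_region         composite descriptor at region_offs
 *   struct ffa_address_range[]    the shared pages, possibly continued
 *                                 in later fragments
 *
 * On success the total page count, the number of address ranges and the
 * offset of the address range array are returned to the caller.
 */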
535 static int mem_share_init(void *buf, size_t blen, unsigned int *page_count,
536 			  unsigned int *region_count, size_t *addr_range_offs)
537 {
538 	const uint8_t exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
539 	const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
540 	struct ffa_mem_region *region_descr = NULL;
541 	struct ffa_mem_transaction *descr = NULL;
542 	unsigned int num_mem_accs = 0;
543 	uint8_t mem_acc_perm = 0;
544 	unsigned int region_descr_offs = 0;
545 	size_t n = 0;
546 
547 	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_mem_transaction) ||
548 	    blen < sizeof(struct ffa_mem_transaction))
549 		return FFA_INVALID_PARAMETERS;
550 
551 	descr = buf;
552 
553 	/* Check that the endpoint memory access descriptor array fits */
554 	num_mem_accs = READ_ONCE(descr->mem_access_count);
555 	if (MUL_OVERFLOW(sizeof(struct ffa_mem_access), num_mem_accs, &n) ||
556 	    ADD_OVERFLOW(sizeof(*descr), n, &n) || n > blen)
557 		return FFA_INVALID_PARAMETERS;
558 
559 	if (READ_ONCE(descr->mem_reg_attr) != exp_mem_reg_attr)
560 		return FFA_INVALID_PARAMETERS;
561 
562 	/* Check that the access permissions match what's expected */
563 	if (get_acc_perms(descr->mem_access_array,
564 			  num_mem_accs, &mem_acc_perm, &region_descr_offs) ||
565 	    mem_acc_perm != exp_mem_acc_perm)
566 		return FFA_INVALID_PARAMETERS;
567 
568 	/* Check that the Composite memory region descriptor fits */
569 	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
570 	    n > blen)
571 		return FFA_INVALID_PARAMETERS;
572 
573 	if (!IS_ALIGNED_WITH_TYPE((vaddr_t)descr + region_descr_offs,
574 				  struct ffa_mem_region))
575 		return FFA_INVALID_PARAMETERS;
576 
577 	region_descr = (struct ffa_mem_region *)((vaddr_t)descr +
578 						 region_descr_offs);
579 	*page_count = READ_ONCE(region_descr->total_page_count);
580 	*region_count = READ_ONCE(region_descr->address_range_count);
581 	*addr_range_offs = n;
582 	return 0;
583 }
584 
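/*
 * Consumes as many address ranges as fit in the buffer and adds their
 * pages to the mobj. Returns the number of bytes consumed if more ranges
 * remain, 0 when the last range has been added and the page count
 * matches, or an FFA error code on failure.
 */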
585 static int add_mem_share_helper(struct mem_share_state *s, void *buf,
586 				size_t flen)
587 {
588 	unsigned int region_count = flen / sizeof(struct ffa_address_range);
589 	struct ffa_address_range *arange = NULL;
590 	unsigned int n = 0;
591 
592 	if (region_count > s->region_count)
593 		region_count = s->region_count;
594 
595 	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range))
596 		return FFA_INVALID_PARAMETERS;
597 	arange = buf;
598 
599 	for (n = 0; n < region_count; n++) {
600 		unsigned int page_count = READ_ONCE(arange[n].page_count);
601 		uint64_t addr = READ_ONCE(arange[n].address);
602 
603 		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
604 					  addr, page_count))
605 			return FFA_INVALID_PARAMETERS;
606 	}
607 
608 	s->region_count -= region_count;
609 	if (s->region_count)
610 		return region_count * sizeof(*arange);
611 
612 	if (s->current_page_idx != s->page_count)
613 		return FFA_INVALID_PARAMETERS;
614 
615 	return 0;
616 }
617 
618 static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
619 {
620 	int rc = 0;
621 
622 	rc = add_mem_share_helper(&s->share, buf, flen);
623 	if (rc >= 0) {
624 		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
625 			/* We're not at the end of the descriptor yet */
626 			if (s->share.region_count)
627 				return s->frag_offset;
628 
629 			/* We're done */
630 			rc = 0;
631 		} else {
632 			rc = FFA_INVALID_PARAMETERS;
633 		}
634 	}
635 
636 	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
637 	if (rc < 0)
638 		mobj_ffa_sel1_spmc_delete(s->share.mf);
639 	else
640 		mobj_ffa_push_to_inactive(s->share.mf);
641 	free(s);
642 
643 	return rc;
644 }
645 
646 static bool is_sp_share(void *buf)
647 {
648 	struct ffa_mem_transaction *input_descr = NULL;
649 	struct ffa_mem_access_perm *perm = NULL;
650 
651 	if (!IS_ENABLED(CFG_SECURE_PARTITION))
652 		return false;
653 
654 	input_descr = buf;
655 	perm = &input_descr->mem_access_array[0].access_perm;
656 
657 	/*
658 	 * perm->endpoint_id is read here only to check if the endpoint is
659 	 * OP-TEE. We do read it later on again, but there are some additional
660 	 * checks there to make sure that the data is correct.
661 	 */
662 	return READ_ONCE(perm->endpoint_id) != my_endpoint_id;
663 }
664 
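/*
 * Registers a new memory share described by the transaction descriptor in
 * buf. blen is the total length of the descriptor and flen the length of
 * the part supplied with this first fragment; when they differ a struct
 * mem_frag_state is queued and the remaining address ranges are expected
 * via FFA_MEM_FRAG_TX. Returns 0 on completion, the number of bytes
 * consumed so far if more fragments are needed, or an FFA error code.
 */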
665 static int add_mem_share(tee_mm_entry_t *mm, void *buf, size_t blen,
666 			 size_t flen, uint64_t *global_handle)
667 {
668 	int rc = 0;
669 	struct mem_share_state share = { };
670 	size_t addr_range_offs = 0;
671 	size_t n = 0;
672 
673 	if (flen > blen)
674 		return FFA_INVALID_PARAMETERS;
675 
676 	rc = mem_share_init(buf, flen, &share.page_count, &share.region_count,
677 			    &addr_range_offs);
678 	if (rc)
679 		return rc;
680 
681 	if (MUL_OVERFLOW(share.region_count,
682 			 sizeof(struct ffa_address_range), &n) ||
683 	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
684 		return FFA_INVALID_PARAMETERS;
685 
686 	share.mf = mobj_ffa_sel1_spmc_new(share.page_count);
687 	if (!share.mf)
688 		return FFA_NO_MEMORY;
689 
690 	if (flen != blen) {
691 		struct mem_frag_state *s = calloc(sizeof(*s), 1);
692 
693 		if (!s) {
694 			rc = FFA_NO_MEMORY;
695 			goto err;
696 		}
697 		s->share = share;
698 		s->mm = mm;
699 		s->frag_offset = addr_range_offs;
700 
701 		SLIST_INSERT_HEAD(&frag_state_head, s, link);
702 		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
703 					flen - addr_range_offs);
704 
705 		if (rc >= 0)
706 			*global_handle = mobj_ffa_get_cookie(share.mf);
707 
708 		return rc;
709 	}
710 
711 	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
712 				  flen - addr_range_offs);
713 	if (rc) {
714 		/*
715 		 * A positive value (consumed bytes) instead of 0 means the
716 		 * descriptor was incomplete, so treat that as an error too.
717 		 */
718 		rc = FFA_INVALID_PARAMETERS;
719 		goto err;
720 	}
721 
722 	*global_handle = mobj_ffa_push_to_inactive(share.mf);
723 
724 	return 0;
725 err:
726 	mobj_ffa_sel1_spmc_delete(share.mf);
727 	return rc;
728 }
729 
730 static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
731 				 unsigned int page_count,
732 				 uint64_t *global_handle, struct ffa_rxtx *rxtx)
733 {
734 	int rc = 0;
735 	size_t len = 0;
736 	tee_mm_entry_t *mm = NULL;
737 	vaddr_t offs = pbuf & SMALL_PAGE_MASK;
738 
739 	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
740 		return FFA_INVALID_PARAMETERS;
741 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
742 		return FFA_INVALID_PARAMETERS;
743 
744 	/*
745 	 * Check that the length reported in blen fits within len even
746 	 * when the offset is taken into account.
747 	 */
748 	if (len < blen || len - offs < blen)
749 		return FFA_INVALID_PARAMETERS;
750 
751 	mm = tee_mm_alloc(&tee_mm_shm, len);
752 	if (!mm)
753 		return FFA_NO_MEMORY;
754 
755 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
756 					  page_count, MEM_AREA_NSEC_SHM)) {
757 		rc = FFA_INVALID_PARAMETERS;
758 		goto out;
759 	}
760 
761 	cpu_spin_lock(&rxtx->spinlock);
762 	rc = add_mem_share(mm, (void *)(tee_mm_get_smem(mm) + offs), blen, flen,
763 			   global_handle);
764 	cpu_spin_unlock(&rxtx->spinlock);
765 	if (rc > 0)
766 		return rc;
767 
768 	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
769 out:
770 	tee_mm_free(mm);
771 	return rc;
772 }
773 
774 static int handle_mem_share_rxbuf(size_t blen, size_t flen,
775 				  uint64_t *global_handle,
776 				  struct ffa_rxtx *rxtx)
777 {
778 	int rc = FFA_DENIED;
779 
780 	cpu_spin_lock(&rxtx->spinlock);
781 
782 	if (rxtx->rx && flen <= rxtx->size) {
783 		if (is_sp_share(rxtx->rx)) {
784 			rc = spmc_sp_add_share(rxtx, blen,
785 					       global_handle, NULL);
786 		} else {
787 			rc = add_mem_share(NULL, rxtx->rx, blen, flen,
788 					   global_handle);
789 		}
790 	}
791 
792 	cpu_spin_unlock(&rxtx->spinlock);
793 
794 	return rc;
795 }
796 
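/*
 * FFA_MEM_SHARE: w1 is the total length of the transaction descriptor and
 * w2 the length of the fragment supplied with this call. With w3 == 0 the
 * descriptor is read from the caller's TX buffer, otherwise w3 and w4
 * give the address and page count of a buffer dynamically allocated by
 * the caller.
 */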
797 static void handle_mem_share(struct thread_smc_args *args,
798 			     struct ffa_rxtx *rxtx)
799 {
800 	uint32_t ret_w1 = 0;
801 	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
802 	uint32_t ret_w3 = 0;
803 	uint32_t ret_fid = FFA_ERROR;
804 	uint64_t global_handle = 0;
805 	int rc = 0;
806 
807 	/* Check that the MBZs are indeed 0 */
808 	if (args->a5 || args->a6 || args->a7)
809 		goto out;
810 
811 	if (!args->a3) {
812 		/*
813 		 * The memory transaction descriptor is passed via our rx
814 		 * buffer.
815 		 */
816 		if (args->a4)
817 			goto out;
818 		rc = handle_mem_share_rxbuf(args->a1, args->a2, &global_handle,
819 					    rxtx);
820 	} else {
821 		rc = handle_mem_share_tmem(args->a3, args->a1, args->a2,
822 					   args->a4, &global_handle, rxtx);
823 	}
824 	if (rc < 0) {
825 		ret_w2 = rc;
826 	} else if (rc > 0) {
827 		ret_fid = FFA_MEM_FRAG_RX;
828 		ret_w3 = rc;
829 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
830 	} else {
831 		ret_fid = FFA_SUCCESS_32;
832 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
833 	}
834 out:
835 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
836 }
837 
838 static struct mem_frag_state *get_frag_state(uint64_t global_handle)
839 {
840 	struct mem_frag_state *s = NULL;
841 
842 	SLIST_FOREACH(s, &frag_state_head, link)
843 		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
844 			return s;
845 
846 	return NULL;
847 }
848 
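/*
 * FFA_MEM_FRAG_TX: w1/w2 hold the handle (cookie) identifying the share
 * in progress and w3 the length of this fragment. The fragment is read
 * either from the dynamically mapped buffer recorded in the fragment
 * state or from the caller's TX buffer (our RX).
 */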
849 static void handle_mem_frag_tx(struct thread_smc_args *args,
850 			       struct ffa_rxtx *rxtx)
851 {
852 	int rc = 0;
853 	uint64_t global_handle = reg_pair_to_64(READ_ONCE(args->a2),
854 						READ_ONCE(args->a1));
855 	size_t flen = READ_ONCE(args->a3);
856 	struct mem_frag_state *s = NULL;
857 	tee_mm_entry_t *mm = NULL;
858 	unsigned int page_count = 0;
859 	void *buf = NULL;
860 	uint32_t ret_w1 = 0;
861 	uint32_t ret_w2 = 0;
862 	uint32_t ret_w3 = 0;
863 	uint32_t ret_fid = 0;
864 
865 	/*
866 	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
867 	 * requests.
868 	 */
869 
870 	cpu_spin_lock(&rxtx->spinlock);
871 
872 	s = get_frag_state(global_handle);
873 	if (!s) {
874 		rc = FFA_INVALID_PARAMETERS;
875 		goto out;
876 	}
877 
878 	mm = s->mm;
879 	if (mm) {
880 		if (flen > tee_mm_get_bytes(mm)) {
881 			rc = FFA_INVALID_PARAMETERS;
882 			goto out;
883 		}
884 		page_count = s->share.page_count;
885 		buf = (void *)tee_mm_get_smem(mm);
886 	} else {
887 		if (flen > rxtx->size) {
888 			rc = FFA_INVALID_PARAMETERS;
889 			goto out;
890 		}
891 		buf = rxtx->rx;
892 	}
893 
894 	rc = add_mem_share_frag(s, buf, flen);
895 out:
896 	cpu_spin_unlock(&rxtx->spinlock);
897 
898 	if (rc <= 0 && mm) {
899 		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
900 		tee_mm_free(mm);
901 	}
902 
903 	if (rc < 0) {
904 		ret_fid = FFA_ERROR;
905 		ret_w2 = rc;
906 	} else if (rc > 0) {
907 		ret_fid = FFA_MEM_FRAG_RX;
908 		ret_w3 = rc;
909 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
910 	} else {
911 		ret_fid = FFA_SUCCESS_32;
912 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
913 	}
914 
915 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
916 }
917 
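/*
 * FFA_MEM_RECLAIM: w1/w2 hold the cookie (handle) of the share to
 * reclaim, the remaining registers must be zero. The reclaim is denied
 * while OP-TEE still uses the memory.
 */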
918 static void handle_mem_reclaim(struct thread_smc_args *args)
919 {
920 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
921 	uint32_t ret_fid = FFA_ERROR;
922 	uint64_t cookie = 0;
923 
924 	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
925 		goto out;
926 
927 	cookie = reg_pair_to_64(args->a2, args->a1);
928 	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
929 	case TEE_SUCCESS:
930 		ret_fid = FFA_SUCCESS_32;
931 		ret_val = 0;
932 		break;
933 	case TEE_ERROR_ITEM_NOT_FOUND:
934 		DMSG("cookie %#"PRIx64" not found", cookie);
935 		ret_val = FFA_INVALID_PARAMETERS;
936 		break;
937 	default:
938 		DMSG("cookie %#"PRIx64" busy", cookie);
939 		ret_val = FFA_DENIED;
940 		break;
941 	}
942 out:
943 	spmc_set_args(args, ret_fid, ret_val, 0, 0, 0, 0);
944 }
945 #endif
946 
947 /* Only called from assembly */
948 void thread_spmc_msg_recv(struct thread_smc_args *args);
949 void thread_spmc_msg_recv(struct thread_smc_args *args)
950 {
951 	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
952 	switch (args->a0) {
953 #if defined(CFG_CORE_SEL1_SPMC)
954 	case FFA_VERSION:
955 		spmc_handle_version(args);
956 		break;
957 	case FFA_FEATURES:
958 		handle_features(args);
959 		break;
960 #ifdef ARM64
961 	case FFA_RXTX_MAP_64:
962 #endif
963 	case FFA_RXTX_MAP_32:
964 		spmc_handle_rxtx_map(args, &nw_rxtx);
965 		break;
966 	case FFA_RXTX_UNMAP:
967 		spmc_handle_rxtx_unmap(args, &nw_rxtx);
968 		break;
969 	case FFA_RX_RELEASE:
970 		spmc_handle_rx_release(args, &nw_rxtx);
971 		break;
972 	case FFA_PARTITION_INFO_GET:
973 		spmc_handle_partition_info_get(args, &nw_rxtx);
974 		break;
975 #endif /*CFG_CORE_SEL1_SPMC*/
976 	case FFA_INTERRUPT:
977 		itr_core_handler();
978 		spmc_set_args(args, FFA_MSG_WAIT, 0, 0, 0, 0, 0);
979 		break;
980 #ifdef ARM64
981 	case FFA_MSG_SEND_DIRECT_REQ_64:
982 #endif
983 	case FFA_MSG_SEND_DIRECT_REQ_32:
984 		if (IS_ENABLED(CFG_SECURE_PARTITION) &&
985 		    FFA_DST(args->a1) != my_endpoint_id) {
986 			spmc_sp_start_thread(args);
987 			break;
988 		}
989 
990 		if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
991 			handle_yielding_call(args);
992 		else
993 			handle_blocking_call(args);
994 		break;
995 #if defined(CFG_CORE_SEL1_SPMC)
996 #ifdef ARM64
997 	case FFA_MEM_SHARE_64:
998 #endif
999 	case FFA_MEM_SHARE_32:
1000 		handle_mem_share(args, &nw_rxtx);
1001 		break;
1002 	case FFA_MEM_RECLAIM:
1003 		if (!IS_ENABLED(CFG_SECURE_PARTITION) ||
1004 		    !ffa_mem_reclaim(args, NULL))
1005 			handle_mem_reclaim(args);
1006 		break;
1007 	case FFA_MEM_FRAG_TX:
1008 		handle_mem_frag_tx(args, &nw_rxtx);
1009 		break;
1010 #endif /*CFG_CORE_SEL1_SPMC*/
1011 	default:
1012 		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
1013 		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
1014 			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1015 	}
1016 }
1017 
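/*
 * OPTEE_FFA_YIELDING_CALL_WITH_ARG: the message arguments live in a
 * previously shared memory object identified by cookie. The struct
 * optee_msg_arg at the supplied offset is followed by a second
 * struct optee_msg_arg which is used for RPC arguments while the call
 * executes.
 */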
1018 static TEE_Result yielding_call_with_arg(uint64_t cookie, uint32_t offset)
1019 {
1020 	size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1021 	struct thread_ctx *thr = threads + thread_get_id();
1022 	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
1023 	struct optee_msg_arg *arg = NULL;
1024 	struct mobj *mobj = NULL;
1025 	uint32_t num_params = 0;
1026 	size_t sz = 0;
1027 
1028 	mobj = mobj_ffa_get_by_cookie(cookie, 0);
1029 	if (!mobj) {
1030 		EMSG("Can't find cookie %#"PRIx64, cookie);
1031 		return TEE_ERROR_BAD_PARAMETERS;
1032 	}
1033 
1034 	res = mobj_inc_map(mobj);
1035 	if (res)
1036 		goto out_put_mobj;
1037 
1038 	res = TEE_ERROR_BAD_PARAMETERS;
1039 	arg = mobj_get_va(mobj, offset, sizeof(*arg));
1040 	if (!arg)
1041 		goto out_dec_map;
1042 
1043 	num_params = READ_ONCE(arg->num_params);
1044 	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
1045 		goto out_dec_map;
1046 
1047 	sz = OPTEE_MSG_GET_ARG_SIZE(num_params);
1048 
1049 	thr->rpc_arg = mobj_get_va(mobj, offset + sz, sz_rpc);
1050 	if (!thr->rpc_arg)
1051 		goto out_dec_map;
1052 
1053 	res = tee_entry_std(arg, num_params);
1054 
1055 	thread_rpc_shm_cache_clear(&thr->shm_cache);
1056 	thr->rpc_arg = NULL;
1057 
1058 out_dec_map:
1059 	mobj_dec_map(mobj);
1060 out_put_mobj:
1061 	mobj_put(mobj);
1062 	return res;
1063 }
1064 
1065 /*
1066  * Helper routine for the assembly function thread_std_smc_entry()
1067  *
1068  * Note: this function is weak just to make link_dummies_paged.c happy.
1069  */
1070 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
1071 				       uint32_t a2, uint32_t a3,
1072 				       uint32_t a4, uint32_t a5 __unused)
1073 {
1074 	/*
1075 	 * Arguments are supplied from handle_yielding_call() as:
1076 	 * a0 <- w1
1077 	 * a1 <- w3
1078 	 * a2 <- w4
1079 	 * a3 <- w5
1080 	 * a4 <- w6
1081 	 * a5 <- w7
1082 	 */
1083 	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
1084 	if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG)
1085 		return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4);
1086 	return FFA_DENIED;
1087 }
1088 
1089 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
1090 {
1091 	uint64_t offs = tpm->u.memref.offs;
1092 
1093 	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
1094 		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
1095 
1096 	param->u.fmem.offs_low = offs;
1097 	param->u.fmem.offs_high = offs >> 32;
1098 	if (param->u.fmem.offs_high != offs >> 32)
1099 		return false;
1100 
1101 	param->u.fmem.size = tpm->u.memref.size;
1102 	if (tpm->u.memref.mobj) {
1103 		uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj);
1104 
1105 		/* If a mobj is passed it must be one with a valid cookie. */
1106 		if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
1107 			return false;
1108 		param->u.fmem.global_id = cookie;
1109 	} else {
1110 		param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
1111 	}
1112 
1113 	return true;
1114 }
1115 
1116 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
1117 			    struct thread_param *params,
1118 			    struct optee_msg_arg **arg_ret)
1119 {
1120 	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1121 	struct thread_ctx *thr = threads + thread_get_id();
1122 	struct optee_msg_arg *arg = thr->rpc_arg;
1123 
1124 	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
1125 		return TEE_ERROR_BAD_PARAMETERS;
1126 
1127 	if (!arg) {
1128 		EMSG("rpc_arg not set");
1129 		return TEE_ERROR_GENERIC;
1130 	}
1131 
1132 	memset(arg, 0, sz);
1133 	arg->cmd = cmd;
1134 	arg->num_params = num_params;
1135 	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
1136 
1137 	for (size_t n = 0; n < num_params; n++) {
1138 		switch (params[n].attr) {
1139 		case THREAD_PARAM_ATTR_NONE:
1140 			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
1141 			break;
1142 		case THREAD_PARAM_ATTR_VALUE_IN:
1143 		case THREAD_PARAM_ATTR_VALUE_OUT:
1144 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1145 			arg->params[n].attr = params[n].attr -
1146 					      THREAD_PARAM_ATTR_VALUE_IN +
1147 					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
1148 			arg->params[n].u.value.a = params[n].u.value.a;
1149 			arg->params[n].u.value.b = params[n].u.value.b;
1150 			arg->params[n].u.value.c = params[n].u.value.c;
1151 			break;
1152 		case THREAD_PARAM_ATTR_MEMREF_IN:
1153 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1154 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1155 			if (!set_fmem(arg->params + n, params + n))
1156 				return TEE_ERROR_BAD_PARAMETERS;
1157 			break;
1158 		default:
1159 			return TEE_ERROR_BAD_PARAMETERS;
1160 		}
1161 	}
1162 
1163 	if (arg_ret)
1164 		*arg_ret = arg;
1165 
1166 	return TEE_SUCCESS;
1167 }
1168 
1169 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
1170 				struct thread_param *params)
1171 {
1172 	for (size_t n = 0; n < num_params; n++) {
1173 		switch (params[n].attr) {
1174 		case THREAD_PARAM_ATTR_VALUE_OUT:
1175 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1176 			params[n].u.value.a = arg->params[n].u.value.a;
1177 			params[n].u.value.b = arg->params[n].u.value.b;
1178 			params[n].u.value.c = arg->params[n].u.value.c;
1179 			break;
1180 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1181 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1182 			params[n].u.memref.size = arg->params[n].u.fmem.size;
1183 			break;
1184 		default:
1185 			break;
1186 		}
1187 	}
1188 
1189 	return arg->ret;
1190 }
1191 
1192 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
1193 			struct thread_param *params)
1194 {
1195 	struct thread_rpc_arg rpc_arg = { .call = {
1196 			.w1 = thread_get_tsd()->rpc_target_info,
1197 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1198 		},
1199 	};
1200 	struct optee_msg_arg *arg = NULL;
1201 	uint32_t ret = 0;
1202 
1203 	ret = get_rpc_arg(cmd, num_params, params, &arg);
1204 	if (ret)
1205 		return ret;
1206 
1207 	thread_rpc(&rpc_arg);
1208 
1209 	return get_rpc_arg_res(arg, num_params, params);
1210 }
1211 
1212 static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
1213 {
1214 	struct thread_rpc_arg rpc_arg = { .call = {
1215 			.w1 = thread_get_tsd()->rpc_target_info,
1216 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1217 		},
1218 	};
1219 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
1220 	uint32_t res2 = 0;
1221 	uint32_t res = 0;
1222 
1223 	DMSG("freeing cookie %#"PRIx64, cookie);
1224 
1225 	res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);
1226 
1227 	mobj_put(mobj);
1228 	res2 = mobj_ffa_unregister_by_cookie(cookie);
1229 	if (res2)
1230 		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
1231 		     cookie, res2);
1232 	if (!res)
1233 		thread_rpc(&rpc_arg);
1234 }
1235 
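/*
 * Asks normal world to allocate shared memory of the requested type with
 * OPTEE_RPC_CMD_SHM_ALLOC. The reply identifies the new memory with an
 * FF-A cookie (global handle) used to look up the mobj that was
 * registered when normal world shared the memory with us.
 */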
1236 static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
1237 {
1238 	struct thread_rpc_arg rpc_arg = { .call = {
1239 			.w1 = thread_get_tsd()->rpc_target_info,
1240 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1241 		},
1242 	};
1243 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
1244 	struct optee_msg_arg *arg = NULL;
1245 	unsigned int internal_offset = 0;
1246 	struct mobj *mobj = NULL;
1247 	uint64_t cookie = 0;
1248 
1249 	if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
1250 		return NULL;
1251 
1252 	thread_rpc(&rpc_arg);
1253 
1254 	if (arg->num_params != 1 ||
1255 	    arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
1256 		return NULL;
1257 
1258 	internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs);
1259 	cookie = READ_ONCE(arg->params->u.fmem.global_id);
1260 	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
1261 	if (!mobj) {
1262 		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
1263 		     cookie, internal_offset);
1264 		return NULL;
1265 	}
1266 
1267 	assert(mobj_is_nonsec(mobj));
1268 
1269 	if (mobj->size < size) {
1270 		DMSG("Mobj %#"PRIx64": wrong size", cookie);
1271 		mobj_put(mobj);
1272 		return NULL;
1273 	}
1274 
1275 	if (mobj_inc_map(mobj)) {
1276 		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
1277 		mobj_put(mobj);
1278 		return NULL;
1279 	}
1280 
1281 	return mobj;
1282 }
1283 
1284 struct mobj *thread_rpc_alloc_payload(size_t size)
1285 {
1286 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
1287 }
1288 
1289 struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
1290 {
1291 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
1292 }
1293 
1294 void thread_rpc_free_kernel_payload(struct mobj *mobj)
1295 {
1296 	thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL, mobj_get_cookie(mobj), mobj);
1297 }
1298 
1299 void thread_rpc_free_payload(struct mobj *mobj)
1300 {
1301 	thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
1302 			mobj);
1303 }
1304 
1305 struct mobj *thread_rpc_alloc_global_payload(size_t size)
1306 {
1307 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
1308 }
1309 
1310 void thread_rpc_free_global_payload(struct mobj *mobj)
1311 {
1312 	thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL, mobj_get_cookie(mobj),
1313 			mobj);
1314 }
1315 
1316 void thread_spmc_register_secondary_ep(vaddr_t ep)
1317 {
1318 	unsigned long ret = 0;
1319 
1320 	/* Let the SPM know the entry point for secondary CPUs */
1321 	ret = thread_smc(FFA_SECONDARY_EP_REGISTER_64, ep, 0, 0);
1322 
1323 	if (ret != FFA_SUCCESS_32 && ret != FFA_SUCCESS_64)
1324 		EMSG("FFA_SECONDARY_EP_REGISTER_64 ret %#lx", ret);
1325 }
1326 
1327 #if defined(CFG_CORE_SEL1_SPMC)
1328 static TEE_Result spmc_init(void)
1329 {
1330 	my_endpoint_id = SPMC_ENDPOINT_ID;
1331 	DMSG("My endpoint ID %#x", my_endpoint_id);
1332 
1333 	return TEE_SUCCESS;
1334 }
1335 #else /* !defined(CFG_CORE_SEL1_SPMC) */
1336 static bool is_ffa_success(uint32_t fid)
1337 {
1338 #ifdef ARM64
1339 	if (fid == FFA_SUCCESS_64)
1340 		return true;
1341 #endif
1342 	return fid == FFA_SUCCESS_32;
1343 }
1344 
1345 static void spmc_rxtx_map(struct ffa_rxtx *rxtx)
1346 {
1347 	struct thread_smc_args args = {
1348 #ifdef ARM64
1349 		.a0 = FFA_RXTX_MAP_64,
1350 #else
1351 		.a0 = FFA_RXTX_MAP_32,
1352 #endif
1353 		.a1 = virt_to_phys(rxtx->tx),
1354 		.a2 = virt_to_phys(rxtx->rx),
1355 		.a3 = 1,
1356 	};
1357 
1358 	thread_smccc(&args);
1359 	if (!is_ffa_success(args.a0)) {
1360 		if (args.a0 == FFA_ERROR)
1361 			EMSG("rxtx map failed with error %ld", args.a2);
1362 		else
1363 			EMSG("rxtx map failed");
1364 		panic();
1365 	}
1366 }
1367 
1368 static uint16_t spmc_get_id(void)
1369 {
1370 	struct thread_smc_args args = {
1371 		.a0 = FFA_ID_GET,
1372 	};
1373 
1374 	thread_smccc(&args);
1375 	if (!is_ffa_success(args.a0)) {
1376 		if (args.a0 == FFA_ERROR)
1377 			EMSG("Get id failed with error %ld", args.a2);
1378 		else
1379 			EMSG("Get id failed");
1380 		panic();
1381 	}
1382 
1383 	return args.a2;
1384 }
1385 
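/*
 * With the SPMC at a higher exception level OP-TEE must retrieve the
 * description of shared memory itself. A retrieve request for the given
 * cookie is written to our TX buffer and the SPMC answers with
 * FFA_MEM_RETRIEVE_RESP, placing the memory transaction descriptor in our
 * RX buffer.
 */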
1386 static struct ffa_mem_transaction *spmc_retrieve_req(uint64_t cookie)
1387 {
1388 	struct ffa_mem_transaction *trans_descr = nw_rxtx.tx;
1389 	struct ffa_mem_access *acc_descr_array = NULL;
1390 	struct ffa_mem_access_perm *perm_descr = NULL;
1391 	size_t size = sizeof(*trans_descr) +
1392 		      1 * sizeof(struct ffa_mem_access);
1393 	struct thread_smc_args args = {
1394 		.a0 = FFA_MEM_RETRIEVE_REQ_32,
1395 		.a1 =   size,	/* Total Length */
1396 		.a2 =	size,	/* Frag Length == Total length */
1397 		.a3 =	0,	/* Address, Using TX -> MBZ */
1398 		.a4 =   0,	/* Using TX -> MBZ */
1399 	};
1400 
1401 	memset(trans_descr, 0, size);
1402 	trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
1403 	trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
1404 	trans_descr->global_handle = cookie;
1405 	trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
1406 			     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
1407 	trans_descr->mem_access_count = 1;
1408 	acc_descr_array = trans_descr->mem_access_array;
1409 	acc_descr_array->region_offs = 0;
1410 	acc_descr_array->reserved = 0;
1411 	perm_descr = &acc_descr_array->access_perm;
1412 	perm_descr->endpoint_id = my_endpoint_id;
1413 	perm_descr->perm = FFA_MEM_ACC_RW;
1414 	perm_descr->flags = 0;
1415 
1416 	thread_smccc(&args);
1417 	if (args.a0 != FFA_MEM_RETRIEVE_RESP) {
1418 		if (args.a0 == FFA_ERROR)
1419 			EMSG("Failed to fetch cookie %#"PRIx64" error code %d",
1420 			     cookie, (int)args.a2);
1421 		else
1422 			EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64,
1423 			     cookie, args.a0);
1424 		return NULL;
1425 	}
1426 
1427 	return nw_rxtx.rx;
1428 }
1429 
1430 void thread_spmc_relinquish(uint64_t cookie)
1431 {
1432 	struct ffa_mem_relinquish *relinquish_desc = nw_rxtx.tx;
1433 	struct thread_smc_args args = {
1434 		.a0 = FFA_MEM_RELINQUISH,
1435 	};
1436 
1437 	memset(relinquish_desc, 0, sizeof(*relinquish_desc));
1438 	relinquish_desc->handle = cookie;
1439 	relinquish_desc->flags = 0;
1440 	relinquish_desc->endpoint_count = 1;
1441 	relinquish_desc->endpoint_id_array[0] = my_endpoint_id;
1442 	thread_smccc(&args);
1443 	if (!is_ffa_success(args.a0))
1444 		EMSG("Failed to relinquish cookie %#"PRIx64, cookie);
1445 }
1446 
1447 static int set_pages(struct ffa_address_range *regions,
1448 		     unsigned int num_regions, unsigned int num_pages,
1449 		     struct mobj_ffa *mf)
1450 {
1451 	unsigned int n = 0;
1452 	unsigned int idx = 0;
1453 
1454 	for (n = 0; n < num_regions; n++) {
1455 		unsigned int page_count = READ_ONCE(regions[n].page_count);
1456 		uint64_t addr = READ_ONCE(regions[n].address);
1457 
1458 		if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count))
1459 			return FFA_INVALID_PARAMETERS;
1460 	}
1461 
1462 	if (idx != num_pages)
1463 		return FFA_INVALID_PARAMETERS;
1464 
1465 	return 0;
1466 }
1467 
1468 struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie)
1469 {
1470 	struct mobj_ffa *ret = NULL;
1471 	struct ffa_mem_transaction *retrieve_desc = NULL;
1472 	struct ffa_mem_access *descr_array = NULL;
1473 	struct ffa_mem_region *descr = NULL;
1474 	struct mobj_ffa *mf = NULL;
1475 	unsigned int num_pages = 0;
1476 	unsigned int offs = 0;
1477 	struct thread_smc_args ffa_rx_release_args = {
1478 		.a0 = FFA_RX_RELEASE
1479 	};
1480 
1481 	/*
1482 	 * OP-TEE only supports a single mem_region while the
1483 	 * specification allows for more than one.
1484 	 */
1485 	retrieve_desc = spmc_retrieve_req(cookie);
1486 	if (!retrieve_desc) {
1487 		EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64,
1488 		     cookie);
1489 		return NULL;
1490 	}
1491 
1492 	descr_array = retrieve_desc->mem_access_array;
1493 	offs = READ_ONCE(descr_array->region_offs);
1494 	descr = (struct ffa_mem_region *)((vaddr_t)retrieve_desc + offs);
1495 
1496 	num_pages = READ_ONCE(descr->total_page_count);
1497 	mf = mobj_ffa_spmc_new(cookie, num_pages);
1498 	if (!mf)
1499 		goto out;
1500 
1501 	if (set_pages(descr->address_range_array,
1502 		      READ_ONCE(descr->address_range_count), num_pages, mf)) {
1503 		mobj_ffa_spmc_delete(mf);
1504 		goto out;
1505 	}
1506 
1507 	ret = mf;
1508 
1509 out:
1510 	/* Release RX buffer after the mem retrieve request. */
1511 	thread_smccc(&ffa_rx_release_args);
1512 
1513 	return ret;
1514 }
1515 
1516 static TEE_Result spmc_init(void)
1517 {
1518 	spmc_rxtx_map(&nw_rxtx);
1519 	my_endpoint_id = spmc_get_id();
1520 	DMSG("My endpoint ID %#x", my_endpoint_id);
1521 
1522 	return TEE_SUCCESS;
1523 }
1524 #endif /* !defined(CFG_CORE_SEL1_SPMC) */
1525 
1526 service_init(spmc_init);
1527