xref: /optee_os/core/arch/arm/kernel/thread_spmc.c (revision c1bdf4fc0c68e4555eaddbc7c1944d48d4637287)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2020, Linaro Limited.
 * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
 */

#include <assert.h>
#include <ffa.h>
#include <io.h>
#include <kernel/interrupt.h>
#include <kernel/panic.h>
#include <kernel/secure_partition.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/tee_misc.h>
#include <kernel/thread.h>
#include <kernel/thread_spmc.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <optee_ffa.h>
#include <optee_msg.h>
#include <optee_rpc_cmd.h>
#include <string.h>
#include <sys/queue.h>
#include <tee/entry_std.h>
#include <util.h>

#include "thread_private.h"

struct mem_share_state {
	struct mobj_ffa *mf;
	unsigned int page_count;
	unsigned int region_count;
	unsigned int current_page_idx;
};

struct mem_frag_state {
	struct mem_share_state share;
	tee_mm_entry_t *mm;
	unsigned int frag_offset;
	SLIST_ENTRY(mem_frag_state) link;
};

/*
 * If struct ffa_rxtx::size is 0, the RX/TX buffers are not mapped or
 * initialized.
 *
 * struct ffa_rxtx::spinlock protects the variables below from concurrent
 * access; this includes the content of struct ffa_rxtx::rx and
 * @frag_state_head.
 *
 * struct ffa_rxtx::tx_is_mine is true when we may write to struct
 * ffa_rxtx::tx and false when it is owned by normal world.
 *
 * Note that we can't prevent normal world from updating the content of
 * these buffers, so we must always be careful when reading, even while we
 * hold the lock.
 */

static struct ffa_rxtx nw_rxtx;

static bool is_nw_buf(struct ffa_rxtx *rxtx)
{
	return rxtx == &nw_rxtx;
}

static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
	SLIST_HEAD_INITIALIZER(&frag_state_head);

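/*
 * FFA_MSG_SEND_DIRECT_REQ and FFA_MSG_SEND_DIRECT_RESP pass the sender
 * endpoint ID in bits [31:16] of w1 and the receiver endpoint ID in bits
 * [15:0]. Swapping the two halves of the src/dst field of a request
 * gives the value to use in the corresponding response.
 */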
static uint32_t swap_src_dst(uint32_t src_dst)
{
	return (src_dst >> 16) | (src_dst << 16);
}

void spmc_set_args(struct thread_smc_args *args, uint32_t fid, uint32_t src_dst,
		   uint32_t w2, uint32_t w3, uint32_t w4, uint32_t w5)
{
	*args = (struct thread_smc_args){ .a0 = fid,
					  .a1 = src_dst,
					  .a2 = w2,
					  .a3 = w3,
					  .a4 = w4,
					  .a5 = w5, };
}

void spmc_handle_version(struct thread_smc_args *args)
{
	/*
	 * We currently only support one version, 1.0, so let's keep it
	 * simple.
	 */
	spmc_set_args(args,
		      MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR),
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static void handle_features(struct thread_smc_args *args)
{
	uint32_t ret_fid = 0;
	uint32_t ret_w2 = FFA_PARAM_MBZ;

	switch (args->a1) {
#ifdef ARM64
	case FFA_RXTX_MAP_64:
#endif
	case FFA_RXTX_MAP_32:
		ret_fid = FFA_SUCCESS_32;
		ret_w2 = 0; /* 4kB minimum buffer size and alignment boundary */
		break;
#ifdef ARM64
	case FFA_MEM_SHARE_64:
#endif
	case FFA_MEM_SHARE_32:
		ret_fid = FFA_SUCCESS_32;
		/*
		 * Partition manager supports transmission of a memory
		 * transaction descriptor in a buffer dynamically allocated
		 * by the endpoint.
		 */
		ret_w2 = BIT(0);
		break;

	case FFA_ERROR:
	case FFA_VERSION:
	case FFA_SUCCESS_32:
#ifdef ARM64
	case FFA_SUCCESS_64:
#endif
	case FFA_MEM_FRAG_TX:
	case FFA_MEM_RECLAIM:
	case FFA_MSG_SEND_DIRECT_REQ_32:
	case FFA_INTERRUPT:
	case FFA_PARTITION_INFO_GET:
	case FFA_RX_RELEASE:
		ret_fid = FFA_SUCCESS_32;
		break;
	default:
		ret_fid = FFA_ERROR;
		ret_w2 = FFA_NOT_SUPPORTED;
		break;
	}

	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
{
	tee_mm_entry_t *mm = NULL;

	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
		return FFA_INVALID_PARAMETERS;

	mm = tee_mm_alloc(&tee_mm_shm, sz);
	if (!mm)
		return FFA_NO_MEMORY;

	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
					  sz / SMALL_PAGE_SIZE,
					  MEM_AREA_NSEC_SHM)) {
		tee_mm_free(mm);
		return FFA_INVALID_PARAMETERS;
	}

	*va_ret = (void *)tee_mm_get_smem(mm);
	return 0;
}

static void unmap_buf(void *va, size_t sz)
{
	tee_mm_entry_t *mm = tee_mm_find(&tee_mm_shm, (vaddr_t)va);

	assert(mm);
	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
	tee_mm_free(mm);
}

void spmc_handle_rxtx_map(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
{
	int rc = 0;
	uint32_t ret_fid = FFA_ERROR;
	unsigned int sz = 0;
	paddr_t rx_pa = 0;
	paddr_t tx_pa = 0;
	void *rx = NULL;
	void *tx = NULL;

	cpu_spin_lock(&rxtx->spinlock);

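	/*
	 * a3 holds the RX/TX buffer size as a count of 4kB pages; bits
	 * [63:6] are MBZ, which caps the buffers at 63 pages (252kB) each.
	 */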
	if (args->a3 & GENMASK_64(63, 6)) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	sz = args->a3 * SMALL_PAGE_SIZE;
	if (!sz) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}
	/* TX/RX are swapped compared to the caller */
	tx_pa = args->a2;
	rx_pa = args->a1;

	if (rxtx->size) {
		rc = FFA_DENIED;
		goto out;
	}

	/*
	 * If the buffer comes from a SP the address is virtual and already
	 * mapped.
	 */
	if (is_nw_buf(rxtx)) {
		rc = map_buf(tx_pa, sz, &tx);
		if (rc)
			goto out;
		rc = map_buf(rx_pa, sz, &rx);
		if (rc) {
			unmap_buf(tx, sz);
			goto out;
		}
		rxtx->tx = tx;
		rxtx->rx = rx;
	} else {
		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}

		if (!virt_to_phys((void *)tx_pa) ||
		    !virt_to_phys((void *)rx_pa)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}

		rxtx->tx = (void *)tx_pa;
		rxtx->rx = (void *)rx_pa;
	}

	rxtx->size = sz;
	rxtx->tx_is_mine = true;
	ret_fid = FFA_SUCCESS_32;
	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
out:
	cpu_spin_unlock(&rxtx->spinlock);
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

void spmc_handle_rxtx_unmap(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
{
	uint32_t ret_fid = FFA_ERROR;
	int rc = FFA_INVALID_PARAMETERS;

	cpu_spin_lock(&rxtx->spinlock);

	if (!rxtx->size)
		goto out;

	/* We don't unmap the SP memory as the SP might still use it */
	if (is_nw_buf(rxtx)) {
		unmap_buf(rxtx->rx, rxtx->size);
		unmap_buf(rxtx->tx, rxtx->size);
	}
	rxtx->size = 0;
	rxtx->rx = NULL;
	rxtx->tx = NULL;
	ret_fid = FFA_SUCCESS_32;
	rc = 0;
out:
	cpu_spin_unlock(&rxtx->spinlock);
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

void spmc_handle_rx_release(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
{
	uint32_t ret_fid = 0;
	int rc = 0;

	cpu_spin_lock(&rxtx->spinlock);
	/* The sender's RX is our TX */
	if (!rxtx->size || rxtx->tx_is_mine) {
		ret_fid = FFA_ERROR;
		rc = FFA_DENIED;
	} else {
		ret_fid = FFA_SUCCESS_32;
		rc = 0;
		rxtx->tx_is_mine = true;
	}
	cpu_spin_unlock(&rxtx->spinlock);

	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
{
	return !w0 && !w1 && !w2 && !w3;
}

static bool is_my_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
{
	/*
	 * This depends on which UUID we have been assigned.
	 * TODO add a generic mechanism to obtain our UUID.
	 *
	 * The test below is for the hard coded UUID
	 * 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
	 */
	return w0 == 0xe0786148 && w1 == 0xe311f8e7 &&
	       w2 == 0x02005ebc && w3 == 0x1bc5d5a5;
}

static void handle_partition_info_get(struct thread_smc_args *args,
				      struct ffa_rxtx *rxtx)
{
	uint32_t ret_fid = 0;
	int rc = 0;

	if (!is_nil_uuid(args->a1, args->a2, args->a3, args->a4) &&
	    !is_my_uuid(args->a1, args->a2, args->a3, args->a4)) {
		ret_fid = FFA_ERROR;
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	cpu_spin_lock(&rxtx->spinlock);
	if (rxtx->size && rxtx->tx_is_mine) {
		struct ffa_partition_info *fpi = rxtx->tx;

		fpi->id = SPMC_ENDPOINT_ID;
		fpi->execution_context = CFG_TEE_CORE_NB_CORE;
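		/*
		 * Per FF-A v1.0: bit [0] is set when the partition supports
		 * receipt of direct requests, bit [1] when it can send
		 * direct requests.
		 */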
		fpi->partition_properties = BIT(0) | BIT(1);

		ret_fid = FFA_SUCCESS_32;
		rc = 1;
		rxtx->tx_is_mine = false;
	} else {
		ret_fid = FFA_ERROR;
		if (rxtx->size)
			rc = FFA_BUSY;
		else
			rc = FFA_DENIED; /* TX buffer not set up yet */
	}
	cpu_spin_unlock(&rxtx->spinlock);

out:
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static void handle_yielding_call(struct thread_smc_args *args)
{
	TEE_Result res = 0;

	thread_check_canaries();

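	/*
	 * On success thread_resume_from_rpc() and thread_alloc_and_run()
	 * don't return as execution continues in the resumed or newly
	 * allocated thread. The error codes below are consequently only
	 * reported when no thread could be resumed or allocated.
	 */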
	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
		/* Note connection to struct thread_rpc_arg::ret */
		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
				       0);
		res = TEE_ERROR_BAD_PARAMETERS;
	} else {
		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
				     args->a6, args->a7);
		res = TEE_ERROR_BUSY;
	}
	spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
		      swap_src_dst(args->a1), 0, res, 0, 0);
}

static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
{
	uint64_t cookie = reg_pair_to_64(a5, a4);
	uint32_t res = 0;

	res = mobj_ffa_unregister_by_cookie(cookie);
	switch (res) {
	case TEE_SUCCESS:
	case TEE_ERROR_ITEM_NOT_FOUND:
		return 0;
	case TEE_ERROR_BUSY:
		EMSG("res %#"PRIx32, res);
		return FFA_BUSY;
	default:
		EMSG("res %#"PRIx32, res);
		return FFA_INVALID_PARAMETERS;
	}
}

static void handle_blocking_call(struct thread_smc_args *args)
{
	switch (args->a3) {
	case OPTEE_FFA_GET_API_VERSION:
		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
			      swap_src_dst(args->a1), 0,
			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
			      0);
		break;
	case OPTEE_FFA_GET_OS_VERSION:
		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
			      swap_src_dst(args->a1), 0,
			      CFG_OPTEE_REVISION_MAJOR,
			      CFG_OPTEE_REVISION_MINOR, TEE_IMPL_GIT_SHA1);
		break;
	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
			      swap_src_dst(args->a1), 0, 0,
			      THREAD_RPC_MAX_NUM_PARAMS, 0);
		break;
	case OPTEE_FFA_UNREGISTER_SHM:
		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
			      swap_src_dst(args->a1), 0,
			      handle_unregister_shm(args->a4, args->a5), 0, 0);
		break;
	default:
		EMSG("Unhandled blocking service ID %#"PRIx32,
		     (uint32_t)args->a3);
		panic();
	}
}

static int get_acc_perms(struct ffa_mem_access *mem_acc,
			 unsigned int num_mem_accs, uint8_t *acc_perms,
			 unsigned int *region_offs)
{
	unsigned int n = 0;

	for (n = 0; n < num_mem_accs; n++) {
		struct ffa_mem_access_perm *descr = &mem_acc[n].access_perm;

		if (READ_ONCE(descr->endpoint_id) == SPMC_ENDPOINT_ID) {
			*acc_perms = READ_ONCE(descr->perm);
			*region_offs = READ_ONCE(mem_acc[n].region_offs);
			return 0;
		}
	}

	return FFA_INVALID_PARAMETERS;
}

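/*
 * An FFA_MEM_SHARE transaction descriptor starts with a struct
 * ffa_mem_transaction header, followed by an array of struct
 * ffa_mem_access (endpoint memory access descriptors). Each of those
 * points out a struct ffa_mem_region (the composite memory region
 * descriptor), which in turn is followed by the address ranges making up
 * the shared memory. mem_share_init() validates everything up to and
 * including the composite descriptor and reports where the address
 * ranges start.
 */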
static int mem_share_init(void *buf, size_t blen, unsigned int *page_count,
			  unsigned int *region_count, size_t *addr_range_offs)
{
	const uint8_t exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
	const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
	struct ffa_mem_region *region_descr = NULL;
	struct ffa_mem_transaction *descr = NULL;
	unsigned int num_mem_accs = 0;
	uint8_t mem_acc_perm = 0;
	unsigned int region_descr_offs = 0;
	size_t n = 0;

	if (!ALIGNMENT_IS_OK(buf, struct ffa_mem_transaction) ||
	    blen < sizeof(struct ffa_mem_transaction))
		return FFA_INVALID_PARAMETERS;

	descr = buf;

	/* Check that the endpoint memory access descriptor array fits */
	num_mem_accs = READ_ONCE(descr->mem_access_count);
	if (MUL_OVERFLOW(sizeof(struct ffa_mem_access), num_mem_accs, &n) ||
	    ADD_OVERFLOW(sizeof(*descr), n, &n) || n > blen)
		return FFA_INVALID_PARAMETERS;

	if (READ_ONCE(descr->mem_reg_attr) != exp_mem_reg_attr)
		return FFA_INVALID_PARAMETERS;

	/* Check that the access permissions match what's expected */
	if (get_acc_perms(descr->mem_access_array,
			  num_mem_accs, &mem_acc_perm, &region_descr_offs) ||
	    mem_acc_perm != exp_mem_acc_perm)
		return FFA_INVALID_PARAMETERS;

	/* Check that the composite memory region descriptor fits */
	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
	    n > blen)
		return FFA_INVALID_PARAMETERS;

	if (!ALIGNMENT_IS_OK((vaddr_t)descr + region_descr_offs,
			     struct ffa_mem_region))
		return FFA_INVALID_PARAMETERS;

	region_descr = (struct ffa_mem_region *)((vaddr_t)descr +
						 region_descr_offs);
	*page_count = READ_ONCE(region_descr->total_page_count);
	*region_count = READ_ONCE(region_descr->address_range_count);
	*addr_range_offs = n;
	return 0;
}

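/*
 * Adds the address ranges in @buf to the mobj being shared. Returns the
 * number of consumed bytes if more fragments are expected, 0 when all
 * regions have been added, or a (negative) FFA error code on failure.
 */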
static int add_mem_share_helper(struct mem_share_state *s, void *buf,
				size_t flen)
{
	unsigned int region_count = flen / sizeof(struct ffa_address_range);
	struct ffa_address_range *arange = NULL;
	unsigned int n = 0;

	if (region_count > s->region_count)
		region_count = s->region_count;

	if (!ALIGNMENT_IS_OK(buf, struct ffa_address_range))
		return FFA_INVALID_PARAMETERS;
	arange = buf;

	for (n = 0; n < region_count; n++) {
		unsigned int page_count = READ_ONCE(arange[n].page_count);
		uint64_t addr = READ_ONCE(arange[n].address);

		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
					  addr, page_count))
			return FFA_INVALID_PARAMETERS;
	}

	s->region_count -= region_count;
	if (s->region_count)
		return region_count * sizeof(*arange);

	if (s->current_page_idx != s->page_count)
		return FFA_INVALID_PARAMETERS;

	return 0;
}

static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
{
	int rc = 0;

	rc = add_mem_share_helper(&s->share, buf, flen);
	if (rc >= 0) {
		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
			if (s->share.region_count)
				return s->frag_offset;
			/* We're done, return the number of consumed bytes */
			rc = s->frag_offset;
		} else {
			rc = FFA_INVALID_PARAMETERS;
		}
	}

	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
	if (rc < 0)
		mobj_ffa_sel1_spmc_delete(s->share.mf);
	else
		mobj_ffa_push_to_inactive(s->share.mf);
	free(s);

	return rc;
}

static int add_mem_share(tee_mm_entry_t *mm, void *buf, size_t blen,
			 size_t flen, uint64_t *global_handle)
{
	int rc = 0;
	struct mem_share_state share = { };
	size_t addr_range_offs = 0;
	size_t n = 0;

	if (flen > blen)
		return FFA_INVALID_PARAMETERS;

	rc = mem_share_init(buf, flen, &share.page_count, &share.region_count,
			    &addr_range_offs);
	if (rc)
		return rc;

	if (MUL_OVERFLOW(share.region_count,
			 sizeof(struct ffa_address_range), &n) ||
	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
		return FFA_INVALID_PARAMETERS;

	share.mf = mobj_ffa_sel1_spmc_new(share.page_count);
	if (!share.mf)
		return FFA_NO_MEMORY;

	if (flen != blen) {
		struct mem_frag_state *s = calloc(1, sizeof(*s));

		if (!s) {
			rc = FFA_NO_MEMORY;
			goto err;
		}
		s->share = share;
		s->mm = mm;
		s->frag_offset = addr_range_offs;

		SLIST_INSERT_HEAD(&frag_state_head, s, link);
		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
					flen - addr_range_offs);

		if (rc >= 0)
			*global_handle = mobj_ffa_get_cookie(share.mf);

		return rc;
	}

	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
				  flen - addr_range_offs);
	if (rc) {
		/*
		 * A positive return value means that not all regions were
		 * consumed, which is an error for a complete (unfragmented)
		 * transaction descriptor.
		 */
		rc = FFA_INVALID_PARAMETERS;
		goto err;
	}

	*global_handle = mobj_ffa_push_to_inactive(share.mf);

	return 0;
err:
	mobj_ffa_sel1_spmc_delete(share.mf);
	return rc;
}

static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
				 unsigned int page_count,
				 uint64_t *global_handle, struct ffa_rxtx *rxtx)
{
	int rc = 0;
	size_t len = 0;
	tee_mm_entry_t *mm = NULL;
	vaddr_t offs = pbuf & SMALL_PAGE_MASK;

	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
		return FFA_INVALID_PARAMETERS;
	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
		return FFA_INVALID_PARAMETERS;

	/*
	 * Check that the length reported in blen is covered by len even
	 * if the offset is taken into account.
	 */
	if (len < blen || len - offs < blen)
		return FFA_INVALID_PARAMETERS;

	mm = tee_mm_alloc(&tee_mm_shm, len);
	if (!mm)
		return FFA_NO_MEMORY;

	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
					  page_count, MEM_AREA_NSEC_SHM)) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	cpu_spin_lock(&rxtx->spinlock);
	rc = add_mem_share(mm, (void *)(tee_mm_get_smem(mm) + offs), blen, flen,
			   global_handle);
	cpu_spin_unlock(&rxtx->spinlock);
	if (rc > 0)
		return rc;

	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
out:
	tee_mm_free(mm);
	return rc;
}

static int handle_mem_share_rxbuf(size_t blen, size_t flen,
				  uint64_t *global_handle,
				  struct ffa_rxtx *rxtx)
{
	int rc = FFA_DENIED;

	cpu_spin_lock(&rxtx->spinlock);

	if (rxtx->rx && flen <= rxtx->size)
		rc = add_mem_share(NULL, rxtx->rx, blen, flen, global_handle);

	cpu_spin_unlock(&rxtx->spinlock);

	return rc;
}

static void handle_mem_share(struct thread_smc_args *args,
			     struct ffa_rxtx *rxtx)
{
	uint32_t ret_w1 = 0;
	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
	uint32_t ret_w3 = 0;
	uint32_t ret_fid = FFA_ERROR;
	uint64_t global_handle = 0;
	int rc = 0;

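	/*
	 * Register usage for FFA_MEM_SHARE: a1 holds the total length of
	 * the transaction descriptor, a2 the length of the fragment passed
	 * now, a3 the address of a caller allocated buffer holding the
	 * descriptor (0 if the RX buffer is used instead) and a4 the
	 * number of 4kB pages in that buffer (MBZ when a3 is 0).
	 */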
	/* Check that the MBZs are indeed 0 */
	if (args->a5 || args->a6 || args->a7)
		goto out;

	if (!args->a3) {
		/*
		 * The memory transaction descriptor is passed via our rx
		 * buffer.
		 */
		if (args->a4)
			goto out;
		rc = handle_mem_share_rxbuf(args->a1, args->a2, &global_handle,
					    rxtx);
	} else {
		rc = handle_mem_share_tmem(args->a3, args->a1, args->a2,
					   args->a4, &global_handle, rxtx);
	}
	if (rc < 0) {
		ret_w2 = rc;
		goto out;
	}
	if (rc > 0) {
		ret_fid = FFA_MEM_FRAG_RX;
		ret_w3 = rc;
		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
		goto out;
	}
	ret_fid = FFA_SUCCESS_32;
	reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
out:
	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
}

static struct mem_frag_state *get_frag_state(uint64_t global_handle)
{
	struct mem_frag_state *s = NULL;

	SLIST_FOREACH(s, &frag_state_head, link)
		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
			return s;

	return NULL;
}

static void handle_mem_frag_tx(struct thread_smc_args *args,
			       struct ffa_rxtx *rxtx)
{
	int rc = 0;
	uint64_t global_handle = reg_pair_to_64(READ_ONCE(args->a2),
						READ_ONCE(args->a1));
	size_t flen = READ_ONCE(args->a3);
	struct mem_frag_state *s = NULL;
	tee_mm_entry_t *mm = NULL;
	unsigned int page_count = 0;
	void *buf = NULL;
	uint32_t ret_w1 = 0;
	uint32_t ret_w2 = 0;
	uint32_t ret_w3 = 0;
	uint32_t ret_fid = 0;

	/*
	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
	 * requests.
	 */

	cpu_spin_lock(&rxtx->spinlock);

	s = get_frag_state(global_handle);
	if (!s) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	mm = s->mm;
	if (mm) {
		if (flen > tee_mm_get_bytes(mm)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
		page_count = s->share.page_count;
		buf = (void *)tee_mm_get_smem(mm);
	} else {
		if (flen > rxtx->size) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
		buf = rxtx->rx;
	}

	rc = add_mem_share_frag(s, buf, flen);
out:
	cpu_spin_unlock(&rxtx->spinlock);

	if (rc <= 0 && mm) {
		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
		tee_mm_free(mm);
	}

	if (rc < 0) {
		ret_fid = FFA_ERROR;
		ret_w2 = rc;
	} else if (rc > 0) {
		ret_fid = FFA_MEM_FRAG_RX;
		ret_w3 = rc;
		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
	} else {
		ret_fid = FFA_SUCCESS_32;
		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
	}

	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
}

static void handle_mem_reclaim(struct thread_smc_args *args)
{
	uint32_t ret_val = FFA_INVALID_PARAMETERS;
	uint32_t ret_fid = FFA_ERROR;
	uint64_t cookie = 0;

	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
		goto out;

	cookie = reg_pair_to_64(args->a2, args->a1);
	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
	case TEE_SUCCESS:
		ret_fid = FFA_SUCCESS_32;
		ret_val = 0;
		break;
	case TEE_ERROR_ITEM_NOT_FOUND:
		DMSG("cookie %#"PRIx64" not found", cookie);
		ret_val = FFA_INVALID_PARAMETERS;
		break;
	default:
		DMSG("cookie %#"PRIx64" busy", cookie);
		ret_val = FFA_DENIED;
		break;
	}
out:
	spmc_set_args(args, ret_fid, ret_val, 0, 0, 0, 0);
}

/* Only called from assembly */
void thread_spmc_msg_recv(struct thread_smc_args *args);
void thread_spmc_msg_recv(struct thread_smc_args *args)
{
	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
	switch (args->a0) {
	case FFA_VERSION:
		spmc_handle_version(args);
		break;
	case FFA_FEATURES:
		handle_features(args);
		break;
#ifdef ARM64
	case FFA_RXTX_MAP_64:
#endif
	case FFA_RXTX_MAP_32:
		spmc_handle_rxtx_map(args, &nw_rxtx);
		break;
	case FFA_RXTX_UNMAP:
		spmc_handle_rxtx_unmap(args, &nw_rxtx);
		break;
	case FFA_RX_RELEASE:
		spmc_handle_rx_release(args, &nw_rxtx);
		break;
	case FFA_PARTITION_INFO_GET:
		handle_partition_info_get(args, &nw_rxtx);
		break;
	case FFA_INTERRUPT:
		itr_core_handler();
		spmc_set_args(args, FFA_SUCCESS_32, args->a1, 0, 0, 0, 0);
		break;
	case FFA_MSG_SEND_DIRECT_REQ_32:
		if (IS_ENABLED(CFG_SECURE_PARTITION) &&
		    FFA_DST(args->a1) != SPMC_ENDPOINT_ID) {
			spmc_sp_start_thread(args);
			break;
		}

		if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
			handle_yielding_call(args);
		else
			handle_blocking_call(args);
		break;
#ifdef ARM64
	case FFA_MEM_SHARE_64:
#endif
	case FFA_MEM_SHARE_32:
		handle_mem_share(args, &nw_rxtx);
		break;
	case FFA_MEM_RECLAIM:
		handle_mem_reclaim(args);
		break;
	case FFA_MEM_FRAG_TX:
		handle_mem_frag_tx(args, &nw_rxtx);
		break;
	default:
		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
	}
}

static uint32_t yielding_call_with_arg(uint64_t cookie, uint32_t offset)
{
	size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
	struct thread_ctx *thr = threads + thread_get_id();
	uint32_t rv = TEE_ERROR_BAD_PARAMETERS;
	struct optee_msg_arg *arg = NULL;
	struct mobj *mobj = NULL;
	uint32_t num_params = 0;
	size_t sz = 0;

	mobj = mobj_ffa_get_by_cookie(cookie, 0);
	if (!mobj) {
		EMSG("Can't find cookie %#"PRIx64, cookie);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	rv = mobj_inc_map(mobj);
	if (rv)
		goto out_put_mobj;

	rv = TEE_ERROR_BAD_PARAMETERS;
	arg = mobj_get_va(mobj, offset);
	if (!arg)
		goto out_dec_map;

	if (!mobj_get_va(mobj, offset + sizeof(*arg)))
		goto out_dec_map;

	num_params = READ_ONCE(arg->num_params);
	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
		goto out_dec_map;

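	/*
	 * The RPC argument struct is expected to follow directly after
	 * struct optee_msg_arg and its parameters in the same shared
	 * buffer. Check that both fit before recording where this thread
	 * passes its RPC arguments.
	 */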
	sz = OPTEE_MSG_GET_ARG_SIZE(num_params);
	thr->rpc_arg = mobj_get_va(mobj, offset + sz);
	if (!thr->rpc_arg || !mobj_get_va(mobj, offset + sz + sz_rpc))
		goto out_dec_map;

	rv = tee_entry_std(arg, num_params);

	thread_rpc_shm_cache_clear(&thr->shm_cache);
	thr->rpc_arg = NULL;

out_dec_map:
	mobj_dec_map(mobj);
out_put_mobj:
	mobj_put(mobj);
	return rv;
}

/*
 * Helper routine for the assembly function thread_std_smc_entry()
 *
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
				       uint32_t a2, uint32_t a3,
				       uint32_t a4, uint32_t a5 __unused)
{
	/*
	 * Arguments are supplied from handle_yielding_call() as:
	 * a0 <- w1
	 * a1 <- w3
	 * a2 <- w4
	 * a3 <- w5
	 * a4 <- w6
	 * a5 <- w7
	 */
	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
	if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG)
		return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4);
	return FFA_DENIED;
}

static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
{
	uint64_t offs = tpm->u.memref.offs;

	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;

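	/* Fail if the offset was truncated by the narrower fmem fields */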
	param->u.fmem.offs_low = offs;
	param->u.fmem.offs_high = offs >> 32;
	if (param->u.fmem.offs_high != offs >> 32)
		return false;

	param->u.fmem.size = tpm->u.memref.size;
	if (tpm->u.memref.mobj) {
		uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj);

		/* If a mobj is passed it better be one with a valid cookie. */
		if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
			return false;
		param->u.fmem.global_id = cookie;
	} else {
		param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
	}

	return true;
}

static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
			    struct thread_param *params,
			    struct optee_msg_arg **arg_ret)
{
	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
	struct thread_ctx *thr = threads + thread_get_id();
	struct optee_msg_arg *arg = thr->rpc_arg;

	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!arg) {
		EMSG("rpc_arg not set");
		return TEE_ERROR_GENERIC;
	}

	memset(arg, 0, sz);
	arg->cmd = cmd;
	arg->num_params = num_params;
	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */

	for (size_t n = 0; n < num_params; n++) {
		switch (params[n].attr) {
		case THREAD_PARAM_ATTR_NONE:
			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
			break;
		case THREAD_PARAM_ATTR_VALUE_IN:
		case THREAD_PARAM_ATTR_VALUE_OUT:
		case THREAD_PARAM_ATTR_VALUE_INOUT:
			arg->params[n].attr = params[n].attr -
					      THREAD_PARAM_ATTR_VALUE_IN +
					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
			arg->params[n].u.value.a = params[n].u.value.a;
			arg->params[n].u.value.b = params[n].u.value.b;
			arg->params[n].u.value.c = params[n].u.value.c;
			break;
		case THREAD_PARAM_ATTR_MEMREF_IN:
		case THREAD_PARAM_ATTR_MEMREF_OUT:
		case THREAD_PARAM_ATTR_MEMREF_INOUT:
			if (!set_fmem(arg->params + n, params + n))
				return TEE_ERROR_BAD_PARAMETERS;
			break;
		default:
			return TEE_ERROR_BAD_PARAMETERS;
		}
	}

	if (arg_ret)
		*arg_ret = arg;

	return TEE_SUCCESS;
}

static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
				struct thread_param *params)
{
	for (size_t n = 0; n < num_params; n++) {
		switch (params[n].attr) {
		case THREAD_PARAM_ATTR_VALUE_OUT:
		case THREAD_PARAM_ATTR_VALUE_INOUT:
			params[n].u.value.a = arg->params[n].u.value.a;
			params[n].u.value.b = arg->params[n].u.value.b;
			params[n].u.value.c = arg->params[n].u.value.c;
			break;
		case THREAD_PARAM_ATTR_MEMREF_OUT:
		case THREAD_PARAM_ATTR_MEMREF_INOUT:
			params[n].u.memref.size = arg->params[n].u.fmem.size;
			break;
		default:
			break;
		}
	}

	return arg->ret;
}

uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
			struct thread_param *params)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	struct optee_msg_arg *arg = NULL;
	uint32_t ret = 0;

	ret = get_rpc_arg(cmd, num_params, params, &arg);
	if (ret)
		return ret;

	thread_rpc(&rpc_arg);

	return get_rpc_arg_res(arg, num_params, params);
}

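/*
 * Asks normal world to free a previously allocated shared memory object.
 * The local reference is dropped and the FF-A cookie is unregistered
 * before the RPC is made so that normal world can reclaim the memory.
 */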
static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
	uint32_t res2 = 0;
	uint32_t res = 0;

	DMSG("freeing cookie %#"PRIx64, cookie);

	res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);

	mobj_put(mobj);
	res2 = mobj_ffa_unregister_by_cookie(cookie);
	if (res2)
		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
		     cookie, res2);
	if (!res)
		thread_rpc(&rpc_arg);
}

static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
	struct optee_msg_arg *arg = NULL;
	unsigned int internal_offset = 0;
	struct mobj *mobj = NULL;
	uint64_t cookie = 0;

	if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
		return NULL;

	thread_rpc(&rpc_arg);

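	/*
	 * Normal world is expected to have shared the new buffer with
	 * FFA_MEM_SHARE and to return it as a single FMEM output parameter
	 * where the global_id field holds the resulting FF-A cookie.
	 */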
	if (arg->num_params != 1 ||
	    arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
		return NULL;

	internal_offset = arg->params->u.fmem.internal_offs;
	cookie = arg->params->u.fmem.global_id;
	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
	if (!mobj) {
		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
		     cookie, internal_offset);
		return NULL;
	}

	assert(mobj_is_nonsec(mobj));

	if (mobj_inc_map(mobj)) {
		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
		mobj_put(mobj);
		return NULL;
	}

	return mobj;
}

struct mobj *thread_rpc_alloc_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
}

struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
}

void thread_rpc_free_kernel_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL, mobj_get_cookie(mobj), mobj);
}

void thread_rpc_free_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
			mobj);
}

struct mobj *thread_rpc_alloc_global_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
}

void thread_rpc_free_global_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL, mobj_get_cookie(mobj),
			mobj);
}
1184