xref: /optee_os/core/arch/arm/kernel/thread_spmc.c (revision f6e2b9e2d1a270542c6f6f5e36ed4e36abe18256)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2020, Linaro Limited.
4  * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
5  */
6 
7 #include <assert.h>
8 #include <ffa.h>
9 #include <io.h>
10 #include <kernel/interrupt.h>
11 #include <kernel/panic.h>
12 #include <kernel/secure_partition.h>
13 #include <kernel/spinlock.h>
14 #include <kernel/spmc_sp_handler.h>
15 #include <kernel/tee_misc.h>
16 #include <kernel/thread.h>
17 #include <kernel/thread_spmc.h>
18 #include <mm/core_mmu.h>
19 #include <mm/mobj.h>
20 #include <optee_ffa.h>
21 #include <optee_msg.h>
22 #include <optee_rpc_cmd.h>
23 #include <string.h>
24 #include <sys/queue.h>
25 #include <tee/entry_std.h>
26 #include <util.h>
27 
28 #include "thread_private.h"
29 
30 /* Table 39: Constituent memory region descriptor */
31 struct constituent_address_range {
32 	uint64_t address;
33 	uint32_t page_count;
34 	uint32_t reserved;
35 };
36 
37 /* Table 38: Composite memory region descriptor */
38 struct mem_region_descr {
39 	uint32_t total_page_count;
40 	uint32_t address_range_count;
41 	uint64_t reserved;
42 	struct constituent_address_range address_range_array[];
43 };
44 
45 /* Table 40: Memory access permissions descriptor */
46 struct mem_access_perm_descr {
47 	uint16_t endpoint_id;
48 	uint8_t access_perm;
49 	uint8_t flags;
50 };
51 
52 /* Table 41: Endpoint memory access descriptor */
53 struct mem_accsess_descr {
54 	struct mem_access_perm_descr mem_access_perm_descr;
55 	uint32_t mem_region_offs;
56 	uint64_t reserved;
57 };
58 
59 /* Table 44: Lend, donate or share memory transaction descriptor */
60 struct mem_transaction_descr {
61 	uint16_t sender_id;
62 	uint8_t mem_reg_attr;
63 	uint8_t reserved0;
64 	uint32_t flags;
65 	uint64_t global_handle;
66 	uint64_t tag;
67 	uint32_t reserved1;
68 	uint32_t mem_access_descr_count;
69 	struct mem_accsess_descr mem_accsess_descr_array[];
70 };
71 
72 struct ffa_partition_info {
73 	uint16_t id;
74 	uint16_t execution_context;
75 	uint32_t partition_properties;
76 };
77 
78 struct mem_share_state {
79 	struct mobj_ffa *mf;
80 	unsigned int page_count;
81 	unsigned int region_count;
82 	unsigned int current_page_idx;
83 };
84 
85 struct mem_frag_state {
86 	struct mem_share_state share;
87 	tee_mm_entry_t *mm;
88 	unsigned int frag_offset;
89 	SLIST_ENTRY(mem_frag_state) link;
90 };
91 
92 /*
93  * If struct ffa_rxtx::size is 0, RX/TX buffers are not mapped or initialized.
94  *
95  * struct ffa_rxtx::spinlock protects the variables below from concurrent
96  * access; this includes the use of the content of struct ffa_rxtx::rx and
97  * @frag_state_head.
98  *
99  * struct ffa_rxtx::tx_is_mine is true when we may write to struct
100  * ffa_rxtx::tx and false when it is owned by normal world.
101  *
102  * Note that we can't prevent normal world from updating the content of
103  * these buffers, so we must always be careful when reading, even while
104  * we hold the lock.
105  */
106 
107 static struct ffa_rxtx nw_rxtx;
108 
109 static bool is_nw_buf(struct ffa_rxtx *rxtx)
110 {
111 	return rxtx == &nw_rxtx;
112 }
113 
114 static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
115 	SLIST_HEAD_INITIALIZER(&frag_state_head);
116 
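/*
 * The sender and receiver endpoint IDs are packed as two 16-bit fields in
 * w1 of a direct message; swapping them turns the received src/dst pair
 * into the one used for the reply.
 */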
117 static uint32_t swap_src_dst(uint32_t src_dst)
118 {
119 	return (src_dst >> 16) | (src_dst << 16);
120 }
121 
122 void spmc_set_args(struct thread_smc_args *args, uint32_t fid, uint32_t src_dst,
123 		   uint32_t w2, uint32_t w3, uint32_t w4, uint32_t w5)
124 {
125 	*args = (struct thread_smc_args){ .a0 = fid,
126 					  .a1 = src_dst,
127 					  .a2 = w2,
128 					  .a3 = w3,
129 					  .a4 = w4,
130 					  .a5 = w5, };
131 }
132 
133 void spmc_handle_version(struct thread_smc_args *args)
134 {
135 	/*
136 	 * We currently only support one version, 1.0, so let's keep it
137 	 * simple.
138 	 */
139 	spmc_set_args(args,
140 		      MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR),
141 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
142 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
143 }
144 
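/*
 * FFA_FEATURES: report whether a given FF-A function ID is implemented
 * and, for FFA_RXTX_MAP and FFA_MEM_SHARE, the feature properties in w2.
 */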
145 static void handle_features(struct thread_smc_args *args)
146 {
147 	uint32_t ret_fid = 0;
148 	uint32_t ret_w2 = FFA_PARAM_MBZ;
149 
150 	switch (args->a1) {
151 #ifdef ARM64
152 	case FFA_RXTX_MAP_64:
153 #endif
154 	case FFA_RXTX_MAP_32:
155 		ret_fid = FFA_SUCCESS_32;
156 		ret_w2 = 0; /* 4kB minimum buffer size and alignment boundary */
157 		break;
158 #ifdef ARM64
159 	case FFA_MEM_SHARE_64:
160 #endif
161 	case FFA_MEM_SHARE_32:
162 		ret_fid = FFA_SUCCESS_32;
163 		/*
164 		 * Partition manager supports transmission of a memory
165 		 * transaction descriptor in a buffer dynamically allocated
166 		 * by the endpoint.
167 		 */
168 		ret_w2 = BIT(0);
169 		break;
170 
171 	case FFA_ERROR:
172 	case FFA_VERSION:
173 	case FFA_SUCCESS_32:
174 #ifdef ARM64
175 	case FFA_SUCCESS_64:
176 #endif
177 	case FFA_MEM_FRAG_TX:
178 	case FFA_MEM_RECLAIM:
179 	case FFA_MSG_SEND_DIRECT_REQ_32:
180 	case FFA_INTERRUPT:
181 	case FFA_PARTITION_INFO_GET:
182 	case FFA_RX_RELEASE:
183 		ret_fid = FFA_SUCCESS_32;
184 		break;
185 	default:
186 		ret_fid = FFA_ERROR;
187 		ret_w2 = FFA_NOT_SUPPORTED;
188 		break;
189 	}
190 
191 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
192 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
193 }
194 
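/*
 * Map a physically contiguous non-secure buffer into the non-secure
 * shared memory VA range. Returns 0 on success or an FFA_* error code.
 */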
195 static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
196 {
197 	tee_mm_entry_t *mm = NULL;
198 
199 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
200 		return FFA_INVALID_PARAMETERS;
201 
202 	mm = tee_mm_alloc(&tee_mm_shm, sz);
203 	if (!mm)
204 		return FFA_NO_MEMORY;
205 
206 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
207 					  sz / SMALL_PAGE_SIZE,
208 					  MEM_AREA_NSEC_SHM)) {
209 		tee_mm_free(mm);
210 		return FFA_INVALID_PARAMETERS;
211 	}
212 
213 	*va_ret = (void *)tee_mm_get_smem(mm);
214 	return 0;
215 }
216 
217 static void unmap_buf(void *va, size_t sz)
218 {
219 	tee_mm_entry_t *mm = tee_mm_find(&tee_mm_shm, (vaddr_t)va);
220 
221 	assert(mm);
222 	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
223 	tee_mm_free(mm);
224 }
225 
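/*
 * FFA_RXTX_MAP: register the caller's RX/TX buffer pair. a1 and a2 hold
 * the caller's buffer addresses (swapped here to become our RX and TX)
 * and a3 the buffer size in 4kB pages. Normal world passes physical
 * addresses which are mapped below, while SP buffers are already mapped
 * virtual addresses.
 */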
226 void spmc_handle_rxtx_map(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
227 {
228 	int rc = 0;
229 	uint32_t ret_fid = FFA_ERROR;
230 	unsigned int sz = 0;
231 	paddr_t rx_pa = 0;
232 	paddr_t tx_pa = 0;
233 	void *rx = NULL;
234 	void *tx = NULL;
235 
236 	cpu_spin_lock(&rxtx->spinlock);
237 
238 	if (args->a3 & GENMASK_64(63, 6)) {
239 		rc = FFA_INVALID_PARAMETERS;
240 		goto out;
241 	}
242 
243 	sz = args->a3 * SMALL_PAGE_SIZE;
244 	if (!sz) {
245 		rc = FFA_INVALID_PARAMETERS;
246 		goto out;
247 	}
248 	/* TX/RX are swapped compared to the caller */
249 	tx_pa = args->a2;
250 	rx_pa = args->a1;
251 
252 	if (rxtx->size) {
253 		rc = FFA_DENIED;
254 		goto out;
255 	}
256 
257 	/*
258 	 * If the buffer comes from an SP, the address is virtual and
259 	 * already mapped.
260 	 */
261 	if (is_nw_buf(rxtx)) {
262 		rc = map_buf(tx_pa, sz, &tx);
263 		if (rc)
264 			goto out;
265 		rc = map_buf(rx_pa, sz, &rx);
266 		if (rc) {
267 			unmap_buf(tx, sz);
268 			goto out;
269 		}
270 		rxtx->tx = tx;
271 		rxtx->rx = rx;
272 	} else {
273 		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
274 			rc = FFA_INVALID_PARAMETERS;
275 			goto out;
276 		}
277 
278 		if (!virt_to_phys((void *)tx_pa) ||
279 		    !virt_to_phys((void *)rx_pa)) {
280 			rc = FFA_INVALID_PARAMETERS;
281 			goto out;
282 		}
283 
284 		rxtx->tx = (void *)tx_pa;
285 		rxtx->rx = (void *)rx_pa;
286 	}
287 
288 	rxtx->size = sz;
289 	rxtx->tx_is_mine = true;
290 	ret_fid = FFA_SUCCESS_32;
291 	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
292 	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
293 out:
294 	cpu_spin_unlock(&rxtx->spinlock);
295 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
296 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
297 }
298 
299 void spmc_handle_rxtx_unmap(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
300 {
301 	uint32_t ret_fid = FFA_ERROR;
302 	int rc = FFA_INVALID_PARAMETERS;
303 
304 	cpu_spin_lock(&rxtx->spinlock);
305 
306 	if (!rxtx->size)
307 		goto out;
308 
309 	/* We don't unmap the SP memory as the SP might still use it */
310 	if (is_nw_buf(rxtx)) {
311 		unmap_buf(rxtx->rx, rxtx->size);
312 		unmap_buf(rxtx->tx, rxtx->size);
313 	}
314 	rxtx->size = 0;
315 	rxtx->rx = NULL;
316 	rxtx->tx = NULL;
317 	ret_fid = FFA_SUCCESS_32;
318 	rc = 0;
319 out:
320 	cpu_spin_unlock(&rxtx->spinlock);
321 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
322 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
323 }
324 
325 void spmc_handle_rx_release(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
326 {
327 	uint32_t ret_fid = 0;
328 	int rc = 0;
329 
330 	cpu_spin_lock(&rxtx->spinlock);
331 	/* The sender's RX is our TX */
332 	if (!rxtx->size || rxtx->tx_is_mine) {
333 		ret_fid = FFA_ERROR;
334 		rc = FFA_DENIED;
335 	} else {
336 		ret_fid = FFA_SUCCESS_32;
337 		rc = 0;
338 		rxtx->tx_is_mine = true;
339 	}
340 	cpu_spin_unlock(&rxtx->spinlock);
341 
342 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
343 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
344 }
345 
346 static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
347 {
348 	return !w0 && !w1 && !w2 && !w3;
349 }
350 
351 static bool is_optee_os_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
352 {
353 	return w0 == OPTEE_MSG_OS_OPTEE_UUID_0 &&
354 	       w1 == OPTEE_MSG_OS_OPTEE_UUID_1 &&
355 	       w2 == OPTEE_MSG_OS_OPTEE_UUID_2 &&
356 	       w3 == OPTEE_MSG_OS_OPTEE_UUID_3;
357 }
358 
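/*
 * FFA_PARTITION_INFO_GET: we only answer for the nil UUID or the OP-TEE
 * OS UUID and describe a single partition, written to the caller's RX
 * buffer (our TX buffer) with the count returned in w2.
 */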
359 static void handle_partition_info_get(struct thread_smc_args *args,
360 				      struct ffa_rxtx *rxtx)
361 {
362 	uint32_t ret_fid = 0;
363 	int rc = 0;
364 
365 	if (!is_nil_uuid(args->a1, args->a2, args->a3, args->a4) &&
366 	    !is_optee_os_uuid(args->a1, args->a2, args->a3, args->a4)) {
367 		ret_fid = FFA_ERROR;
368 		rc = FFA_INVALID_PARAMETERS;
369 		goto out;
370 	}
371 
372 	cpu_spin_lock(&rxtx->spinlock);
373 	if (rxtx->size && rxtx->tx_is_mine) {
374 		struct ffa_partition_info *fpi = rxtx->tx;
375 
376 		fpi->id = SPMC_ENDPOINT_ID;
377 		fpi->execution_context = CFG_TEE_CORE_NB_CORE;
378 		fpi->partition_properties = BIT(0) | BIT(1);
379 
380 		ret_fid = FFA_SUCCESS_32;
381 		rc = 1;
382 		rxtx->tx_is_mine = false;
383 	} else {
384 		ret_fid = FFA_ERROR;
385 		if (rxtx->size)
386 			rc = FFA_BUSY;
387 		else
388 			rc = FFA_DENIED; /* TX buffer not set up yet */
389 	}
390 	cpu_spin_unlock(&rxtx->spinlock);
391 
392 out:
393 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
394 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
395 }
396 
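/*
 * Yielding calls run on an OP-TEE thread: either resume a thread that
 * previously left with an RPC or allocate a new one. These calls only
 * return here on failure, in which case the error is sent back in the
 * direct response.
 */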
397 static void handle_yielding_call(struct thread_smc_args *args)
398 {
399 	uint32_t ret_val = 0;
400 
401 	thread_check_canaries();
402 
403 	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
404 		/* Note connection to struct thread_rpc_arg::ret */
405 		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
406 				       0);
407 		ret_val = FFA_INVALID_PARAMETERS;
408 	} else {
409 		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5);
410 		ret_val = FFA_BUSY;
411 	}
412 	spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32, swap_src_dst(args->a1),
413 		      0, ret_val, 0, 0);
414 }
415 
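/*
 * Blocking calls are serviced directly here without allocating an OP-TEE
 * thread; the result is returned in the direct response.
 */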
416 static void handle_blocking_call(struct thread_smc_args *args)
417 {
418 	switch (args->a3) {
419 	case OPTEE_FFA_GET_API_VERSION:
420 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
421 			      swap_src_dst(args->a1), 0,
422 			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
423 			      0);
424 		break;
425 	case OPTEE_FFA_GET_OS_VERSION:
426 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
427 			      swap_src_dst(args->a1), 0,
428 			      CFG_OPTEE_REVISION_MAJOR,
429 			      CFG_OPTEE_REVISION_MINOR, TEE_IMPL_GIT_SHA1);
430 		break;
431 	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
432 		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
433 			      swap_src_dst(args->a1), 0, 0, 0, 0);
434 		break;
435 	default:
436 		EMSG("Unhandled blocking service ID %#"PRIx32,
437 		     (uint32_t)args->a3);
438 		panic();
439 	}
440 }
441 
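/*
 * Find the memory access permissions and the memory region offset for our
 * endpoint (SPMC_ENDPOINT_ID) in the endpoint memory access descriptor
 * array.
 */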
442 static int get_acc_perms(struct mem_accsess_descr *mem_acc,
443 			 unsigned int num_mem_accs, uint8_t *acc_perms,
444 			 unsigned int *region_offs)
445 {
446 	unsigned int n = 0;
447 
448 	for (n = 0; n < num_mem_accs; n++) {
449 		struct mem_access_perm_descr *descr =
450 			&mem_acc[n].mem_access_perm_descr;
451 
452 		if (READ_ONCE(descr->endpoint_id) == SPMC_ENDPOINT_ID) {
453 			*acc_perms = READ_ONCE(descr->access_perm);
454 			*region_offs = READ_ONCE(mem_acc[n].mem_region_offs);
455 			return 0;
456 		}
457 	}
458 
459 	return FFA_INVALID_PARAMETERS;
460 }
461 
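/*
 * Validate the memory transaction descriptor in @buf and return the total
 * page count, the number of constituent address ranges and the offset of
 * the address range array.
 */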
462 static int mem_share_init(void *buf, size_t blen, unsigned int *page_count,
463 			  unsigned int *region_count, size_t *addr_range_offs)
464 {
465 	struct mem_region_descr *region_descr = NULL;
466 	struct mem_transaction_descr *descr = NULL;
467 	const uint8_t exp_mem_acc_perm = 0x6; /* Not executable, Read-write */
468 	/* Normal memory, Write-Back cacheable, Inner shareable */
469 	const uint8_t exp_mem_reg_attr = 0x2f;
470 	unsigned int num_mem_accs = 0;
471 	uint8_t mem_acc_perm = 0;
472 	unsigned int region_descr_offs = 0;
473 	size_t n = 0;
474 
475 	if (!ALIGNMENT_IS_OK(buf, struct mem_transaction_descr) ||
476 	    blen < sizeof(struct mem_transaction_descr))
477 		return FFA_INVALID_PARAMETERS;
478 
479 	descr = buf;
480 
481 	/* Check that the endpoint memory access descriptor array fits */
482 	num_mem_accs = READ_ONCE(descr->mem_access_descr_count);
483 	if (MUL_OVERFLOW(sizeof(struct mem_accsess_descr), num_mem_accs, &n) ||
484 	    ADD_OVERFLOW(sizeof(*descr), n, &n) || n > blen)
485 		return FFA_INVALID_PARAMETERS;
486 
487 	if (READ_ONCE(descr->mem_reg_attr) != exp_mem_reg_attr)
488 		return FFA_INVALID_PARAMETERS;
489 
490 	/* Check that the access permissions match what's expected */
491 	if (get_acc_perms(descr->mem_accsess_descr_array,
492 			  num_mem_accs, &mem_acc_perm, &region_descr_offs) ||
493 	    mem_acc_perm != exp_mem_acc_perm)
494 		return FFA_INVALID_PARAMETERS;
495 
496 	/* Check that the Composite memory region descriptor fits */
497 	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
498 	    n > blen)
499 		return FFA_INVALID_PARAMETERS;
500 
501 	if (!ALIGNMENT_IS_OK((vaddr_t)descr + region_descr_offs,
502 			     struct mem_region_descr))
503 		return FFA_INVALID_PARAMETERS;
504 
505 	region_descr = (struct mem_region_descr *)((vaddr_t)descr +
506 						    region_descr_offs);
507 	*page_count = READ_ONCE(region_descr->total_page_count);
508 	*region_count = READ_ONCE(region_descr->address_range_count);
509 	*addr_range_offs = n;
510 	return 0;
511 }
512 
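/*
 * Register the constituent address ranges covered by @flen with the FF-A
 * mobj. Returns the number of consumed bytes when more ranges remain, 0
 * when the share is complete, or a negative FFA_* error code.
 */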
513 static int add_mem_share_helper(struct mem_share_state *s, void *buf,
514 				size_t flen)
515 {
516 	unsigned int region_count = flen /
517 				    sizeof(struct constituent_address_range);
518 	struct constituent_address_range *arange = NULL;
519 	unsigned int n = 0;
520 
521 	if (region_count > s->region_count)
522 		region_count = s->region_count;
523 
524 	if (!ALIGNMENT_IS_OK(buf, struct constituent_address_range))
525 		return FFA_INVALID_PARAMETERS;
526 	arange = buf;
527 
528 	for (n = 0; n < region_count; n++) {
529 		unsigned int page_count = READ_ONCE(arange[n].page_count);
530 		uint64_t addr = READ_ONCE(arange[n].address);
531 
532 		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
533 					  addr, page_count))
534 			return FFA_INVALID_PARAMETERS;
535 	}
536 
537 	s->region_count -= region_count;
538 	if (s->region_count)
539 		return region_count * sizeof(*arange);
540 
541 	if (s->current_page_idx != s->page_count)
542 		return FFA_INVALID_PARAMETERS;
543 
544 	return 0;
545 }
546 
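/*
 * Feed one fragment to add_mem_share_helper() and update the accumulated
 * fragment offset. When the share completes, or on error, the fragment
 * state is unlinked and freed and the mobj is pushed to the inactive list
 * (success) or deleted (error).
 */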
547 static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
548 {
549 	int rc = 0;
550 
551 	rc = add_mem_share_helper(&s->share, buf, flen);
552 	if (rc >= 0) {
553 		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
554 			if (s->share.region_count)
555 				return s->frag_offset;
556 			/* We're done, return the number of consumed bytes */
557 			rc = s->frag_offset;
558 		} else {
559 			rc = FFA_INVALID_PARAMETERS;
560 		}
561 	}
562 
563 	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
564 	if (rc < 0)
565 		mobj_ffa_sel1_spmc_delete(s->share.mf);
566 	else
567 		mobj_ffa_push_to_inactive(s->share.mf);
568 	free(s);
569 
570 	return rc;
571 }
572 
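/*
 * Parse and register a memory share described by the transaction
 * descriptor in @buf. If only the first fragment is supplied (flen < blen)
 * the remaining state is tracked in @frag_state_head until FFA_MEM_FRAG_TX
 * completes it. On success *global_handle is set to the cookie identifying
 * the share.
 */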
573 static int add_mem_share(tee_mm_entry_t *mm, void *buf, size_t blen,
574 			 size_t flen, uint64_t *global_handle)
575 {
576 	int rc = 0;
577 	struct mem_share_state share = { };
578 	size_t addr_range_offs = 0;
579 	size_t n = 0;
580 
581 	if (flen > blen)
582 		return FFA_INVALID_PARAMETERS;
583 
584 	rc = mem_share_init(buf, flen, &share.page_count, &share.region_count,
585 			    &addr_range_offs);
586 	if (rc)
587 		return rc;
588 
589 	if (MUL_OVERFLOW(share.region_count,
590 			 sizeof(struct constituent_address_range), &n) ||
591 	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
592 		return FFA_INVALID_PARAMETERS;
593 
594 	share.mf = mobj_ffa_sel1_spmc_new(share.page_count);
595 	if (!share.mf)
596 		return FFA_NO_MEMORY;
597 
598 	if (flen != blen) {
599 		struct mem_frag_state *s = calloc(sizeof(*s), 1);
600 
601 		if (!s) {
602 			rc = FFA_NO_MEMORY;
603 			goto err;
604 		}
605 		s->share = share;
606 		s->mm = mm;
607 		s->frag_offset = addr_range_offs;
608 
609 		SLIST_INSERT_HEAD(&frag_state_head, s, link);
610 		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
611 					flen - addr_range_offs);
612 
613 		if (rc >= 0)
614 			*global_handle = mobj_ffa_get_cookie(share.mf);
615 
616 		return rc;
617 	}
618 
619 	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
620 				  flen - addr_range_offs);
621 	if (rc) {
622 		/*
623 		 * The number of consumed bytes may be returned instead of 0
624 		 * for done; treat that as an error here too.
625 		 */
626 		rc = FFA_INVALID_PARAMETERS;
627 		goto err;
628 	}
629 
630 	*global_handle = mobj_ffa_push_to_inactive(share.mf);
631 
632 	return 0;
633 err:
634 	mobj_ffa_sel1_spmc_delete(share.mf);
635 	return rc;
636 }
637 
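/*
 * The transaction descriptor is passed in a buffer allocated by the
 * caller: map it as non-secure shared memory and hand it to
 * add_mem_share(). The mapping is kept while more fragments are expected
 * (rc > 0) and torn down otherwise.
 */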
638 static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
639 				 unsigned int page_count,
640 				 uint64_t *global_handle, struct ffa_rxtx *rxtx)
641 {
642 	int rc = 0;
643 	size_t len = 0;
644 	tee_mm_entry_t *mm = NULL;
645 	vaddr_t offs = pbuf & SMALL_PAGE_MASK;
646 
647 	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
648 		return FFA_INVALID_PARAMETERS;
649 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
650 		return FFA_INVALID_PARAMETERS;
651 
652 	/*
653 	 * Check that the length reported in blen is covered by len even
654 	 * when the offset is taken into account.
655 	 */
656 	if (len < blen || len - offs < blen)
657 		return FFA_INVALID_PARAMETERS;
658 
659 	mm = tee_mm_alloc(&tee_mm_shm, len);
660 	if (!mm)
661 		return FFA_NO_MEMORY;
662 
663 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
664 					  page_count, MEM_AREA_NSEC_SHM)) {
665 		rc = FFA_INVALID_PARAMETERS;
666 		goto out;
667 	}
668 
669 	cpu_spin_lock(&rxtx->spinlock);
670 	rc = add_mem_share(mm, (void *)(tee_mm_get_smem(mm) + offs), blen, flen,
671 			   global_handle);
672 	cpu_spin_unlock(&rxtx->spinlock);
673 	if (rc > 0)
674 		return rc;
675 
676 	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
677 out:
678 	tee_mm_free(mm);
679 	return rc;
680 }
681 
682 static int handle_mem_share_rxbuf(size_t blen, size_t flen,
683 				  uint64_t *global_handle,
684 				  struct ffa_rxtx *rxtx)
685 {
686 	int rc = FFA_DENIED;
687 
688 	cpu_spin_lock(&rxtx->spinlock);
689 
690 	if (rxtx->rx && flen <= rxtx->size)
691 		rc = add_mem_share(NULL, rxtx->rx, blen, flen, global_handle);
692 
693 	cpu_spin_unlock(&rxtx->spinlock);
694 
695 	return rc;
696 }
697 
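/*
 * FFA_MEM_SHARE: the memory transaction descriptor is either in our RX
 * buffer (a3 == 0) or in a caller-allocated buffer at address a3 spanning
 * a4 pages. a1 holds the total descriptor length and a2 the length of
 * this fragment.
 */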
698 static void handle_mem_share(struct thread_smc_args *args,
699 			     struct ffa_rxtx *rxtx)
700 {
701 	uint32_t ret_w1 = 0;
702 	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
703 	uint32_t ret_w3 = 0;
704 	uint32_t ret_fid = FFA_ERROR;
705 	uint64_t global_handle = 0;
706 	int rc = 0;
707 
708 	/* Check that the MBZs are indeed 0 */
709 	if (args->a5 || args->a6 || args->a7)
710 		goto out;
711 
712 	if (!args->a3) {
713 		/*
714 		 * The memory transaction descriptor is passed via our rx
715 		 * buffer.
716 		 */
717 		if (args->a4)
718 			goto out;
719 		rc = handle_mem_share_rxbuf(args->a1, args->a2, &global_handle,
720 					    rxtx);
721 	} else {
722 		rc = handle_mem_share_tmem(args->a3, args->a1, args->a2,
723 					   args->a4, &global_handle, rxtx);
724 	}
725 	if (rc < 0) {
726 		ret_w2 = rc;
727 		goto out;
728 	}
729 	if (rc > 0) {
730 		ret_fid = FFA_MEM_FRAG_RX;
731 		ret_w3 = rc;
732 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
		/* More fragments expected, don't fall through to success */
		goto out;
733 	}
734 	ret_fid = FFA_SUCCESS_32;
735 	reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
736 out:
737 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
738 }
739 
740 static struct mem_frag_state *get_frag_state(uint64_t global_handle)
741 {
742 	struct mem_frag_state *s = NULL;
743 
744 	SLIST_FOREACH(s, &frag_state_head, link)
745 		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
746 			return s;
747 
748 	return NULL;
749 }
750 
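/*
 * FFA_MEM_FRAG_TX: receive the next fragment of a memory share, either
 * via the temporarily mapped buffer saved in the fragment state or via
 * the caller's TX buffer (our RX buffer).
 */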
751 static void handle_mem_frag_tx(struct thread_smc_args *args,
752 			       struct ffa_rxtx *rxtx)
753 {
754 	int rc = 0;
755 	uint64_t global_handle = reg_pair_to_64(READ_ONCE(args->a2),
756 						READ_ONCE(args->a1));
757 	size_t flen = READ_ONCE(args->a3);
758 	struct mem_frag_state *s = NULL;
759 	tee_mm_entry_t *mm = NULL;
760 	unsigned int page_count = 0;
761 	void *buf = NULL;
762 	uint32_t ret_w1 = 0;
763 	uint32_t ret_w2 = 0;
764 	uint32_t ret_w3 = 0;
765 	uint32_t ret_fid = 0;
766 
767 	/*
768 	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
769 	 * requests.
770 	 */
771 
772 	cpu_spin_lock(&rxtx->spinlock);
773 
774 	s = get_frag_state(global_handle);
775 	if (!s) {
776 		rc = FFA_INVALID_PARAMETERS;
777 		goto out;
778 	}
779 
780 	mm = s->mm;
781 	if (mm) {
782 		if (flen > tee_mm_get_bytes(mm)) {
783 			rc = FFA_INVALID_PARAMETERS;
784 			goto out;
785 		}
786 		page_count = s->share.page_count;
787 		buf = (void *)tee_mm_get_smem(mm);
788 	} else {
789 		if (flen > rxtx->size) {
790 			rc = FFA_INVALID_PARAMETERS;
791 			goto out;
792 		}
793 		buf = rxtx->rx;
794 	}
795 
796 	rc = add_mem_share_frag(s, buf, flen);
797 out:
798 	cpu_spin_unlock(&rxtx->spinlock);
799 
800 	if (rc <= 0 && mm) {
801 		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
802 		tee_mm_free(mm);
803 	}
804 
805 	if (rc < 0) {
806 		ret_fid = FFA_ERROR;
807 		ret_w2 = rc;
808 	} else if (rc > 0) {
809 		ret_fid = FFA_MEM_FRAG_RX;
810 		ret_w3 = rc;
811 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
812 	} else {
813 		ret_fid = FFA_SUCCESS_32;
814 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
815 	}
816 
817 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
818 }
819 
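/*
 * FFA_MEM_RECLAIM: let the owner reclaim memory previously shared with
 * us, provided the cookie is known and the memory is no longer in use.
 */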
820 static void handle_mem_reclaim(struct thread_smc_args *args)
821 {
822 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
823 	uint32_t ret_fid = FFA_ERROR;
824 	uint64_t cookie = 0;
825 
826 	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
827 		goto out;
828 
829 	cookie = reg_pair_to_64(args->a2, args->a1);
830 	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
831 	case TEE_SUCCESS:
832 		ret_fid = FFA_SUCCESS_32;
833 		ret_val = 0;
834 		break;
835 	case TEE_ERROR_ITEM_NOT_FOUND:
836 		DMSG("cookie %#"PRIx64" not found", cookie);
837 		ret_val = FFA_INVALID_PARAMETERS;
838 		break;
839 	default:
840 		DMSG("cookie %#"PRIx64" busy", cookie);
841 		ret_val = FFA_DENIED;
842 		break;
843 	}
844 out:
845 	spmc_set_args(args, ret_fid, ret_val, 0, 0, 0, 0);
846 }
847 
848 /* Only called from assembly */
849 void thread_spmc_msg_recv(struct thread_smc_args *args);
850 void thread_spmc_msg_recv(struct thread_smc_args *args)
851 {
852 	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
853 	switch (args->a0) {
854 	case FFA_VERSION:
855 		spmc_handle_version(args);
856 		break;
857 	case FFA_FEATURES:
858 		handle_features(args);
859 		break;
860 #ifdef ARM64
861 	case FFA_RXTX_MAP_64:
862 #endif
863 	case FFA_RXTX_MAP_32:
864 		spmc_handle_rxtx_map(args, &nw_rxtx);
865 		break;
866 	case FFA_RXTX_UNMAP:
867 		spmc_handle_rxtx_unmap(args, &nw_rxtx);
868 		break;
869 	case FFA_RX_RELEASE:
870 		spmc_handle_rx_release(args, &nw_rxtx);
871 		break;
872 	case FFA_PARTITION_INFO_GET:
873 		handle_partition_info_get(args, &nw_rxtx);
874 		break;
875 	case FFA_INTERRUPT:
876 		itr_core_handler();
877 		spmc_set_args(args, FFA_SUCCESS_32, args->a1, 0, 0, 0, 0);
878 		break;
879 	case FFA_MSG_SEND_DIRECT_REQ_32:
880 		if (IS_ENABLED(CFG_SECURE_PARTITION) &&
881 		    FFA_DST(args->a1) != SPMC_ENDPOINT_ID) {
882 			spmc_sp_start_thread(args);
883 			break;
884 		}
885 
886 		if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
887 			handle_yielding_call(args);
888 		else
889 			handle_blocking_call(args);
890 		break;
891 #ifdef ARM64
892 	case FFA_MEM_SHARE_64:
893 #endif
894 	case FFA_MEM_SHARE_32:
895 		handle_mem_share(args, &nw_rxtx);
896 		break;
897 	case FFA_MEM_RECLAIM:
898 		handle_mem_reclaim(args);
899 		break;
900 	case FFA_MEM_FRAG_TX:
901 		handle_mem_frag_tx(args, &nw_rxtx);
902 		break;
903 	default:
904 		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
905 		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
906 			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
907 	}
908 }
909 
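/*
 * Look up the shared memory object carrying the OP-TEE message arguments
 * by its FF-A cookie, map it, check that the argument struct and its
 * parameters fit, and pass it to tee_entry_std().
 */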
910 static uint32_t yielding_call_with_arg(uint64_t cookie)
911 {
912 	uint32_t rv = TEE_ERROR_BAD_PARAMETERS;
913 	struct optee_msg_arg *arg = NULL;
914 	struct mobj *mobj = NULL;
915 	uint32_t num_params = 0;
916 
917 	mobj = mobj_ffa_get_by_cookie(cookie, 0);
918 	if (!mobj) {
919 		EMSG("Can't find cookie %#"PRIx64, cookie);
920 		return TEE_ERROR_BAD_PARAMETERS;
921 	}
922 
923 	rv = mobj_inc_map(mobj);
924 	if (rv)
925 		goto out_put_mobj;
926 
927 	rv = TEE_ERROR_BAD_PARAMETERS;
928 	arg = mobj_get_va(mobj, 0);
929 	if (!arg)
930 		goto out_dec_map;
931 
932 	if (!mobj_get_va(mobj, sizeof(*arg)))
933 		goto out_dec_map;
934 
935 	num_params = READ_ONCE(arg->num_params);
936 	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
937 		goto out_dec_map;
938 
939 	if (!mobj_get_va(mobj, OPTEE_MSG_GET_ARG_SIZE(num_params)))
940 		goto out_dec_map;
941 
942 	rv = tee_entry_std(arg, num_params);
943 
944 	thread_rpc_shm_cache_clear(&threads[thread_get_id()].shm_cache);
945 
946 out_dec_map:
947 	mobj_dec_map(mobj);
948 out_put_mobj:
949 	mobj_put(mobj);
950 	return rv;
951 }
952 
953 static uint32_t yielding_unregister_shm(uint64_t cookie)
954 {
955 	uint32_t res = mobj_ffa_unregister_by_cookie(cookie);
956 
957 	switch (res) {
958 	case TEE_SUCCESS:
959 	case TEE_ERROR_ITEM_NOT_FOUND:
960 		return 0;
961 	case TEE_ERROR_BUSY:
962 		EMSG("res %#"PRIx32, res);
963 		return FFA_BUSY;
964 	default:
965 		EMSG("res %#"PRIx32, res);
966 		return FFA_INVALID_PARAMETERS;
967 	}
968 }
969 
970 /*
971  * Helper routine for the assembly function thread_std_smc_entry()
972  *
973  * Note: this function is weak just to make it possible to exclude it from
974  * the unpaged area.
975  */
976 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
977 				       uint32_t a2, uint32_t a3)
978 {
979 	/*
980 	 * Arguments are supplied from handle_yielding_call() as:
981 	 * a0 <- w1
982 	 * a1 <- w3
983 	 * a2 <- w4
984 	 * a3 <- w5
985 	 */
986 	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
987 	switch (a1) {
988 	case OPTEE_FFA_YIELDING_CALL_WITH_ARG:
989 		return yielding_call_with_arg(reg_pair_to_64(a3, a2));
990 	case OPTEE_FFA_YIELDING_CALL_REGISTER_SHM:
991 		return FFA_NOT_SUPPORTED;
992 	case OPTEE_FFA_YIELDING_CALL_UNREGISTER_SHM:
993 		return yielding_unregister_shm(reg_pair_to_64(a3, a2));
994 	default:
995 		return FFA_DENIED;
996 	}
997 }
998 
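/*
 * Translate a THREAD_PARAM_ATTR_MEMREF_* parameter into an
 * OPTEE_MSG_ATTR_TYPE_FMEM_* parameter identified by the FF-A cookie of
 * the backing mobj.
 */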
999 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
1000 {
1001 	uint64_t offs = tpm->u.memref.offs;
1002 
1003 	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
1004 		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
1005 
1006 	param->u.fmem.offs_low = offs;
1007 	param->u.fmem.offs_high = offs >> 32;
1008 	if (param->u.fmem.offs_high != offs >> 32)
1009 		return false;
1010 
1011 	param->u.fmem.size = tpm->u.memref.size;
1012 	if (tpm->u.memref.mobj) {
1013 		param->u.fmem.global_id = mobj_get_cookie(tpm->u.memref.mobj);
1014 		if (!param->u.fmem.global_id)
1015 			return false;
1016 	} else {
1017 		param->u.fmem.global_id = 0;
1018 	}
1019 
1020 	return true;
1021 }
1022 
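/*
 * Unregister the FF-A mobj behind @cookie and tell normal world via RPC
 * to free the corresponding shared memory.
 */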
1023 static void thread_rpc_free(uint32_t type, uint64_t cookie, struct mobj *mobj)
1024 {
1025 	TEE_Result res = TEE_SUCCESS;
1026 	struct thread_rpc_arg rpc_arg = { .call = {
1027 			.w1 = thread_get_tsd()->rpc_target_info,
1028 			.w4 = type,
1029 		},
1030 	};
1031 
1032 	reg_pair_from_64(cookie, &rpc_arg.call.w6, &rpc_arg.call.w5);
1033 	mobj_put(mobj);
1034 	res = mobj_ffa_unregister_by_cookie(cookie);
1035 	if (res)
1036 		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): res %#"PRIx32,
1037 		     cookie, res);
1038 	thread_rpc(&rpc_arg);
1039 }
1040 
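/*
 * Ask normal world via RPC to allocate and share @size bytes of memory of
 * the given type, then look up and map the resulting mobj using the
 * cookie returned in w4/w5 and the internal offset in w6.
 */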
1041 static struct mobj *thread_rpc_alloc(size_t size, uint32_t type)
1042 {
1043 	struct mobj *mobj = NULL;
1044 	unsigned int page_count = ROUNDUP(size, SMALL_PAGE_SIZE) /
1045 				  SMALL_PAGE_SIZE;
1046 	struct thread_rpc_arg rpc_arg = { .call = {
1047 			.w1 = thread_get_tsd()->rpc_target_info,
1048 			.w4 = type,
1049 			.w5 = page_count,
1050 		},
1051 	};
1052 	unsigned int internal_offset = 0;
1053 	uint64_t cookie = 0;
1054 
1055 	thread_rpc(&rpc_arg);
1056 
1057 	cookie = reg_pair_to_64(rpc_arg.ret.w5, rpc_arg.ret.w4);
1058 	if (!cookie)
1059 		return NULL;
1060 	internal_offset = rpc_arg.ret.w6;
1061 
1062 	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
1063 	if (!mobj) {
1064 		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
1065 		     cookie, internal_offset);
1066 		return NULL;
1067 	}
1068 
1069 	assert(mobj_is_nonsec(mobj));
1070 
1071 	if (mobj_inc_map(mobj)) {
1072 		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
1073 		mobj_put(mobj);
1074 		return NULL;
1075 	}
1076 
1077 	return mobj;
1078 }
1079 
1080 struct mobj *thread_rpc_alloc_payload(size_t size)
1081 {
1082 	return thread_rpc_alloc(size,
1083 				OPTEE_FFA_YIELDING_CALL_RETURN_ALLOC_SUPPL_SHM);
1084 }
1085 
1086 void thread_rpc_free_payload(struct mobj *mobj)
1087 {
1088 	thread_rpc_free(OPTEE_FFA_YIELDING_CALL_RETURN_FREE_SUPPL_SHM,
1089 			mobj_get_cookie(mobj), mobj);
1090 }
1091 
1092 struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
1093 {
1094 	return thread_rpc_alloc(size,
1095 				OPTEE_FFA_YIELDING_CALL_RETURN_ALLOC_KERN_SHM);
1096 }
1097 
1098 void thread_rpc_free_kernel_payload(struct mobj *mobj)
1099 {
1100 	thread_rpc_free(OPTEE_FFA_YIELDING_CALL_RETURN_FREE_KERN_SHM,
1101 			mobj_get_cookie(mobj), mobj);
1102 }
1103 
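/*
 * Return the per-thread struct optee_msg_arg used for RPC, allocating and
 * caching it on first use, populated from @params. The FF-A cookie of the
 * backing mobj is returned in *carg_ret.
 */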
1104 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
1105 			    struct thread_param *params,
1106 			    struct optee_msg_arg **arg_ret,
1107 			    uint64_t *carg_ret)
1108 {
1109 	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1110 	struct thread_ctx *thr = threads + thread_get_id();
1111 	struct optee_msg_arg *arg = thr->rpc_arg;
1112 
1113 	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
1114 		return TEE_ERROR_BAD_PARAMETERS;
1115 
1116 	if (!arg) {
1117 		struct mobj *mobj = thread_rpc_alloc_kernel_payload(sz);
1118 
1119 		if (!mobj)
1120 			return TEE_ERROR_OUT_OF_MEMORY;
1121 
1122 		arg = mobj_get_va(mobj, 0);
1123 		if (!arg) {
1124 			thread_rpc_free_kernel_payload(mobj);
1125 			return TEE_ERROR_OUT_OF_MEMORY;
1126 		}
1127 
1128 		thr->rpc_arg = arg;
1129 		thr->rpc_mobj = mobj;
1130 	}
1131 
1132 	memset(arg, 0, sz);
1133 	arg->cmd = cmd;
1134 	arg->num_params = num_params;
1135 	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
1136 
1137 	for (size_t n = 0; n < num_params; n++) {
1138 		switch (params[n].attr) {
1139 		case THREAD_PARAM_ATTR_NONE:
1140 			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
1141 			break;
1142 		case THREAD_PARAM_ATTR_VALUE_IN:
1143 		case THREAD_PARAM_ATTR_VALUE_OUT:
1144 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1145 			arg->params[n].attr = params[n].attr -
1146 					      THREAD_PARAM_ATTR_VALUE_IN +
1147 					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
1148 			arg->params[n].u.value.a = params[n].u.value.a;
1149 			arg->params[n].u.value.b = params[n].u.value.b;
1150 			arg->params[n].u.value.c = params[n].u.value.c;
1151 			break;
1152 		case THREAD_PARAM_ATTR_MEMREF_IN:
1153 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1154 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1155 			if (!set_fmem(arg->params + n, params + n))
1156 				return TEE_ERROR_BAD_PARAMETERS;
1157 			break;
1158 		default:
1159 			return TEE_ERROR_BAD_PARAMETERS;
1160 		}
1161 	}
1162 
1163 	*arg_ret = arg;
1164 	*carg_ret = mobj_get_cookie(thr->rpc_mobj);
1165 
1166 	return TEE_SUCCESS;
1167 }
1168 
1169 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
1170 				struct thread_param *params)
1171 {
1172 	for (size_t n = 0; n < num_params; n++) {
1173 		switch (params[n].attr) {
1174 		case THREAD_PARAM_ATTR_VALUE_OUT:
1175 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1176 			params[n].u.value.a = arg->params[n].u.value.a;
1177 			params[n].u.value.b = arg->params[n].u.value.b;
1178 			params[n].u.value.c = arg->params[n].u.value.c;
1179 			break;
1180 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1181 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1182 			params[n].u.memref.size = arg->params[n].u.fmem.size;
1183 			break;
1184 		default:
1185 			break;
1186 		}
1187 	}
1188 
1189 	return arg->ret;
1190 }
1191 
1192 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
1193 			struct thread_param *params)
1194 {
1195 	struct thread_rpc_arg rpc_arg = { .call = {
1196 			.w1 = thread_get_tsd()->rpc_target_info,
1197 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1198 		},
1199 	};
1200 	uint64_t carg = 0;
1201 	struct optee_msg_arg *arg = NULL;
1202 	uint32_t ret = 0;
1203 
1204 	ret = get_rpc_arg(cmd, num_params, params, &arg, &carg);
1205 	if (ret)
1206 		return ret;
1207 
1208 	reg_pair_from_64(carg, &rpc_arg.call.w6, &rpc_arg.call.w5);
1209 	thread_rpc(&rpc_arg);
1210 
1211 	return get_rpc_arg_res(arg, num_params, params);
1212 }
1213 
1214 struct mobj *thread_rpc_alloc_global_payload(size_t size __unused)
1215 {
1216 	return NULL;
1217 }
1218 
1219 void thread_rpc_free_global_payload(struct mobj *mobj __unused)
1220 {
1221 	/*
1222 	 * "can't happen" since thread_rpc_alloc_global_payload() always
1223 	 * returns NULL.
1224 	 */
1225 	volatile bool cant_happen __maybe_unused = true;
1226 
1227 	assert(!cant_happen);
1228 }
1229