xref: /optee_os/core/arch/arm/kernel/thread_spmc.c (revision 77bdbf67c42209142ef43129e01113d29d9c62f6)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2020, Linaro Limited.
4  * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
5  */
6 
7 #include <assert.h>
8 #include <ffa.h>
9 #include <io.h>
10 #include <kernel/interrupt.h>
11 #include <kernel/panic.h>
12 #include <kernel/secure_partition.h>
13 #include <kernel/spinlock.h>
14 #include <kernel/spmc_sp_handler.h>
15 #include <kernel/tee_misc.h>
16 #include <kernel/thread.h>
17 #include <kernel/thread_spmc.h>
18 #include <mm/core_mmu.h>
19 #include <mm/mobj.h>
20 #include <optee_ffa.h>
21 #include <optee_msg.h>
22 #include <optee_rpc_cmd.h>
23 #include <string.h>
24 #include <sys/queue.h>
25 #include <tee/entry_std.h>
26 #include <util.h>
27 
28 #include "thread_private.h"
29 
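/*
 * The structures below describe the memory management transaction
 * descriptors used with FFA_MEM_SHARE and friends; the "Table" references
 * in the comments refer to the Arm FF-A specification.
 */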
30 /* Table 39: Constituent memory region descriptor */
31 struct constituent_address_range {
32 	uint64_t address;
33 	uint32_t page_count;
34 	uint32_t reserved;
35 };
36 
37 /* Table 38: Composite memory region descriptor */
38 struct mem_region_descr {
39 	uint32_t total_page_count;
40 	uint32_t address_range_count;
41 	uint64_t reserved;
42 	struct constituent_address_range address_range_array[];
43 };
44 
45 /* Table 40: Memory access permissions descriptor */
46 struct mem_access_perm_descr {
47 	uint16_t endpoint_id;
48 	uint8_t access_perm;
49 	uint8_t flags;
50 };
51 
52 /* Table 41: Endpoint memory access descriptor */
53 struct mem_access_descr {
54 	struct mem_access_perm_descr mem_access_perm_descr;
55 	uint32_t mem_region_offs;
56 	uint64_t reserved;
57 };
58 
59 /* Table 44: Lend, donate or share memory transaction descriptor */
60 struct mem_transaction_descr {
61 	uint16_t sender_id;
62 	uint8_t mem_reg_attr;
63 	uint8_t reserved0;
64 	uint32_t flags;
65 	uint64_t global_handle;
66 	uint64_t tag;
67 	uint32_t reserved1;
68 	uint32_t mem_access_descr_count;
69 	struct mem_access_descr mem_access_descr_array[];
70 };
71 
72 struct ffa_partition_info {
73 	uint16_t id;
74 	uint16_t execution_context;
75 	uint32_t partition_properties;
76 };
77 
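/*
 * State of an ongoing memory share: the mobj being populated, the expected
 * page and address range counts, and how many pages have been added so far.
 */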
78 struct mem_share_state {
79 	struct mobj_ffa *mf;
80 	unsigned int page_count;
81 	unsigned int region_count;
82 	unsigned int current_page_idx;
83 };
84 
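/*
 * State kept while a fragmented share waits for more FFA_MEM_FRAG_TX
 * fragments. @mm is non-NULL when the descriptor is parsed from a
 * temporarily mapped normal world buffer instead of the RX buffer.
 */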
85 struct mem_frag_state {
86 	struct mem_share_state share;
87 	tee_mm_entry_t *mm;
88 	unsigned int frag_offset;
89 	SLIST_ENTRY(mem_frag_state) link;
90 };
91 
92 /*
93  * If struct ffa_rxtx::size is 0, RX/TX buffers are not mapped or initialized.
94  *
95  * struct ffa_rxtx::spinlock protects the variables below from concurrent
96  * access; this includes the content of struct ffa_rxtx::rx and
97  * @frag_state_head.
98  *
99  * struct ffa_rxtx::tx_is_mine is true when we may write to struct
100  * ffa_rxtx::tx and false when it is owned by normal world.
101  *
102  * Note that we can't prevent normal world from updating the content of
103  * these buffers, so we must always be careful when reading, even while
104  * we hold the lock.
105  */
106 
107 static struct ffa_rxtx nw_rxtx;
108 
109 static bool is_nw_buf(struct ffa_rxtx *rxtx)
110 {
111 	return rxtx == &nw_rxtx;
112 }
113 
114 static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
115 	SLIST_HEAD_INITIALIZER(&frag_state_head);
116 
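/* Swap the 16-bit source and destination endpoint IDs packed into one register */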
117 static uint32_t swap_src_dst(uint32_t src_dst)
118 {
119 	return (src_dst >> 16) | (src_dst << 16);
120 }
121 
122 static void set_args(struct thread_smc_args *args, uint32_t fid,
123 		     uint32_t src_dst, uint32_t w2, uint32_t w3, uint32_t w4,
124 		     uint32_t w5)
125 {
126 	*args = (struct thread_smc_args){ .a0 = fid,
127 					  .a1 = src_dst,
128 					  .a2 = w2,
129 					  .a3 = w3,
130 					  .a4 = w4,
131 					  .a5 = w5, };
132 }
133 
134 static void handle_version(struct thread_smc_args *args)
135 {
136 	/*
137 	 * We currently only support one version, 1.0, so let's keep it
138 	 * simple.
139 	 */
140 	set_args(args, MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR),
141 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
142 		 FFA_PARAM_MBZ);
143 }
144 
145 static void handle_features(struct thread_smc_args *args)
146 {
147 	uint32_t ret_fid = 0;
148 	uint32_t ret_w2 = FFA_PARAM_MBZ;
149 
150 	switch (args->a1) {
151 #ifdef ARM64
152 	case FFA_RXTX_MAP_64:
153 #endif
154 	case FFA_RXTX_MAP_32:
155 		ret_fid = FFA_SUCCESS_32;
156 		ret_w2 = 0; /* 4kB minimum buffer size and alignment boundary */
157 		break;
158 #ifdef ARM64
159 	case FFA_MEM_SHARE_64:
160 #endif
161 	case FFA_MEM_SHARE_32:
162 		ret_fid = FFA_SUCCESS_32;
163 		/*
164 		 * Partition manager supports transmission of a memory
165 		 * transaction descriptor in a buffer dynamically allocated
166 		 * by the endpoint.
167 		 */
168 		ret_w2 = BIT(0);
169 		break;
170 
171 	case FFA_ERROR:
172 	case FFA_VERSION:
173 	case FFA_SUCCESS_32:
174 #ifdef ARM64
175 	case FFA_SUCCESS_64:
176 #endif
177 	case FFA_MEM_FRAG_TX:
178 	case FFA_MEM_RECLAIM:
179 	case FFA_MSG_SEND_DIRECT_REQ_32:
180 	case FFA_INTERRUPT:
181 	case FFA_PARTITION_INFO_GET:
182 	case FFA_RX_RELEASE:
183 		ret_fid = FFA_SUCCESS_32;
184 		break;
185 	default:
186 		ret_fid = FFA_ERROR;
187 		ret_w2 = FFA_NOT_SUPPORTED;
188 		break;
189 	}
190 
191 	set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2,
192 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
193 }
194 
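/* Map a contiguous normal world (non-secure) buffer into our shared memory VA space */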
195 static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
196 {
197 	tee_mm_entry_t *mm = NULL;
198 
199 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
200 		return FFA_INVALID_PARAMETERS;
201 
202 	mm = tee_mm_alloc(&tee_mm_shm, sz);
203 	if (!mm)
204 		return FFA_NO_MEMORY;
205 
206 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
207 					  sz / SMALL_PAGE_SIZE,
208 					  MEM_AREA_NSEC_SHM)) {
209 		tee_mm_free(mm);
210 		return FFA_INVALID_PARAMETERS;
211 	}
212 
213 	*va_ret = (void *)tee_mm_get_smem(mm);
214 	return 0;
215 }
216 
217 static void unmap_buf(void *va, size_t sz)
218 {
219 	tee_mm_entry_t *mm = tee_mm_find(&tee_mm_shm, (vaddr_t)va);
220 
221 	assert(mm);
222 	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
223 	tee_mm_free(mm);
224 }
225 
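/*
 * FFA_RXTX_MAP handler: a1 holds the caller's TX buffer address (our RX),
 * a2 the caller's RX buffer address (our TX) and a3 the buffer size as a
 * number of 4kB pages. For the normal world the addresses are physical and
 * must be mapped here; for an SP they are already mapped virtual addresses.
 */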
226 void spmc_handle_rxtx_map(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
227 {
228 	int rc = 0;
229 	uint32_t ret_fid = FFA_ERROR;
230 	unsigned int sz = 0;
231 	paddr_t rx_pa = 0;
232 	paddr_t tx_pa = 0;
233 	void *rx = NULL;
234 	void *tx = NULL;
235 
236 	cpu_spin_lock(&rxtx->spinlock);
237 
238 	if (args->a3 & GENMASK_64(63, 6)) {
239 		rc = FFA_INVALID_PARAMETERS;
240 		goto out;
241 	}
242 
243 	sz = args->a3 * SMALL_PAGE_SIZE;
244 	if (!sz) {
245 		rc = FFA_INVALID_PARAMETERS;
246 		goto out;
247 	}
248 	/* TX/RX are swapped compared to the caller */
249 	tx_pa = args->a2;
250 	rx_pa = args->a1;
251 
252 	if (rxtx->size) {
253 		rc = FFA_DENIED;
254 		goto out;
255 	}
256 
257 	/*
258 	 * If the buffer comes from an SP, the address is virtual and already
259 	 * mapped.
260 	 */
261 	if (is_nw_buf(rxtx)) {
262 		rc = map_buf(tx_pa, sz, &tx);
263 		if (rc)
264 			goto out;
265 		rc = map_buf(rx_pa, sz, &rx);
266 		if (rc) {
267 			unmap_buf(tx, sz);
268 			goto out;
269 		}
270 		rxtx->tx = tx;
271 		rxtx->rx = rx;
272 	} else {
273 		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
274 			rc = FFA_INVALID_PARAMETERS;
275 			goto out;
276 		}
277 
278 		if (!virt_to_phys((void *)tx_pa) ||
279 		    !virt_to_phys((void *)rx_pa)) {
280 			rc = FFA_INVALID_PARAMETERS;
281 			goto out;
282 		}
283 
284 		rxtx->tx = (void *)tx_pa;
285 		rxtx->rx = (void *)rx_pa;
286 	}
287 
288 	rxtx->size = sz;
289 	rxtx->tx_is_mine = true;
290 	ret_fid = FFA_SUCCESS_32;
291 	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
292 	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
293 out:
294 	cpu_spin_unlock(&rxtx->spinlock);
295 	set_args(args, ret_fid, FFA_PARAM_MBZ, rc,
296 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
297 }
298 
299 void spmc_handle_rxtx_unmap(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
300 {
301 	uint32_t ret_fid = FFA_ERROR;
302 	int rc = FFA_INVALID_PARAMETERS;
303 
304 	cpu_spin_lock(&rxtx->spinlock);
305 
306 	if (!rxtx->size)
307 		goto out;
308 
309 	/* We don't unmap the SP memory as the SP might still use it */
310 	if (is_nw_buf(rxtx)) {
311 		unmap_buf(rxtx->rx, rxtx->size);
312 		unmap_buf(rxtx->tx, rxtx->size);
313 	}
314 	rxtx->size = 0;
315 	rxtx->rx = NULL;
316 	rxtx->tx = NULL;
317 	ret_fid = FFA_SUCCESS_32;
318 	rc = 0;
319 out:
320 	cpu_spin_unlock(&rxtx->spinlock);
321 	set_args(args, ret_fid, FFA_PARAM_MBZ, rc,
322 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
323 }
324 
325 void spmc_handle_rx_release(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
326 {
327 	uint32_t ret_fid = 0;
328 	int rc = 0;
329 
330 	cpu_spin_lock(&rxtx->spinlock);
331 	/* The sender's RX is our TX */
332 	if (!rxtx->size || rxtx->tx_is_mine) {
333 		ret_fid = FFA_ERROR;
334 		rc = FFA_DENIED;
335 	} else {
336 		ret_fid = FFA_SUCCESS_32;
337 		rc = 0;
338 		rxtx->tx_is_mine = true;
339 	}
340 	cpu_spin_unlock(&rxtx->spinlock);
341 
342 	set_args(args, ret_fid, FFA_PARAM_MBZ, rc,
343 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
344 }
345 
346 static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
347 {
348 	return !w0 && !w1 && !w2 && !w3;
349 }
350 
351 static bool is_optee_os_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
352 {
353 	return w0 == OPTEE_MSG_OS_OPTEE_UUID_0 &&
354 	       w1 == OPTEE_MSG_OS_OPTEE_UUID_1 &&
355 	       w2 == OPTEE_MSG_OS_OPTEE_UUID_2 &&
356 	       w3 == OPTEE_MSG_OS_OPTEE_UUID_3;
357 }
358 
359 static void handle_partition_info_get(struct thread_smc_args *args,
360 				      struct ffa_rxtx *rxtx)
361 {
362 	uint32_t ret_fid = 0;
363 	int rc = 0;
364 
365 	if (!is_nil_uuid(args->a1, args->a2, args->a3, args->a4) &&
366 	    !is_optee_os_uuid(args->a1, args->a2, args->a3, args->a4)) {
367 		ret_fid = FFA_ERROR;
368 		rc = FFA_INVALID_PARAMETERS;
369 		goto out;
370 	}
371 
372 	cpu_spin_lock(&rxtx->spinlock);
373 	if (rxtx->size && rxtx->tx_is_mine) {
374 		struct ffa_partition_info *fpi = rxtx->tx;
375 
376 		fpi->id = SPMC_ENDPOINT_ID;
377 		fpi->execution_context = CFG_TEE_CORE_NB_CORE;
378 		fpi->partition_properties = BIT(0) | BIT(1);
379 
380 		ret_fid = FFA_SUCCESS_32;
381 		rc = 1;
382 		rxtx->tx_is_mine = false;
383 	} else {
384 		ret_fid = FFA_ERROR;
385 		if (rxtx->size)
386 			rc = FFA_BUSY;
387 		else
388 			rc = FFA_DENIED; /* TX buffer not set up yet */
389 	}
390 	cpu_spin_unlock(&rxtx->spinlock);
391 
392 out:
393 	set_args(args, ret_fid, FFA_PARAM_MBZ, rc,
394 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
395 }
396 
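/*
 * A yielding call either resumes a suspended thread (RPC return) or
 * allocates and runs a new one. The calls below only return here on
 * failure, in which case the direct response carries an error code.
 */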
397 static void handle_yielding_call(struct thread_smc_args *args)
398 {
399 	uint32_t ret_val = 0;
400 
401 	thread_check_canaries();
402 
403 	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
404 		/* Note connection to struct thread_rpc_arg::ret */
405 		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
406 				       0);
407 		ret_val = FFA_INVALID_PARAMETERS;
408 	} else {
409 		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5);
410 		ret_val = FFA_BUSY;
411 	}
412 	set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
413 		 swap_src_dst(args->a1), 0, ret_val, 0, 0);
414 }
415 
416 static void handle_blocking_call(struct thread_smc_args *args)
417 {
418 	switch (args->a3) {
419 	case OPTEE_FFA_GET_API_VERSION:
420 		set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
421 			 swap_src_dst(args->a1), 0, OPTEE_FFA_VERSION_MAJOR,
422 			 OPTEE_FFA_VERSION_MINOR, 0);
423 		break;
424 	case OPTEE_FFA_GET_OS_VERSION:
425 		set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
426 			 swap_src_dst(args->a1), 0, CFG_OPTEE_REVISION_MAJOR,
427 			 CFG_OPTEE_REVISION_MINOR, TEE_IMPL_GIT_SHA1);
428 		break;
429 	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
430 		set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
431 			 swap_src_dst(args->a1), 0, 0, 0, 0);
432 		break;
433 	default:
434 		EMSG("Unhandled blocking service ID %#"PRIx32,
435 		     (uint32_t)args->a3);
436 		panic();
437 	}
438 }
439 
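/*
 * Find the endpoint memory access descriptor matching SPMC_ENDPOINT_ID and
 * return its access permissions and memory region descriptor offset.
 */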
440 static int get_acc_perms(struct mem_access_descr *mem_acc,
441 			 unsigned int num_mem_accs, uint8_t *acc_perms,
442 			 unsigned int *region_offs)
443 {
444 	unsigned int n = 0;
445 
446 	for (n = 0; n < num_mem_accs; n++) {
447 		struct mem_access_perm_descr *descr =
448 			&mem_acc[n].mem_access_perm_descr;
449 
450 		if (READ_ONCE(descr->endpoint_id) == SPMC_ENDPOINT_ID) {
451 			*acc_perms = READ_ONCE(descr->access_perm);
452 			*region_offs = READ_ONCE(mem_acc[n].mem_region_offs);
453 			return 0;
454 		}
455 	}
456 
457 	return FFA_INVALID_PARAMETERS;
458 }
459 
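/*
 * Validate the memory transaction descriptor in @buf (@blen bytes) and
 * locate the composite memory region descriptor. Returns the total page
 * count, the number of constituent address ranges and the offset of the
 * address range array.
 */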
460 static int mem_share_init(void *buf, size_t blen, unsigned int *page_count,
461 			  unsigned int *region_count, size_t *addr_range_offs)
462 {
463 	struct mem_region_descr *region_descr = NULL;
464 	struct mem_transaction_descr *descr = NULL;
465 	const uint8_t exp_mem_acc_perm = 0x6; /* Not executable, Read-write */
466 	/* Normal memory, Write-Back cacheable, Inner shareable */
467 	const uint8_t exp_mem_reg_attr = 0x2f;
468 	unsigned int num_mem_accs = 0;
469 	uint8_t mem_acc_perm = 0;
470 	unsigned int region_descr_offs = 0;
471 	size_t n = 0;
472 
473 	if (!ALIGNMENT_IS_OK(buf, struct mem_transaction_descr) ||
474 	    blen < sizeof(struct mem_transaction_descr))
475 		return FFA_INVALID_PARAMETERS;
476 
477 	descr = buf;
478 
479 	/* Check that the endpoint memory access descriptor array fits */
480 	num_mem_accs = READ_ONCE(descr->mem_access_descr_count);
481 	if (MUL_OVERFLOW(sizeof(struct mem_access_descr), num_mem_accs, &n) ||
482 	    ADD_OVERFLOW(sizeof(*descr), n, &n) || n > blen)
483 		return FFA_INVALID_PARAMETERS;
484 
485 	if (READ_ONCE(descr->mem_reg_attr) != exp_mem_reg_attr)
486 		return FFA_INVALID_PARAMETERS;
487 
488 	/* Check that the access permissions match what's expected */
489 	if (get_acc_perms(descr->mem_access_descr_array,
490 			  num_mem_accs, &mem_acc_perm, &region_descr_offs) ||
491 	    mem_acc_perm != exp_mem_acc_perm)
492 		return FFA_INVALID_PARAMETERS;
493 
494 	/* Check that the Composite memory region descriptor fits */
495 	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
496 	    n > blen)
497 		return FFA_INVALID_PARAMETERS;
498 
499 	if (!ALIGNMENT_IS_OK((vaddr_t)descr + region_descr_offs,
500 			     struct mem_region_descr))
501 		return FFA_INVALID_PARAMETERS;
502 
503 	region_descr = (struct mem_region_descr *)((vaddr_t)descr +
504 						    region_descr_offs);
505 	*page_count = READ_ONCE(region_descr->total_page_count);
506 	*region_count = READ_ONCE(region_descr->address_range_count);
507 	*addr_range_offs = n;
508 	return 0;
509 }
510 
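/*
 * Add as many constituent address ranges as @flen covers to the mobj.
 * Returns the number of bytes consumed when more ranges remain, 0 when the
 * share is complete, or a negative FFA error code.
 */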
511 static int add_mem_share_helper(struct mem_share_state *s, void *buf,
512 				size_t flen)
513 {
514 	unsigned int region_count = flen /
515 				    sizeof(struct constituent_address_range);
516 	struct constituent_address_range *arange = NULL;
517 	unsigned int n = 0;
518 
519 	if (region_count > s->region_count)
520 		region_count = s->region_count;
521 
522 	if (!ALIGNMENT_IS_OK(buf, struct constituent_address_range))
523 		return FFA_INVALID_PARAMETERS;
524 	arange = buf;
525 
526 	for (n = 0; n < region_count; n++) {
527 		unsigned int page_count = READ_ONCE(arange[n].page_count);
528 		uint64_t addr = READ_ONCE(arange[n].address);
529 
530 		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
531 					  addr, page_count))
532 			return FFA_INVALID_PARAMETERS;
533 	}
534 
535 	s->region_count -= region_count;
536 	if (s->region_count)
537 		return region_count * sizeof(*arange);
538 
539 	if (s->current_page_idx != s->page_count)
540 		return FFA_INVALID_PARAMETERS;
541 
542 	return 0;
543 }
544 
545 static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
546 {
547 	int rc = 0;
548 
549 	rc = add_mem_share_helper(&s->share, buf, flen);
550 	if (rc >= 0) {
551 		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
552 			if (s->share.region_count)
553 				return s->frag_offset;
554 			/* We're done, return the number of consumed bytes */
555 			rc = s->frag_offset;
556 		} else {
557 			rc = FFA_INVALID_PARAMETERS;
558 		}
559 	}
560 
561 	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
562 	if (rc < 0)
563 		mobj_ffa_sel1_spmc_delete(s->share.mf);
564 	else
565 		mobj_ffa_push_to_inactive(s->share.mf);
566 	free(s);
567 
568 	return rc;
569 }
570 
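/*
 * Start a new share. @blen is the size of the complete transaction
 * descriptor and @flen the size of the fragment at hand; a return value
 * greater than zero means more fragments are expected.
 */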
571 static int add_mem_share(tee_mm_entry_t *mm, void *buf, size_t blen,
572 			 size_t flen, uint64_t *global_handle)
573 {
574 	int rc = 0;
575 	struct mem_share_state share = { };
576 	size_t addr_range_offs = 0;
577 	size_t n = 0;
578 
579 	if (flen > blen)
580 		return FFA_INVALID_PARAMETERS;
581 
582 	rc = mem_share_init(buf, flen, &share.page_count, &share.region_count,
583 			    &addr_range_offs);
584 	if (rc)
585 		return rc;
586 
587 	if (MUL_OVERFLOW(share.region_count,
588 			 sizeof(struct constituent_address_range), &n) ||
589 	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
590 		return FFA_INVALID_PARAMETERS;
591 
592 	share.mf = mobj_ffa_sel1_spmc_new(share.page_count);
593 	if (!share.mf)
594 		return FFA_NO_MEMORY;
595 
596 	if (flen != blen) {
597 		struct mem_frag_state *s = calloc(sizeof(*s), 1);
598 
599 		if (!s) {
600 			rc = FFA_NO_MEMORY;
601 			goto err;
602 		}
603 		s->share = share;
604 		s->mm = mm;
605 		s->frag_offset = addr_range_offs;
606 
607 		SLIST_INSERT_HEAD(&frag_state_head, s, link);
608 		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
609 					flen - addr_range_offs);
610 
611 		if (rc >= 0)
612 			*global_handle = mobj_ffa_get_cookie(share.mf);
613 
614 		return rc;
615 	}
616 
617 	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
618 				  flen - addr_range_offs);
619 	if (rc) {
620 		/*
621 		 * A positive number of consumed bytes may be returned here
622 		 * instead of 0 for done; treat any non-zero value as an error.
623 		 */
624 		rc = FFA_INVALID_PARAMETERS;
625 		goto err;
626 	}
627 
628 	*global_handle = mobj_ffa_push_to_inactive(share.mf);
629 
630 	return 0;
631 err:
632 	mobj_ffa_sel1_spmc_delete(share.mf);
633 	return rc;
634 }
635 
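/*
 * The memory transaction descriptor was passed in a buffer allocated by the
 * caller: map it temporarily, parse the share and keep the mapping around
 * if more fragments are expected.
 */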
636 static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
637 				 unsigned int page_count,
638 				 uint64_t *global_handle, struct ffa_rxtx *rxtx)
639 {
640 	int rc = 0;
641 	size_t len = 0;
642 	tee_mm_entry_t *mm = NULL;
643 	vaddr_t offs = pbuf & SMALL_PAGE_MASK;
644 
645 	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
646 		return FFA_INVALID_PARAMETERS;
647 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
648 		return FFA_INVALID_PARAMETERS;
649 
650 	/*
651 	 * Check that the length reported in blen is covered by len even
652 	 * if the offset is taken into account.
653 	 */
654 	if (len < blen || len - offs < blen)
655 		return FFA_INVALID_PARAMETERS;
656 
657 	mm = tee_mm_alloc(&tee_mm_shm, len);
658 	if (!mm)
659 		return FFA_NO_MEMORY;
660 
661 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
662 					  page_count, MEM_AREA_NSEC_SHM)) {
663 		rc = FFA_INVALID_PARAMETERS;
664 		goto out;
665 	}
666 
667 	cpu_spin_lock(&rxtx->spinlock);
668 	rc = add_mem_share(mm, (void *)(tee_mm_get_smem(mm) + offs), blen, flen,
669 			   global_handle);
670 	cpu_spin_unlock(&rxtx->spinlock);
671 	if (rc > 0)
672 		return rc;
673 
674 	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
675 out:
676 	tee_mm_free(mm);
677 	return rc;
678 }
679 
680 static int handle_mem_share_rxbuf(size_t blen, size_t flen,
681 				  uint64_t *global_handle,
682 				  struct ffa_rxtx *rxtx)
683 {
684 	int rc = FFA_DENIED;
685 
686 	cpu_spin_lock(&rxtx->spinlock);
687 
688 	if (rxtx->rx && flen <= rxtx->size)
689 		rc = add_mem_share(NULL, rxtx->rx, blen, flen, global_handle);
690 
691 	cpu_spin_unlock(&rxtx->spinlock);
692 
693 	return rc;
694 }
695 
696 static void handle_mem_share(struct thread_smc_args *args,
697 			     struct ffa_rxtx *rxtx)
698 {
699 	uint32_t ret_w1 = 0;
700 	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
701 	uint32_t ret_w3 = 0;
702 	uint32_t ret_fid = FFA_ERROR;
703 	uint64_t global_handle = 0;
704 	int rc = 0;
705 
706 	/* Check that the MBZs are indeed 0 */
707 	if (args->a5 || args->a6 || args->a7)
708 		goto out;
709 
710 	if (!args->a3) {
711 		/*
712 		 * The memory transaction descriptor is passed via our rx
713 		 * buffer.
714 		 */
715 		if (args->a4)
716 			goto out;
717 		rc = handle_mem_share_rxbuf(args->a1, args->a2, &global_handle,
718 					    rxtx);
719 	} else {
720 		rc = handle_mem_share_tmem(args->a3, args->a1, args->a2,
721 					   args->a4, &global_handle, rxtx);
722 	}
723 	if (rc < 0) {
724 		ret_w2 = rc;
725 		goto out;
726 	} else if (rc > 0) {
727 		ret_fid = FFA_MEM_FRAG_RX;
728 		ret_w3 = rc;
729 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
730 		goto out;
731 	}
732 	ret_fid = FFA_SUCCESS_32;
733 	reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
734 out:
735 	set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
736 }
737 
738 static struct mem_frag_state *get_frag_state(uint64_t global_handle)
739 {
740 	struct mem_frag_state *s = NULL;
741 
742 	SLIST_FOREACH(s, &frag_state_head, link)
743 		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
744 			return s;
745 
746 	return NULL;
747 }
748 
749 static void handle_mem_frag_tx(struct thread_smc_args *args,
750 			       struct ffa_rxtx *rxtx)
751 {
752 	int rc = 0;
753 	uint64_t global_handle = reg_pair_to_64(READ_ONCE(args->a2),
754 						READ_ONCE(args->a1));
755 	size_t flen = READ_ONCE(args->a3);
756 	struct mem_frag_state *s = NULL;
757 	tee_mm_entry_t *mm = NULL;
758 	unsigned int page_count = 0;
759 	void *buf = NULL;
760 	uint32_t ret_w1 = 0;
761 	uint32_t ret_w2 = 0;
762 	uint32_t ret_w3 = 0;
763 	uint32_t ret_fid = 0;
764 
765 	/*
766 	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
767 	 * requests.
768 	 */
769 
770 	cpu_spin_lock(&rxtx->spinlock);
771 
772 	s = get_frag_state(global_handle);
773 	if (!s) {
774 		rc = FFA_INVALID_PARAMETERS;
775 		goto out;
776 	}
777 
778 	mm = s->mm;
779 	if (mm) {
780 		if (flen > tee_mm_get_bytes(mm)) {
781 			rc = FFA_INVALID_PARAMETERS;
782 			goto out;
783 		}
784 		page_count = s->share.page_count;
785 		buf = (void *)tee_mm_get_smem(mm);
786 	} else {
787 		if (flen > rxtx->size) {
788 			rc = FFA_INVALID_PARAMETERS;
789 			goto out;
790 		}
791 		buf = rxtx->rx;
792 	}
793 
794 	rc = add_mem_share_frag(s, buf, flen);
795 out:
796 	cpu_spin_unlock(&rxtx->spinlock);
797 
798 	if (rc <= 0 && mm) {
799 		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
800 		tee_mm_free(mm);
801 	}
802 
803 	if (rc < 0) {
804 		ret_fid = FFA_ERROR;
805 		ret_w2 = rc;
806 	} else if (rc > 0) {
807 		ret_fid = FFA_MEM_FRAG_RX;
808 		ret_w3 = rc;
809 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
810 	} else {
811 		ret_fid = FFA_SUCCESS_32;
812 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
813 	}
814 
815 	set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
816 }
817 
818 static void handle_mem_reclaim(struct thread_smc_args *args)
819 {
820 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
821 	uint32_t ret_fid = FFA_ERROR;
822 	uint64_t cookie = 0;
823 
824 	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
825 		goto out;
826 
827 	cookie = reg_pair_to_64(args->a2, args->a1);
828 	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
829 	case TEE_SUCCESS:
830 		ret_fid = FFA_SUCCESS_32;
831 		ret_val = 0;
832 		break;
833 	case TEE_ERROR_ITEM_NOT_FOUND:
834 		DMSG("cookie %#"PRIx64" not found", cookie);
835 		ret_val = FFA_INVALID_PARAMETERS;
836 		break;
837 	default:
838 		DMSG("cookie %#"PRIx64" busy", cookie);
839 		ret_val = FFA_DENIED;
840 		break;
841 	}
842 out:
843 	set_args(args, ret_fid, ret_val, 0, 0, 0, 0);
844 }
845 
846 /* Only called from assembly */
847 void thread_spmc_msg_recv(struct thread_smc_args *args);
848 void thread_spmc_msg_recv(struct thread_smc_args *args)
849 {
850 	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
851 	switch (args->a0) {
852 	case FFA_VERSION:
853 		handle_version(args);
854 		break;
855 	case FFA_FEATURES:
856 		handle_features(args);
857 		break;
858 #ifdef ARM64
859 	case FFA_RXTX_MAP_64:
860 #endif
861 	case FFA_RXTX_MAP_32:
862 		spmc_handle_rxtx_map(args, &nw_rxtx);
863 		break;
864 	case FFA_RXTX_UNMAP:
865 		spmc_handle_rxtx_unmap(args, &nw_rxtx);
866 		break;
867 	case FFA_RX_RELEASE:
868 		spmc_handle_rx_release(args, &nw_rxtx);
869 		break;
870 	case FFA_PARTITION_INFO_GET:
871 		handle_partition_info_get(args, &nw_rxtx);
872 		break;
873 	case FFA_INTERRUPT:
874 		itr_core_handler();
875 		set_args(args, FFA_SUCCESS_32, args->a1, 0, 0, 0, 0);
876 		break;
877 	case FFA_MSG_SEND_DIRECT_REQ_32:
878 		if (IS_ENABLED(CFG_SECURE_PARTITION) &&
879 		    FFA_DST(args->a1) != SPMC_ENDPOINT_ID) {
880 			spmc_sp_start_thread(args);
881 			break;
882 		}
883 
884 		if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
885 			handle_yielding_call(args);
886 		else
887 			handle_blocking_call(args);
888 		break;
889 #ifdef ARM64
890 	case FFA_MEM_SHARE_64:
891 #endif
892 	case FFA_MEM_SHARE_32:
893 		handle_mem_share(args, &nw_rxtx);
894 		break;
895 	case FFA_MEM_RECLAIM:
896 		handle_mem_reclaim(args);
897 		break;
898 	case FFA_MEM_FRAG_TX:
899 		handle_mem_frag_tx(args, &nw_rxtx);
900 		break;
901 	default:
902 		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
903 		set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
904 			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
905 	}
906 }
907 
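/*
 * Look up the shared memory identified by @cookie, map it and dispatch the
 * contained struct optee_msg_arg to tee_entry_std().
 */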
908 static uint32_t yielding_call_with_arg(uint64_t cookie)
909 {
910 	uint32_t rv = TEE_ERROR_BAD_PARAMETERS;
911 	struct optee_msg_arg *arg = NULL;
912 	struct mobj *mobj = NULL;
913 	uint32_t num_params = 0;
914 
915 	mobj = mobj_ffa_get_by_cookie(cookie, 0);
916 	if (!mobj) {
917 		EMSG("Can't find cookie %#"PRIx64, cookie);
918 		return TEE_ERROR_BAD_PARAMETERS;
919 	}
920 
921 	rv = mobj_inc_map(mobj);
922 	if (rv)
923 		goto out_put_mobj;
924 
925 	rv = TEE_ERROR_BAD_PARAMETERS;
926 	arg = mobj_get_va(mobj, 0);
927 	if (!arg)
928 		goto out_dec_map;
929 
930 	if (!mobj_get_va(mobj, sizeof(*arg)))
931 		goto out_dec_map;
932 
933 	num_params = READ_ONCE(arg->num_params);
934 	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
935 		goto out_dec_map;
936 
937 	if (!mobj_get_va(mobj, OPTEE_MSG_GET_ARG_SIZE(num_params)))
938 		goto out_dec_map;
939 
940 	rv = tee_entry_std(arg, num_params);
941 
942 	thread_rpc_shm_cache_clear(&threads[thread_get_id()].shm_cache);
943 
944 out_dec_map:
945 	mobj_dec_map(mobj);
946 out_put_mobj:
947 	mobj_put(mobj);
948 	return rv;
949 }
950 
951 static uint32_t yielding_unregister_shm(uint64_t cookie)
952 {
953 	uint32_t res = mobj_ffa_unregister_by_cookie(cookie);
954 
955 	switch (res) {
956 	case TEE_SUCCESS:
957 	case TEE_ERROR_ITEM_NOT_FOUND:
958 		return 0;
959 	case TEE_ERROR_BUSY:
960 		EMSG("res %#"PRIx32, res);
961 		return FFA_BUSY;
962 	default:
963 		EMSG("res %#"PRIx32, res);
964 		return FFA_INVALID_PARAMETERS;
965 	}
966 }
967 
968 /*
969  * Helper routine for the assembly function thread_std_smc_entry()
970  *
971  * Note: this function is weak just to make it possible to exclude it from
972  * the unpaged area.
973  */
974 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
975 				       uint32_t a2, uint32_t a3)
976 {
977 	/*
978 	 * Arguments are supplied from handle_yielding_call() as:
979 	 * a0 <- w1
980 	 * a1 <- w3
981 	 * a2 <- w4
982 	 * a3 <- w5
983 	 */
984 	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
985 	switch (a1) {
986 	case OPTEE_FFA_YIELDING_CALL_WITH_ARG:
987 		return yielding_call_with_arg(reg_pair_to_64(a3, a2));
988 	case OPTEE_FFA_YIELDING_CALL_REGISTER_SHM:
989 		return FFA_NOT_SUPPORTED;
990 	case OPTEE_FFA_YIELDING_CALL_UNREGISTER_SHM:
991 		return yielding_unregister_shm(reg_pair_to_64(a3, a2));
992 	default:
993 		return FFA_DENIED;
994 	}
995 }
996 
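/*
 * Translate a memref thread parameter into an OPTEE_MSG fmem parameter
 * referring to FF-A shared memory by cookie (global_id) and offset.
 */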
997 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
998 {
999 	uint64_t offs = tpm->u.memref.offs;
1000 
1001 	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
1002 		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
1003 
1004 	param->u.fmem.offs_low = offs;
1005 	param->u.fmem.offs_high = offs >> 32;
1006 	if (param->u.fmem.offs_high != offs >> 32)
1007 		return false;
1008 
1009 	param->u.fmem.size = tpm->u.memref.size;
1010 	if (tpm->u.memref.mobj) {
1011 		param->u.fmem.global_id = mobj_get_cookie(tpm->u.memref.mobj);
1012 		if (!param->u.fmem.global_id)
1013 			return false;
1014 	} else {
1015 		param->u.fmem.global_id = 0;
1016 	}
1017 
1018 	return true;
1019 }
1020 
1021 static void thread_rpc_free(uint32_t type, uint64_t cookie, struct mobj *mobj)
1022 {
1023 	TEE_Result res = TEE_SUCCESS;
1024 	struct thread_rpc_arg rpc_arg = { .call = {
1025 			.w1 = thread_get_tsd()->rpc_target_info,
1026 			.w4 = type,
1027 		},
1028 	};
1029 
1030 	reg_pair_from_64(cookie, &rpc_arg.call.w6, &rpc_arg.call.w5);
1031 	mobj_put(mobj);
1032 	res = mobj_ffa_unregister_by_cookie(cookie);
1033 	if (res)
1034 		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): res %#"PRIx32,
1035 		     cookie, res);
1036 	thread_rpc(&rpc_arg);
1037 }
1038 
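/*
 * Ask normal world via RPC to allocate shared memory: w5 carries the page
 * count on the way out; on return w4/w5 hold the FF-A cookie of the new
 * buffer and w6 an internal offset into the shared memory object.
 */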
1039 static struct mobj *thread_rpc_alloc(size_t size, uint32_t type)
1040 {
1041 	struct mobj *mobj = NULL;
1042 	unsigned int page_count = ROUNDUP(size, SMALL_PAGE_SIZE) /
1043 				  SMALL_PAGE_SIZE;
1044 	struct thread_rpc_arg rpc_arg = { .call = {
1045 			.w1 = thread_get_tsd()->rpc_target_info,
1046 			.w4 = type,
1047 			.w5 = page_count,
1048 		},
1049 	};
1050 	unsigned int internal_offset = 0;
1051 	uint64_t cookie = 0;
1052 
1053 	thread_rpc(&rpc_arg);
1054 
1055 	cookie = reg_pair_to_64(rpc_arg.ret.w5, rpc_arg.ret.w4);
1056 	if (!cookie)
1057 		return NULL;
1058 	internal_offset = rpc_arg.ret.w6;
1059 
1060 	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
1061 	if (!mobj) {
1062 		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
1063 		     cookie, internal_offset);
1064 		return NULL;
1065 	}
1066 
1067 	assert(mobj_is_nonsec(mobj));
1068 
1069 	if (mobj_inc_map(mobj)) {
1070 		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
1071 		mobj_put(mobj);
1072 		return NULL;
1073 	}
1074 
1075 	return mobj;
1076 }
1077 
1078 struct mobj *thread_rpc_alloc_payload(size_t size)
1079 {
1080 	return thread_rpc_alloc(size,
1081 				OPTEE_FFA_YIELDING_CALL_RETURN_ALLOC_SUPPL_SHM);
1082 }
1083 
1084 void thread_rpc_free_payload(struct mobj *mobj)
1085 {
1086 	thread_rpc_free(OPTEE_FFA_YIELDING_CALL_RETURN_FREE_SUPPL_SHM,
1087 			mobj_get_cookie(mobj), mobj);
1088 }
1089 
1090 struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
1091 {
1092 	return thread_rpc_alloc(size,
1093 				OPTEE_FFA_YIELDING_CALL_RETURN_ALLOC_KERN_SHM);
1094 }
1095 
1096 void thread_rpc_free_kernel_payload(struct mobj *mobj)
1097 {
1098 	thread_rpc_free(OPTEE_FFA_YIELDING_CALL_RETURN_FREE_KERN_SHM,
1099 			mobj_get_cookie(mobj), mobj);
1100 }
1101 
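/*
 * Build the struct optee_msg_arg for an RPC in the per-thread shared
 * buffer, allocating that buffer from normal world on first use. The
 * cookie of the buffer is returned in @carg_ret.
 */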
1102 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
1103 			    struct thread_param *params,
1104 			    struct optee_msg_arg **arg_ret,
1105 			    uint64_t *carg_ret)
1106 {
1107 	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1108 	struct thread_ctx *thr = threads + thread_get_id();
1109 	struct optee_msg_arg *arg = thr->rpc_arg;
1110 
1111 	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
1112 		return TEE_ERROR_BAD_PARAMETERS;
1113 
1114 	if (!arg) {
1115 		struct mobj *mobj = thread_rpc_alloc_kernel_payload(sz);
1116 
1117 		if (!mobj)
1118 			return TEE_ERROR_OUT_OF_MEMORY;
1119 
1120 		arg = mobj_get_va(mobj, 0);
1121 		if (!arg) {
1122 			thread_rpc_free_kernel_payload(mobj);
1123 			return TEE_ERROR_OUT_OF_MEMORY;
1124 		}
1125 
1126 		thr->rpc_arg = arg;
1127 		thr->rpc_mobj = mobj;
1128 	}
1129 
1130 	memset(arg, 0, sz);
1131 	arg->cmd = cmd;
1132 	arg->num_params = num_params;
1133 	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
1134 
1135 	for (size_t n = 0; n < num_params; n++) {
1136 		switch (params[n].attr) {
1137 		case THREAD_PARAM_ATTR_NONE:
1138 			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
1139 			break;
1140 		case THREAD_PARAM_ATTR_VALUE_IN:
1141 		case THREAD_PARAM_ATTR_VALUE_OUT:
1142 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1143 			arg->params[n].attr = params[n].attr -
1144 					      THREAD_PARAM_ATTR_VALUE_IN +
1145 					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
1146 			arg->params[n].u.value.a = params[n].u.value.a;
1147 			arg->params[n].u.value.b = params[n].u.value.b;
1148 			arg->params[n].u.value.c = params[n].u.value.c;
1149 			break;
1150 		case THREAD_PARAM_ATTR_MEMREF_IN:
1151 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1152 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1153 			if (!set_fmem(arg->params + n, params + n))
1154 				return TEE_ERROR_BAD_PARAMETERS;
1155 			break;
1156 		default:
1157 			return TEE_ERROR_BAD_PARAMETERS;
1158 		}
1159 	}
1160 
1161 	*arg_ret = arg;
1162 	*carg_ret = mobj_get_cookie(thr->rpc_mobj);
1163 
1164 	return TEE_SUCCESS;
1165 }
1166 
1167 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
1168 				struct thread_param *params)
1169 {
1170 	for (size_t n = 0; n < num_params; n++) {
1171 		switch (params[n].attr) {
1172 		case THREAD_PARAM_ATTR_VALUE_OUT:
1173 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1174 			params[n].u.value.a = arg->params[n].u.value.a;
1175 			params[n].u.value.b = arg->params[n].u.value.b;
1176 			params[n].u.value.c = arg->params[n].u.value.c;
1177 			break;
1178 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1179 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1180 			params[n].u.memref.size = arg->params[n].u.fmem.size;
1181 			break;
1182 		default:
1183 			break;
1184 		}
1185 	}
1186 
1187 	return arg->ret;
1188 }
1189 
1190 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
1191 			struct thread_param *params)
1192 {
1193 	struct thread_rpc_arg rpc_arg = { .call = {
1194 			.w1 = thread_get_tsd()->rpc_target_info,
1195 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1196 		},
1197 	};
1198 	uint64_t carg = 0;
1199 	struct optee_msg_arg *arg = NULL;
1200 	uint32_t ret = 0;
1201 
1202 	ret = get_rpc_arg(cmd, num_params, params, &arg, &carg);
1203 	if (ret)
1204 		return ret;
1205 
1206 	reg_pair_from_64(carg, &rpc_arg.call.w6, &rpc_arg.call.w5);
1207 	thread_rpc(&rpc_arg);
1208 
1209 	return get_rpc_arg_res(arg, num_params, params);
1210 }
1211 
1212 struct mobj *thread_rpc_alloc_global_payload(size_t size __unused)
1213 {
1214 	return NULL;
1215 }
1216 
1217 void thread_rpc_free_global_payload(struct mobj *mobj __unused)
1218 {
1219 	/*
1220 	 * "can't happen" since thread_rpc_alloc_global_payload() always
1221 	 * returns NULL.
1222 	 */
1223 	volatile bool cant_happen __maybe_unused = true;
1224 
1225 	assert(!cant_happen);
1226 }
1227