xref: /optee_os/core/arch/arm/kernel/thread_spmc.c (revision a1d5c81f8834a9d2c6f4372cce2e59e70e709121)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2020, Linaro Limited.
4  * Copyright (c) 2019, Arm Limited. All rights reserved.
5  */
6 
7 #include <assert.h>
8 #include <ffa.h>
9 #include <io.h>
10 #include <kernel/interrupt.h>
11 #include <kernel/panic.h>
12 #include <kernel/spinlock.h>
13 #include <kernel/tee_misc.h>
14 #include <kernel/thread.h>
15 #include <mm/core_mmu.h>
16 #include <mm/mobj.h>
17 #include <optee_ffa.h>
18 #include <optee_msg.h>
19 #include <optee_rpc_cmd.h>
20 #include <string.h>
21 #include <sys/queue.h>
22 #include <tee/entry_std.h>
23 #include <util.h>
24 
25 #include "thread_private.h"
26 
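/*
 * The structures below mirror the memory management data structures
 * defined by the Arm FF-A specification; the table numbers in the
 * comments refer to that document.
 */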
27 /* Table 39: Constituent memory region descriptor */
28 struct constituent_address_range {
29 	uint64_t address;
30 	uint32_t page_count;
31 	uint32_t reserved;
32 };
33 
34 /* Table 38: Composite memory region descriptor */
35 struct mem_region_descr {
36 	uint32_t total_page_count;
37 	uint32_t address_range_count;
38 	uint64_t reserved;
39 	struct constituent_address_range address_range_array[];
40 };
41 
42 /* Table 40: Memory access permissions descriptor */
43 struct mem_access_perm_descr {
44 	uint16_t endpoint_id;
45 	uint8_t access_perm;
46 	uint8_t flags;
47 };
48 
49 /* Table 41: Endpoint memory access descriptor */
50 struct mem_accsess_descr {
51 	struct mem_access_perm_descr mem_access_perm_descr;
52 	uint32_t mem_region_offs;
53 	uint64_t reserved;
54 };
55 
56 /* Table 44: Lend, donate or share memory transaction descriptor */
57 struct mem_transaction_descr {
58 	uint16_t sender_id;
59 	uint8_t mem_reg_attr;
60 	uint8_t reserved0;
61 	uint32_t flags;
62 	uint64_t global_handle;
63 	uint64_t tag;
64 	uint32_t reserved1;
65 	uint32_t mem_access_descr_count;
66 	struct mem_accsess_descr mem_accsess_descr_array[];
67 };
68 
69 struct ffa_partition_info {
70 	uint16_t id;
71 	uint16_t execution_context;
72 	uint32_t partition_properties;
73 };
74 
75 struct mem_share_state {
76 	struct mobj_ffa *mf;
77 	unsigned int page_count;
78 	unsigned int region_count;
79 	unsigned int current_page_idx;
80 };
81 
82 struct mem_frag_state {
83 	struct mem_share_state share;
84 	tee_mm_entry_t *mm;
85 	unsigned int frag_offset;
86 	SLIST_ENTRY(mem_frag_state) link;
87 };
88 
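/*
 * The FF-A endpoint ID this SP identifies itself with, both when
 * reporting partition info and when picking out our entry in incoming
 * endpoint memory access descriptors.
 */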
89 static uint16_t my_sp_id = 0x8001;
90 
91 /*
92  * If @rxtx_size is 0, the RX/TX buffers are not mapped or initialized.
93  *
94  * @rxtx_spinlock protects the variables below from concurrent access;
95  * this includes the use of the content of @rx_buf and @frag_state_head.
96  *
97  * @tx_buf_is_mine is true when we may write to tx_buf and false when it is
98  * owned by normal world.
99  *
100  * Note that we can't prevent normal world from updating the content of
101  * these buffers, so we must always be careful when reading them, even
102  * while we hold the lock.
103  */
104 static void *rx_buf;
105 static void *tx_buf;
106 static unsigned int rxtx_size;
107 static unsigned int rxtx_spinlock;
108 static bool tx_buf_is_mine;
109 
110 static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
111 	SLIST_HEAD_INITIALIZER(&frag_state_head);
112 
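/*
 * In an FF-A direct message the sender endpoint ID is carried in the
 * upper 16 bits of w1 and the receiver ID in the lower 16 bits.
 * Swapping the two halves turns the source/destination of a request
 * into the value to use in the corresponding response.
 */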
113 static uint32_t swap_src_dst(uint32_t src_dst)
114 {
115 	return (src_dst >> 16) | (src_dst << 16);
116 }
117 
118 static void set_args(struct thread_smc_args *args, uint32_t fid,
119 		     uint32_t src_dst, uint32_t w2, uint32_t w3, uint32_t w4,
120 		     uint32_t w5)
121 {
122 	*args = (struct thread_smc_args){ .a0 = fid,
123 					  .a1 = src_dst,
124 					  .a2 = w2,
125 					  .a3 = w3,
126 					  .a4 = w4,
127 					  .a5 = w5, };
128 }
129 
130 static void handle_version(struct thread_smc_args *args)
131 {
132 	/*
133 	 * We currently only support one version, 1.0, so let's keep it
134 	 * simple.
135 	 */
136 	set_args(args, MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR),
137 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
138 		 FFA_PARAM_MBZ);
139 }
140 
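/*
 * FFA_FEATURES: report whether the FF-A function ID queried in w1 is
 * implemented by this SPMC and, where applicable, its properties in w2.
 */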
141 static void handle_features(struct thread_smc_args *args)
142 {
143 	uint32_t ret_fid = 0;
144 	uint32_t ret_w2 = FFA_PARAM_MBZ;
145 
146 	switch (args->a1) {
147 #ifdef ARM64
148 	case FFA_RXTX_MAP_64:
149 #endif
150 	case FFA_RXTX_MAP_32:
151 		ret_fid = FFA_SUCCESS_32;
152 		ret_w2 = 0; /* 4kB minimum buffer size and alignment boundary */
153 		break;
154 #ifdef ARM64
155 	case FFA_MEM_SHARE_64:
156 #endif
157 	case FFA_MEM_SHARE_32:
158 		ret_fid = FFA_SUCCESS_32;
159 		/*
160 		 * Partition manager supports transmission of a memory
161 		 * transaction descriptor in a buffer dynamically allocated
162 		 * by the endpoint.
163 		 */
164 		ret_w2 = BIT(0);
165 		break;
166 
167 	case FFA_ERROR:
168 	case FFA_VERSION:
169 	case FFA_SUCCESS_32:
170 #ifdef ARM64
171 	case FFA_SUCCESS_64:
172 #endif
173 	case FFA_MEM_FRAG_TX:
174 	case FFA_MEM_RECLAIM:
175 	case FFA_MSG_SEND_DIRECT_REQ_32:
176 	case FFA_INTERRUPT:
177 	case FFA_PARTITION_INFO_GET:
178 	case FFA_RX_RELEASE:
179 		ret_fid = FFA_SUCCESS_32;
180 		break;
181 	default:
182 		ret_fid = FFA_ERROR;
183 		ret_w2 = FFA_NOT_SUPPORTED;
184 		break;
185 	}
186 
187 	set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2,
188 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
189 }
190 
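/*
 * Map a physically contiguous non-secure buffer of @sz bytes into the
 * core's virtual address space. Returns 0 and the virtual address in
 * @va_ret on success, or an FFA_* error code on failure.
 */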
191 static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
192 {
193 	tee_mm_entry_t *mm = NULL;
194 
195 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
196 		return FFA_INVALID_PARAMETERS;
197 
198 	mm = tee_mm_alloc(&tee_mm_shm, sz);
199 	if (!mm)
200 		return FFA_NO_MEMORY;
201 
202 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
203 					  sz / SMALL_PAGE_SIZE,
204 					  MEM_AREA_NSEC_SHM)) {
205 		tee_mm_free(mm);
206 		return FFA_INVALID_PARAMETERS;
207 	}
208 
209 	*va_ret = (void *)tee_mm_get_smem(mm);
210 	return 0;
211 }
212 
213 static void unmap_buf(void *va, size_t sz)
214 {
215 	tee_mm_entry_t *mm = tee_mm_find(&tee_mm_shm, (vaddr_t)va);
216 
217 	assert(mm);
218 	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
219 	tee_mm_free(mm);
220 }
221 
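/*
 * FFA_RXTX_MAP: the caller registers its RX/TX buffer pair. w1 holds the
 * physical address of the caller's TX buffer (our RX buffer), w2 the
 * caller's RX buffer (our TX buffer) and w3 the size of the buffers in
 * 4kB pages.
 */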
222 static void handle_rxtx_map(struct thread_smc_args *args)
223 {
224 	int rc = 0;
225 	uint32_t ret_fid = FFA_ERROR;
226 	unsigned int sz = 0;
227 	paddr_t rx_pa = 0;
228 	paddr_t tx_pa = 0;
229 	void *rx = NULL;
230 	void *tx = NULL;
231 
232 	cpu_spin_lock(&rxtx_spinlock);
233 
234 	if (args->a3 & GENMASK_64(63, 6)) {
235 		rc = FFA_INVALID_PARAMETERS;
236 		goto out;
237 	}
238 
239 	sz = args->a3 * SMALL_PAGE_SIZE;
240 	if (!sz) {
241 		rc = FFA_INVALID_PARAMETERS;
242 		goto out;
243 	}
244 	/* TX/RX are swapped compared to the caller */
245 	tx_pa = args->a2;
246 	rx_pa = args->a1;
247 
248 	if (rxtx_size) {
249 		rc = FFA_DENIED;
250 		goto out;
251 	}
252 
253 	rc = map_buf(tx_pa, sz, &tx);
254 	if (rc)
255 		goto out;
256 	rc = map_buf(rx_pa, sz, &rx);
257 	if (rc) {
258 		unmap_buf(tx, sz);
259 		goto out;
260 	}
261 
262 	tx_buf = tx;
263 	rx_buf = rx;
264 	rxtx_size = sz;
265 	tx_buf_is_mine = true;
266 	ret_fid = FFA_SUCCESS_32;
267 	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
268 	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
269 out:
270 	cpu_spin_unlock(&rxtx_spinlock);
271 	set_args(args, ret_fid, FFA_PARAM_MBZ, rc,
272 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
273 }
274 
275 static void handle_rxtx_unmap(struct thread_smc_args *args)
276 {
277 	uint32_t ret_fid = FFA_ERROR;
278 	int rc = FFA_INVALID_PARAMETERS;
279 
280 	cpu_spin_lock(&rxtx_spinlock);
281 
282 	if (!rxtx_size)
283 		goto out;
284 	unmap_buf(rx_buf, rxtx_size);
285 	unmap_buf(tx_buf, rxtx_size);
286 	rxtx_size = 0;
287 	rx_buf = NULL;
288 	tx_buf = NULL;
289 	ret_fid = FFA_SUCCESS_32;
290 	rc = 0;
291 out:
292 	cpu_spin_unlock(&rxtx_spinlock);
293 	set_args(args, ret_fid, FFA_PARAM_MBZ, rc,
294 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
295 }
296 
297 static void handle_rx_release(struct thread_smc_args *args)
298 {
299 	uint32_t ret_fid = 0;
300 	int rc = 0;
301 
302 	cpu_spin_lock(&rxtx_spinlock);
303 	/* The sender's RX is our TX */
304 	if (!rxtx_size || tx_buf_is_mine) {
305 		ret_fid = FFA_ERROR;
306 		rc = FFA_DENIED;
307 	} else {
308 		ret_fid = FFA_SUCCESS_32;
309 		rc = 0;
310 		tx_buf_is_mine = true;
311 	}
312 	cpu_spin_unlock(&rxtx_spinlock);
313 
314 	set_args(args, ret_fid, FFA_PARAM_MBZ, rc,
315 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
316 }
317 
318 static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
319 {
320 	return !w0 && !w1 && !w2 && !w3;
321 }
322 
323 static bool is_optee_os_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
324 {
325 	return w0 == OPTEE_MSG_OS_OPTEE_UUID_0 &&
326 	       w1 == OPTEE_MSG_OS_OPTEE_UUID_1 &&
327 	       w2 == OPTEE_MSG_OS_OPTEE_UUID_2 &&
328 	       w3 == OPTEE_MSG_OS_OPTEE_UUID_3;
329 }
330 
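/*
 * FFA_PARTITION_INFO_GET: fill in information about this partition in
 * the caller's RX buffer (our TX buffer). Only the Nil UUID and the
 * OP-TEE OS UUID are recognized. On success w2 holds the number of
 * partitions returned (one) and ownership of the buffer passes to the
 * caller until it is returned with FFA_RX_RELEASE.
 */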
331 static void handle_partition_info_get(struct thread_smc_args *args)
332 {
333 	uint32_t ret_fid = 0;
334 	int rc = 0;
335 
336 	if (!is_nil_uuid(args->a1, args->a2, args->a3, args->a4) &&
337 	    !is_optee_os_uuid(args->a1, args->a2, args->a3, args->a4)) {
338 		ret_fid = FFA_ERROR;
339 		rc = FFA_INVALID_PARAMETERS;
340 		goto out;
341 	}
342 
343 	cpu_spin_lock(&rxtx_spinlock);
344 	if (rxtx_size && tx_buf_is_mine) {
345 		struct ffa_partition_info *fpi = tx_buf;
346 
347 		fpi->id = my_sp_id;
348 		fpi->execution_context = CFG_TEE_CORE_NB_CORE;
349 		fpi->partition_properties = BIT(0) | BIT(1);
350 
351 		ret_fid = FFA_SUCCESS_32;
352 		rc = 1;
353 		tx_buf_is_mine = false;
354 	} else {
355 		ret_fid = FFA_ERROR;
356 		if (rxtx_size)
357 			rc = FFA_BUSY;
358 		else
359 			rc = FFA_DENIED; /* TX buffer not set up yet */
360 	}
361 	cpu_spin_unlock(&rxtx_spinlock);
362 
363 out:
364 	set_args(args, ret_fid, FFA_PARAM_MBZ, rc,
365 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
366 }
367 
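/*
 * A yielding call either starts a new standard call in a free thread or
 * resumes one that earlier returned to normal world with an RPC.
 * thread_alloc_and_run() and thread_resume_from_rpc() only return here
 * if the call couldn't be started or resumed, in which case the error
 * is reported in the direct response below.
 */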
368 static void handle_yielding_call(struct thread_smc_args *args)
369 {
370 	uint32_t ret_val = 0;
371 
372 	thread_check_canaries();
373 
374 	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
375 		/* Note connection to struct thread_rpc_arg::ret */
376 		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
377 				       0);
378 		ret_val = FFA_INVALID_PARAMETERS;
379 	} else {
380 		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5);
381 		ret_val = FFA_BUSY;
382 	}
383 	set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
384 		 swap_src_dst(args->a1), 0, ret_val, 0, 0);
385 }
386 
387 static void handle_blocking_call(struct thread_smc_args *args)
388 {
389 	switch (args->a3) {
390 	case OPTEE_FFA_GET_API_VERSION:
391 		set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
392 			 swap_src_dst(args->a1), 0, OPTEE_FFA_VERSION_MAJOR,
393 			 OPTEE_FFA_VERSION_MINOR, 0);
394 		break;
395 	case OPTEE_FFA_GET_OS_VERSION:
396 		set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
397 			 swap_src_dst(args->a1), 0, CFG_OPTEE_REVISION_MAJOR,
398 			 CFG_OPTEE_REVISION_MINOR, TEE_IMPL_GIT_SHA1);
399 		break;
400 	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
401 		set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
402 			 swap_src_dst(args->a1), 0, 0, 0, 0);
403 		break;
404 	default:
405 		EMSG("Unhandled blocking service ID %#"PRIx32,
406 		     (uint32_t)args->a3);
407 		panic();
408 	}
409 }
410 
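/*
 * Scan the endpoint memory access descriptor array for the entry that
 * targets this SP and return its access permissions together with the
 * offset of the composite memory region descriptor.
 */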
411 static int get_acc_perms(struct mem_accsess_descr *mem_acc,
412 			 unsigned int num_mem_accs, uint8_t *acc_perms,
413 			 unsigned int *region_offs)
414 {
415 	unsigned int n = 0;
416 
417 	for (n = 0; n < num_mem_accs; n++) {
418 		struct mem_access_perm_descr *descr =
419 			&mem_acc[n].mem_access_perm_descr;
420 
421 		if (READ_ONCE(descr->endpoint_id) == my_sp_id) {
422 			*acc_perms = READ_ONCE(descr->access_perm);
423 			*region_offs = READ_ONCE(mem_acc[n].mem_region_offs);
424 			return 0;
425 		}
426 	}
427 
428 	return FFA_INVALID_PARAMETERS;
429 }
430 
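/*
 * Parse and sanity check the memory transaction descriptor of an
 * incoming FFA_MEM_SHARE: the memory attributes and access permissions
 * must match what we expect for non-secure shared memory and all
 * descriptors must fit inside the supplied buffer. On success the total
 * page count, the number of address ranges and the offset of the first
 * constituent address range are returned.
 */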
431 static int mem_share_init(void *buf, size_t blen, unsigned int *page_count,
432 			  unsigned int *region_count, size_t *addr_range_offs)
433 {
434 	struct mem_region_descr *region_descr = NULL;
435 	struct mem_transaction_descr *descr = NULL;
436 	const uint8_t exp_mem_acc_perm = 0x6; /* Not executable, Read-write */
437 	/* Normal memory, Write-Back cacheable, Inner shareable */
438 	const uint8_t exp_mem_reg_attr = 0x2f;
439 	unsigned int num_mem_accs = 0;
440 	uint8_t mem_acc_perm = 0;
441 	unsigned int region_descr_offs = 0;
442 	size_t n = 0;
443 
444 	if (!ALIGNMENT_IS_OK(buf, struct mem_transaction_descr) ||
445 	    blen < sizeof(struct mem_transaction_descr))
446 		return FFA_INVALID_PARAMETERS;
447 
448 	descr = buf;
449 
450 	/* Check that the endpoint memory access descriptor array fits */
451 	num_mem_accs = READ_ONCE(descr->mem_access_descr_count);
452 	if (MUL_OVERFLOW(sizeof(struct mem_accsess_descr), num_mem_accs, &n) ||
453 	    ADD_OVERFLOW(sizeof(*descr), n, &n) || n > blen)
454 		return FFA_INVALID_PARAMETERS;
455 
456 	if (READ_ONCE(descr->mem_reg_attr) != exp_mem_reg_attr)
457 		return FFA_INVALID_PARAMETERS;
458 
459 	/* Check that the access permissions match what's expected */
460 	if (get_acc_perms(descr->mem_accsess_descr_array,
461 			  num_mem_accs, &mem_acc_perm, &region_descr_offs) ||
462 	    mem_acc_perm != exp_mem_acc_perm)
463 		return FFA_INVALID_PARAMETERS;
464 
465 	/* Check that the Composite memory region descriptor fits */
466 	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
467 	    n > blen)
468 		return FFA_INVALID_PARAMETERS;
469 
470 	if (!ALIGNMENT_IS_OK((vaddr_t)descr + region_descr_offs,
471 			     struct mem_region_descr))
472 		return FFA_INVALID_PARAMETERS;
473 
474 	region_descr = (struct mem_region_descr *)((vaddr_t)descr +
475 						    region_descr_offs);
476 	*page_count = READ_ONCE(region_descr->total_page_count);
477 	*region_count = READ_ONCE(region_descr->address_range_count);
478 	*addr_range_offs = n;
479 	return 0;
480 }
481 
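/*
 * Add the constituent address ranges in @buf to the shared memory
 * object. Returns the number of bytes consumed when more ranges remain,
 * 0 when all ranges have been added, or an FFA_* error code.
 */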
482 static int add_mem_share_helper(struct mem_share_state *s, void *buf,
483 				size_t flen)
484 {
485 	unsigned int region_count = flen /
486 				    sizeof(struct constituent_address_range);
487 	struct constituent_address_range *arange = NULL;
488 	unsigned int n = 0;
489 
490 	if (region_count > s->region_count)
491 		region_count = s->region_count;
492 
493 	if (!ALIGNMENT_IS_OK(buf, struct constituent_address_range))
494 		return FFA_INVALID_PARAMETERS;
495 	arange = buf;
496 
497 	for (n = 0; n < region_count; n++) {
498 		unsigned int page_count = READ_ONCE(arange[n].page_count);
499 		uint64_t addr = READ_ONCE(arange[n].address);
500 
501 		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
502 					  addr, page_count))
503 			return FFA_INVALID_PARAMETERS;
504 	}
505 
506 	s->region_count -= region_count;
507 	if (s->region_count)
508 		return region_count * sizeof(*arange);
509 
510 	if (s->current_page_idx != s->page_count)
511 		return FFA_INVALID_PARAMETERS;
512 
513 	return 0;
514 }
515 
516 static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
517 {
518 	int rc = 0;
519 
520 	rc = add_mem_share_helper(&s->share, buf, flen);
521 	if (rc >= 0) {
522 		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
523 			if (s->share.region_count)
524 				return s->frag_offset;
525 			/* We're done, return the number of consumed bytes */
526 			rc = s->frag_offset;
527 		} else {
528 			rc = FFA_INVALID_PARAMETERS;
529 		}
530 	}
531 
532 	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
533 	if (rc < 0)
534 		mobj_ffa_sel1_spmc_delete(s->share.mf);
535 	else
536 		mobj_ffa_push_to_inactive(s->share.mf);
537 	free(s);
538 
539 	return rc;
540 }
541 
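/*
 * Register a new shared memory object from the transaction descriptor in
 * @buf. If only part of the descriptor is present (@flen < @blen) a
 * fragment state is queued and the number of bytes handled so far is
 * returned; the remaining constituents arrive via FFA_MEM_FRAG_TX.
 */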
542 static int add_mem_share(tee_mm_entry_t *mm, void *buf, size_t blen,
543 			 size_t flen, uint64_t *global_handle)
544 {
545 	int rc = 0;
546 	struct mem_share_state share = { };
547 	size_t addr_range_offs = 0;
548 	size_t n = 0;
549 
550 	if (flen > blen)
551 		return FFA_INVALID_PARAMETERS;
552 
553 	rc = mem_share_init(buf, flen, &share.page_count, &share.region_count,
554 			    &addr_range_offs);
555 	if (rc)
556 		return rc;
557 
558 	if (MUL_OVERFLOW(share.region_count,
559 			 sizeof(struct constituent_address_range), &n) ||
560 	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
561 		return FFA_INVALID_PARAMETERS;
562 
563 	share.mf = mobj_ffa_sel1_spmc_new(share.page_count);
564 	if (!share.mf)
565 		return FFA_NO_MEMORY;
566 
567 	if (flen != blen) {
568 		struct mem_frag_state *s = calloc(1, sizeof(*s));
569 
570 		if (!s) {
571 			rc = FFA_NO_MEMORY;
572 			goto err;
573 		}
574 		s->share = share;
575 		s->mm = mm;
576 		s->frag_offset = addr_range_offs;
577 
578 		SLIST_INSERT_HEAD(&frag_state_head, s, link);
579 		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
580 					flen - addr_range_offs);
581 
582 		if (rc >= 0)
583 			*global_handle = mobj_ffa_get_cookie(share.mf);
584 
585 		return rc;
586 	}
587 
588 	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
589 				  flen - addr_range_offs);
590 	if (rc) {
591 		/*
592 		 * A positive value (consumed bytes, more regions remaining) may
593 		 * be returned instead of 0 for done; treat that as an error too.
594 		 */
595 		rc = FFA_INVALID_PARAMETERS;
596 		goto err;
597 	}
598 
599 	*global_handle = mobj_ffa_push_to_inactive(share.mf);
600 
601 	return 0;
602 err:
603 	mobj_ffa_sel1_spmc_delete(share.mf);
604 	return rc;
605 }
606 
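/*
 * The transaction descriptor is passed in a physically contiguous buffer
 * allocated by the caller (@pbuf) rather than in the RX buffer. Map it
 * temporarily and register the share from there.
 */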
607 static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
608 				 unsigned int page_count,
609 				 uint64_t *global_handle)
610 {
611 	int rc = 0;
612 	size_t len = 0;
613 	tee_mm_entry_t *mm = NULL;
614 	vaddr_t offs = pbuf & SMALL_PAGE_MASK;
615 
616 	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
617 		return FFA_INVALID_PARAMETERS;
618 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
619 		return FFA_INVALID_PARAMETERS;
620 
621 	/*
622 	 * Check that the length reported in @blen is covered by @len even
623 	 * when the offset into the first page is taken into account.
624 	 */
625 	if (len < blen || len - offs < blen)
626 		return FFA_INVALID_PARAMETERS;
627 
628 	mm = tee_mm_alloc(&tee_mm_shm, len);
629 	if (!mm)
630 		return FFA_NO_MEMORY;
631 
632 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
633 					  page_count, MEM_AREA_NSEC_SHM)) {
634 		rc = FFA_INVALID_PARAMETERS;
635 		goto out;
636 	}
637 
638 	cpu_spin_lock(&rxtx_spinlock);
639 	rc = add_mem_share(mm, (void *)(tee_mm_get_smem(mm) + offs), blen, flen,
640 			   global_handle);
641 	cpu_spin_unlock(&rxtx_spinlock);
642 	if (rc > 0)
643 		return rc;
644 
645 	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
646 out:
647 	tee_mm_free(mm);
648 	return rc;
649 }
650 
651 static int handle_mem_share_rxbuf(size_t blen, size_t flen,
652 				  uint64_t *global_handle)
653 {
654 	int rc = FFA_DENIED;
655 
656 	cpu_spin_lock(&rxtx_spinlock);
657 
658 	if (rx_buf && flen <= rxtx_size)
659 		rc = add_mem_share(NULL, rx_buf, blen, flen, global_handle);
660 
661 	cpu_spin_unlock(&rxtx_spinlock);
662 
663 	return rc;
664 }
665 
666 static void handle_mem_share(struct thread_smc_args *args)
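/*
 * FFA_MEM_SHARE: normal world shares memory with us. The transaction
 * descriptor is passed either in a caller-allocated buffer (address and
 * page count in w3/w4) or in our RX buffer, with the total and fragment
 * lengths in w1 and w2. A positive return value from the helpers means
 * that more fragments are expected, which is signalled to the caller
 * with FFA_MEM_FRAG_RX.
 */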
667 {
668 	uint32_t ret_w1 = 0;
669 	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
670 	uint32_t ret_w3 = 0;
671 	uint32_t ret_fid = FFA_ERROR;
672 	uint64_t global_handle = 0;
673 	int rc = 0;
674 
675 	/* Check that the MBZs are indeed 0 */
676 	if (args->a5 || args->a6 || args->a7)
677 		goto out;
678 
679 	if (!args->a3) {
680 		/*
681 		 * The memory transaction descriptor is passed via our rx
682 		 * buffer.
683 		 */
684 		if (args->a4)
685 			goto out;
686 		rc = handle_mem_share_rxbuf(args->a1, args->a2, &global_handle);
687 	} else {
688 		rc = handle_mem_share_tmem(args->a3, args->a1, args->a2,
689 					   args->a4, &global_handle);
690 	}
691 	if (rc < 0) {
692 		ret_w2 = rc;
693 		goto out;
694 	}
695 	if (rc > 0) {
696 		ret_fid = FFA_MEM_FRAG_RX;
697 		ret_w3 = rc;
698 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
		goto out;
699 	}
700 	ret_fid = FFA_SUCCESS_32;
701 	reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
702 out:
703 	set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
704 }
705 
706 static struct mem_frag_state *get_frag_state(uint64_t global_handle)
707 {
708 	struct mem_frag_state *s = NULL;
709 
710 	SLIST_FOREACH(s, &frag_state_head, link)
711 		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
712 			return s;
713 
714 	return NULL;
715 }
716 
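/*
 * FFA_MEM_FRAG_TX: the caller sends the next fragment of a memory
 * transaction descriptor, identified by the global handle in w1/w2,
 * with the fragment length in w3. The fragment is read from the
 * caller-allocated buffer mapped earlier or from our RX buffer,
 * depending on how the transaction was started.
 */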
717 static void handle_mem_frag_tx(struct thread_smc_args *args)
718 {
719 	int rc = 0;
720 	uint64_t global_handle = reg_pair_to_64(READ_ONCE(args->a2),
721 						READ_ONCE(args->a1));
722 	size_t flen = READ_ONCE(args->a3);
723 	struct mem_frag_state *s = NULL;
724 	tee_mm_entry_t *mm = NULL;
725 	unsigned int page_count = 0;
726 	void *buf = NULL;
727 	uint32_t ret_w1 = 0;
728 	uint32_t ret_w2 = 0;
729 	uint32_t ret_w3 = 0;
730 	uint32_t ret_fid = 0;
731 
732 	/*
733 	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
734 	 * requests.
735 	 */
736 
737 	cpu_spin_lock(&rxtx_spinlock);
738 
739 	s = get_frag_state(global_handle);
740 	if (!s) {
741 		rc = FFA_INVALID_PARAMETERS;
742 		goto out;
743 	}
744 
745 	mm = s->mm;
746 	if (mm) {
747 		if (flen > tee_mm_get_bytes(mm)) {
748 			rc = FFA_INVALID_PARAMETERS;
749 			goto out;
750 		}
751 		page_count = s->share.page_count;
752 		buf = (void *)tee_mm_get_smem(mm);
753 	} else {
754 		if (flen > rxtx_size) {
755 			rc = FFA_INVALID_PARAMETERS;
756 			goto out;
757 		}
758 		buf = rx_buf;
759 	}
760 
761 	rc = add_mem_share_frag(s, buf, flen);
762 out:
763 	cpu_spin_unlock(&rxtx_spinlock);
764 
765 	if (rc <= 0 && mm) {
766 		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
767 		tee_mm_free(mm);
768 	}
769 
770 	if (rc < 0) {
771 		ret_fid = FFA_ERROR;
772 		ret_w2 = rc;
773 	} else if (rc > 0) {
774 		ret_fid = FFA_MEM_FRAG_RX;
775 		ret_w3 = rc;
776 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
777 	} else {
778 		ret_fid = FFA_SUCCESS_32;
779 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
780 	}
781 
782 	set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
783 }
784 
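/*
 * FFA_MEM_RECLAIM: normal world reclaims memory identified by the cookie
 * in w1/w2. The request is refused while the shared memory object is
 * still in use by OP-TEE.
 */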
785 static void handle_mem_reclaim(struct thread_smc_args *args)
786 {
787 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
788 	uint32_t ret_fid = FFA_ERROR;
789 	uint64_t cookie = 0;
790 
791 	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
792 		goto out;
793 
794 	cookie = reg_pair_to_64(args->a2, args->a1);
795 	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
796 	case TEE_SUCCESS:
797 		ret_fid = FFA_SUCCESS_32;
798 		ret_val = 0;
799 		break;
800 	case TEE_ERROR_ITEM_NOT_FOUND:
801 		DMSG("cookie %#"PRIx64" not found", cookie);
802 		ret_val = FFA_INVALID_PARAMETERS;
803 		break;
804 	default:
805 		DMSG("cookie %#"PRIx64" busy", cookie);
806 		ret_val = FFA_DENIED;
807 		break;
808 	}
809 out:
810 	set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
811 }
812 
813 /* Only called from assembly */
814 void thread_spmc_msg_recv(struct thread_smc_args *args);
815 void thread_spmc_msg_recv(struct thread_smc_args *args)
816 {
817 	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
818 	switch (args->a0) {
819 	case FFA_VERSION:
820 		handle_version(args);
821 		break;
822 	case FFA_FEATURES:
823 		handle_features(args);
824 		break;
825 #ifdef ARM64
826 	case FFA_RXTX_MAP_64:
827 #endif
828 	case FFA_RXTX_MAP_32:
829 		handle_rxtx_map(args);
830 		break;
831 	case FFA_RXTX_UNMAP:
832 		handle_rxtx_unmap(args);
833 		break;
834 	case FFA_RX_RELEASE:
835 		handle_rx_release(args);
836 		break;
837 	case FFA_PARTITION_INFO_GET:
838 		handle_partition_info_get(args);
839 		break;
840 	case FFA_INTERRUPT:
841 		itr_core_handler();
842 		set_args(args, FFA_SUCCESS_32, args->a1, 0, 0, 0, 0);
843 		break;
844 	case FFA_MSG_SEND_DIRECT_REQ_32:
845 		if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
846 			handle_yielding_call(args);
847 		else
848 			handle_blocking_call(args);
849 		break;
850 #ifdef ARM64
851 	case FFA_MEM_SHARE_64:
852 #endif
853 	case FFA_MEM_SHARE_32:
854 		handle_mem_share(args);
855 		break;
856 	case FFA_MEM_RECLAIM:
857 		handle_mem_reclaim(args);
858 		break;
859 	case FFA_MEM_FRAG_TX:
860 		handle_mem_frag_tx(args);
861 		break;
862 	default:
863 		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
864 		set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
865 			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
866 	}
867 }
868 
869 static uint32_t yielding_call_with_arg(uint64_t cookie)
870 {
871 	uint32_t rv = TEE_ERROR_BAD_PARAMETERS;
872 	struct optee_msg_arg *arg = NULL;
873 	struct mobj *mobj = NULL;
874 	uint32_t num_params = 0;
875 
876 	mobj = mobj_ffa_get_by_cookie(cookie, 0);
877 	if (!mobj) {
878 		EMSG("Can't find cookie %#"PRIx64, cookie);
879 		return TEE_ERROR_BAD_PARAMETERS;
880 	}
881 
882 	rv = mobj_inc_map(mobj);
883 	if (rv)
884 		goto out_put_mobj;
885 
886 	rv = TEE_ERROR_BAD_PARAMETERS;
887 	arg = mobj_get_va(mobj, 0);
888 	if (!arg)
889 		goto out_dec_map;
890 
891 	if (!mobj_get_va(mobj, sizeof(*arg)))
892 		goto out_dec_map;
893 
894 	num_params = READ_ONCE(arg->num_params);
895 	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
896 		goto out_dec_map;
897 
898 	if (!mobj_get_va(mobj, OPTEE_MSG_GET_ARG_SIZE(num_params)))
899 		goto out_dec_map;
900 
901 	rv = tee_entry_std(arg, num_params);
902 
903 	thread_rpc_shm_cache_clear(&threads[thread_get_id()].shm_cache);
904 
905 out_dec_map:
906 	mobj_dec_map(mobj);
907 out_put_mobj:
908 	mobj_put(mobj);
909 	return rv;
910 }
911 
912 static uint32_t yielding_unregister_shm(uint64_t cookie)
913 {
914 	uint32_t res = mobj_ffa_unregister_by_cookie(cookie);
915 
916 	switch (res) {
917 	case TEE_SUCCESS:
918 	case TEE_ERROR_ITEM_NOT_FOUND:
919 		return 0;
920 	case TEE_ERROR_BUSY:
921 		EMSG("res %#"PRIx32, res);
922 		return FFA_BUSY;
923 	default:
924 		EMSG("res %#"PRIx32, res);
925 		return FFA_INVALID_PARAMETERS;
926 	}
927 }
928 
929 /*
930  * Helper routine for the assembly function thread_std_smc_entry()
931  *
932  * Note: this function is weak just to make it possible to exclude it from
933  * the unpaged area.
934  */
935 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
936 				       uint32_t a2, uint32_t a3)
937 {
938 	/*
939 	 * Arguments are supplied from handle_yielding_call() as:
940 	 * a0 <- w1
941 	 * a1 <- w3
942 	 * a2 <- w4
943 	 * a3 <- w5
944 	 */
945 	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
946 	switch (a1) {
947 	case OPTEE_FFA_YIELDING_CALL_WITH_ARG:
948 		return yielding_call_with_arg(reg_pair_to_64(a3, a2));
949 	case OPTEE_FFA_YIELDING_CALL_REGISTER_SHM:
950 		return FFA_NOT_SUPPORTED;
951 	case OPTEE_FFA_YIELDING_CALL_UNREGISTER_SHM:
952 		return yielding_unregister_shm(reg_pair_to_64(a3, a2));
953 	default:
954 		return FFA_DENIED;
955 	}
956 }
957 
958 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
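/*
 * Convert a struct thread_param memref into an OPTEE_MSG fmem parameter.
 * The memory is identified towards normal world by the cookie (global
 * handle) of the backing FF-A shared memory object.
 */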
959 {
960 	uint64_t offs = tpm->u.memref.offs;
961 
962 	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
963 		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
964 
965 	param->u.fmem.offs_low = offs;
966 	param->u.fmem.offs_high = offs >> 32;
967 	if (param->u.fmem.offs_high != offs >> 32)
968 		return false;
969 
970 	param->u.fmem.size = tpm->u.memref.size;
971 	if (tpm->u.memref.mobj) {
972 		param->u.fmem.global_id = mobj_get_cookie(tpm->u.memref.mobj);
973 		if (!param->u.fmem.global_id)
974 			return false;
975 	} else {
976 		param->u.fmem.global_id = 0;
977 	}
978 
979 	return true;
980 }
981 
982 static void thread_rpc_free(uint32_t type, uint64_t cookie, struct mobj *mobj)
983 {
984 	TEE_Result res = TEE_SUCCESS;
985 	struct thread_rpc_arg rpc_arg = { .call = {
986 			.w1 = thread_get_tsd()->rpc_target_info,
987 			.w4 = type,
988 		},
989 	};
990 
991 	reg_pair_from_64(cookie, &rpc_arg.call.w6, &rpc_arg.call.w5);
992 	mobj_put(mobj);
993 	res = mobj_ffa_unregister_by_cookie(cookie);
994 	if (res)
995 		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): res %#"PRIx32,
996 		     cookie, res);
997 	thread_rpc(&rpc_arg);
998 }
999 
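/*
 * Ask normal world, via RPC, to allocate shared memory of the requested
 * kind and return it as a mapped mobj. The cookie and an optional offset
 * into the registered region are passed back in the RPC return
 * arguments.
 */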
1000 static struct mobj *thread_rpc_alloc(size_t size, uint32_t type)
1001 {
1002 	struct mobj *mobj = NULL;
1003 	unsigned int page_count = ROUNDUP(size, SMALL_PAGE_SIZE) /
1004 				  SMALL_PAGE_SIZE;
1005 	struct thread_rpc_arg rpc_arg = { .call = {
1006 			.w1 = thread_get_tsd()->rpc_target_info,
1007 			.w4 = type,
1008 			.w5 = page_count,
1009 		},
1010 	};
1011 	unsigned int internal_offset = 0;
1012 	uint64_t cookie = 0;
1013 
1014 	thread_rpc(&rpc_arg);
1015 
1016 	cookie = reg_pair_to_64(rpc_arg.ret.w5, rpc_arg.ret.w4);
1017 	if (!cookie)
1018 		return NULL;
1019 	internal_offset = rpc_arg.ret.w6;
1020 
1021 	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
1022 	if (!mobj) {
1023 		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
1024 		     cookie, internal_offset);
1025 		return NULL;
1026 	}
1027 
1028 	assert(mobj_is_nonsec(mobj));
1029 
1030 	if (mobj_inc_map(mobj)) {
1031 		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
1032 		mobj_put(mobj);
1033 		return NULL;
1034 	}
1035 
1036 	return mobj;
1037 }
1038 
1039 struct mobj *thread_rpc_alloc_payload(size_t size)
1040 {
1041 	return thread_rpc_alloc(size,
1042 				OPTEE_FFA_YIELDING_CALL_RETURN_ALLOC_SUPPL_SHM);
1043 }
1044 
1045 void thread_rpc_free_payload(struct mobj *mobj)
1046 {
1047 	thread_rpc_free(OPTEE_FFA_YIELDING_CALL_RETURN_FREE_SUPPL_SHM,
1048 			mobj_get_cookie(mobj), mobj);
1049 }
1050 
1051 struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
1052 {
1053 	return thread_rpc_alloc(size,
1054 				OPTEE_FFA_YIELDING_CALL_RETURN_ALLOC_KERN_SHM);
1055 }
1056 
1057 void thread_rpc_free_kernel_payload(struct mobj *mobj)
1058 {
1059 	thread_rpc_free(OPTEE_FFA_YIELDING_CALL_RETURN_FREE_KERN_SHM,
1060 			mobj_get_cookie(mobj), mobj);
1061 }
1062 
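/*
 * Build the OPTEE_MSG argument struct for an RPC in the per-thread
 * shared memory buffer, allocating that buffer on first use. @carg_ret
 * is set to the cookie normal world uses to locate the buffer.
 */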
1063 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
1064 			    struct thread_param *params,
1065 			    struct optee_msg_arg **arg_ret,
1066 			    uint64_t *carg_ret)
1067 {
1068 	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1069 	struct thread_ctx *thr = threads + thread_get_id();
1070 	struct optee_msg_arg *arg = thr->rpc_arg;
1071 
1072 	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
1073 		return TEE_ERROR_BAD_PARAMETERS;
1074 
1075 	if (!arg) {
1076 		struct mobj *mobj = thread_rpc_alloc_kernel_payload(sz);
1077 
1078 		if (!mobj)
1079 			return TEE_ERROR_OUT_OF_MEMORY;
1080 
1081 		arg = mobj_get_va(mobj, 0);
1082 		if (!arg) {
1083 			thread_rpc_free_kernel_payload(mobj);
1084 			return TEE_ERROR_OUT_OF_MEMORY;
1085 		}
1086 
1087 		thr->rpc_arg = arg;
1088 		thr->rpc_mobj = mobj;
1089 	}
1090 
1091 	memset(arg, 0, sz);
1092 	arg->cmd = cmd;
1093 	arg->num_params = num_params;
1094 	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
1095 
1096 	for (size_t n = 0; n < num_params; n++) {
1097 		switch (params[n].attr) {
1098 		case THREAD_PARAM_ATTR_NONE:
1099 			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
1100 			break;
1101 		case THREAD_PARAM_ATTR_VALUE_IN:
1102 		case THREAD_PARAM_ATTR_VALUE_OUT:
1103 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1104 			arg->params[n].attr = params[n].attr -
1105 					      THREAD_PARAM_ATTR_VALUE_IN +
1106 					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
1107 			arg->params[n].u.value.a = params[n].u.value.a;
1108 			arg->params[n].u.value.b = params[n].u.value.b;
1109 			arg->params[n].u.value.c = params[n].u.value.c;
1110 			break;
1111 		case THREAD_PARAM_ATTR_MEMREF_IN:
1112 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1113 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1114 			if (!set_fmem(arg->params + n, params + n))
1115 				return TEE_ERROR_BAD_PARAMETERS;
1116 			break;
1117 		default:
1118 			return TEE_ERROR_BAD_PARAMETERS;
1119 		}
1120 	}
1121 
1122 	*arg_ret = arg;
1123 	*carg_ret = mobj_get_cookie(thr->rpc_mobj);
1124 
1125 	return TEE_SUCCESS;
1126 }
1127 
1128 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
1129 				struct thread_param *params)
1130 {
1131 	for (size_t n = 0; n < num_params; n++) {
1132 		switch (params[n].attr) {
1133 		case THREAD_PARAM_ATTR_VALUE_OUT:
1134 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1135 			params[n].u.value.a = arg->params[n].u.value.a;
1136 			params[n].u.value.b = arg->params[n].u.value.b;
1137 			params[n].u.value.c = arg->params[n].u.value.c;
1138 			break;
1139 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1140 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1141 			params[n].u.memref.size = arg->params[n].u.fmem.size;
1142 			break;
1143 		default:
1144 			break;
1145 		}
1146 	}
1147 
1148 	return arg->ret;
1149 }
1150 
1151 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
1152 			struct thread_param *params)
1153 {
1154 	struct thread_rpc_arg rpc_arg = { .call = {
1155 			.w1 = thread_get_tsd()->rpc_target_info,
1156 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1157 		},
1158 	};
1159 	uint64_t carg = 0;
1160 	struct optee_msg_arg *arg = NULL;
1161 	uint32_t ret = 0;
1162 
1163 	ret = get_rpc_arg(cmd, num_params, params, &arg, &carg);
1164 	if (ret)
1165 		return ret;
1166 
1167 	reg_pair_from_64(carg, &rpc_arg.call.w6, &rpc_arg.call.w5);
1168 	thread_rpc(&rpc_arg);
1169 
1170 	return get_rpc_arg_res(arg, num_params, params);
1171 }
1172 
1173 struct mobj *thread_rpc_alloc_global_payload(size_t size __unused)
1174 {
1175 	return NULL;
1176 }
1177 
1178 void thread_rpc_free_global_payload(struct mobj *mobj __unused)
1179 {
1180 	/*
1181 	 * "can't happen" since thread_rpc_alloc_global_payload() always
1182 	 * returns NULL.
1183 	 */
1184 	volatile bool cant_happen __maybe_unused = true;
1185 
1186 	assert(!cant_happen);
1187 }
1188