xref: /optee_os/core/arch/arm/kernel/thread_spmc.c (revision bbfe5da7daee2ab8efcb94fe11938d444b940384)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2020, Linaro Limited.
4  * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
5  */
6 
7 #include <assert.h>
8 #include <ffa.h>
9 #include <io.h>
10 #include <kernel/interrupt.h>
11 #include <kernel/panic.h>
12 #include <kernel/secure_partition.h>
13 #include <kernel/spinlock.h>
14 #include <kernel/spmc_sp_handler.h>
15 #include <kernel/tee_misc.h>
16 #include <kernel/thread.h>
17 #include <kernel/thread_spmc.h>
18 #include <mm/core_mmu.h>
19 #include <mm/mobj.h>
20 #include <optee_ffa.h>
21 #include <optee_msg.h>
22 #include <optee_rpc_cmd.h>
23 #include <string.h>
24 #include <sys/queue.h>
25 #include <tee/entry_std.h>
26 #include <util.h>
27 
28 #include "thread_private.h"
29 
30 /* Table 39: Constituent memory region descriptor */
31 struct constituent_address_range {
32 	uint64_t address;
33 	uint32_t page_count;
34 	uint32_t reserved;
35 };
36 
37 /* Table 38: Composite memory region descriptor */
38 struct mem_region_descr {
39 	uint32_t total_page_count;
40 	uint32_t address_range_count;
41 	uint64_t reserved;
42 	struct constituent_address_range address_range_array[];
43 };
44 
45 /* Table 40: Memory access permissions descriptor */
46 struct mem_access_perm_descr {
47 	uint16_t endpoint_id;
48 	uint8_t access_perm;
49 	uint8_t flags;
50 };
51 
52 /* Table 41: Endpoint memory access descriptor */
53 struct mem_accsess_descr {
54 	struct mem_access_perm_descr mem_access_perm_descr;
55 	uint32_t mem_region_offs;
56 	uint64_t reserved;
57 };
58 
59 /* Table 44: Lend, donate or share memory transaction descriptor */
60 struct mem_transaction_descr {
61 	uint16_t sender_id;
62 	uint8_t mem_reg_attr;
63 	uint8_t reserved0;
64 	uint32_t flags;
65 	uint64_t global_handle;
66 	uint64_t tag;
67 	uint32_t reserved1;
68 	uint32_t mem_access_descr_count;
69 	struct mem_accsess_descr mem_accsess_descr_array[];
70 };
71 
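/*
 * The transaction descriptor is laid out as a struct mem_transaction_descr
 * followed by @mem_access_descr_count struct mem_accsess_descr entries.
 * Each entry refers, via @mem_region_offs, to a struct mem_region_descr
 * which is in turn followed by its array of constituent address ranges.
 */
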
72 struct ffa_partition_info {
73 	uint16_t id;
74 	uint16_t execution_context;
75 	uint32_t partition_properties;
76 };
77 
78 struct mem_share_state {
79 	struct mobj_ffa *mf;
80 	unsigned int page_count;
81 	unsigned int region_count;
82 	unsigned int current_page_idx;
83 };
84 
85 struct mem_frag_state {
86 	struct mem_share_state share;
87 	tee_mm_entry_t *mm;
88 	unsigned int frag_offset;
89 	SLIST_ENTRY(mem_frag_state) link;
90 };
91 
92 /*
93  * If @rxtx_size is 0, the RX/TX buffers are not mapped or initialized.
94  *
95  * @rxtx_spinlock protects the variables below from concurrent access;
96  * this includes the use of the content of @rx_buf and @frag_state_head.
97  *
98  * @tx_buf_is_mine is true when we may write to tx_buf and false when it is
99  * owned by normal world.
100  *
101  * Note that we can't prevent normal world from updating the content of
102  * these buffers, so we must always be careful when reading, even while
103  * we hold the lock.
104  */
105 static void *rx_buf;
106 static void *tx_buf;
107 static unsigned int rxtx_size;
108 static unsigned int rxtx_spinlock;
109 static bool tx_buf_is_mine;
110 
111 static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
112 	SLIST_HEAD_INITIALIZER(&frag_state_head);
113 
114 static uint32_t swap_src_dst(uint32_t src_dst)
115 {
116 	return (src_dst >> 16) | (src_dst << 16);
117 }
118 
119 static void set_args(struct thread_smc_args *args, uint32_t fid,
120 		     uint32_t src_dst, uint32_t w2, uint32_t w3, uint32_t w4,
121 		     uint32_t w5)
122 {
123 	*args = (struct thread_smc_args){ .a0 = fid,
124 					  .a1 = src_dst,
125 					  .a2 = w2,
126 					  .a3 = w3,
127 					  .a4 = w4,
128 					  .a5 = w5, };
129 }
130 
131 static void handle_version(struct thread_smc_args *args)
132 {
133 	/*
134 	 * We currently only support one version, 1.0, so let's keep it
135 	 * simple.
136 	 */
137 	set_args(args, MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR),
138 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
139 		 FFA_PARAM_MBZ);
140 }
141 
142 static void handle_features(struct thread_smc_args *args)
143 {
144 	uint32_t ret_fid = 0;
145 	uint32_t ret_w2 = FFA_PARAM_MBZ;
146 
147 	switch (args->a1) {
148 #ifdef ARM64
149 	case FFA_RXTX_MAP_64:
150 #endif
151 	case FFA_RXTX_MAP_32:
152 		ret_fid = FFA_SUCCESS_32;
153 		ret_w2 = 0; /* 4KiB minimum buffer size and alignment boundary */
154 		break;
155 #ifdef ARM64
156 	case FFA_MEM_SHARE_64:
157 #endif
158 	case FFA_MEM_SHARE_32:
159 		ret_fid = FFA_SUCCESS_32;
160 		/*
161 		 * Partition manager supports transmission of a memory
162 		 * transaction descriptor in a buffer dynamically allocated
163 		 * by the endpoint.
164 		 */
165 		ret_w2 = BIT(0);
166 		break;
167 
168 	case FFA_ERROR:
169 	case FFA_VERSION:
170 	case FFA_SUCCESS_32:
171 #ifdef ARM64
172 	case FFA_SUCCESS_64:
173 #endif
174 	case FFA_MEM_FRAG_TX:
175 	case FFA_MEM_RECLAIM:
176 	case FFA_MSG_SEND_DIRECT_REQ_32:
177 	case FFA_INTERRUPT:
178 	case FFA_PARTITION_INFO_GET:
179 	case FFA_RX_RELEASE:
180 		ret_fid = FFA_SUCCESS_32;
181 		break;
182 	default:
183 		ret_fid = FFA_ERROR;
184 		ret_w2 = FFA_NOT_SUPPORTED;
185 		break;
186 	}
187 
188 	set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2,
189 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
190 }
191 
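/*
 * Map a physically contiguous non-secure buffer of @sz bytes at @pa into
 * the core virtual address space. On success 0 is returned and the
 * virtual address is supplied in @va_ret, else an FFA_* error code.
 */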
192 static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
193 {
194 	tee_mm_entry_t *mm = NULL;
195 
196 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
197 		return FFA_INVALID_PARAMETERS;
198 
199 	mm = tee_mm_alloc(&tee_mm_shm, sz);
200 	if (!mm)
201 		return FFA_NO_MEMORY;
202 
203 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
204 					  sz / SMALL_PAGE_SIZE,
205 					  MEM_AREA_NSEC_SHM)) {
206 		tee_mm_free(mm);
207 		return FFA_INVALID_PARAMETERS;
208 	}
209 
210 	*va_ret = (void *)tee_mm_get_smem(mm);
211 	return 0;
212 }
213 
214 static void unmap_buf(void *va, size_t sz)
215 {
216 	tee_mm_entry_t *mm = tee_mm_find(&tee_mm_shm, (vaddr_t)va);
217 
218 	assert(mm);
219 	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
220 	tee_mm_free(mm);
221 }
222 
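/*
 * FFA_RXTX_MAP: map the caller's RX/TX buffer pair. The caller's TX
 * buffer (w1) becomes our RX buffer and its RX buffer (w2) becomes our
 * TX buffer, with the size given as a page count in w3.
 */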
223 static void handle_rxtx_map(struct thread_smc_args *args)
224 {
225 	int rc = 0;
226 	uint32_t ret_fid = FFA_ERROR;
227 	unsigned int sz = 0;
228 	paddr_t rx_pa = 0;
229 	paddr_t tx_pa = 0;
230 	void *rx = NULL;
231 	void *tx = NULL;
232 
233 	cpu_spin_lock(&rxtx_spinlock);
234 
235 	if (args->a3 & GENMASK_64(63, 6)) {
236 		rc = FFA_INVALID_PARAMETERS;
237 		goto out;
238 	}
239 
240 	sz = args->a3 * SMALL_PAGE_SIZE;
241 	if (!sz) {
242 		rc = FFA_INVALID_PARAMETERS;
243 		goto out;
244 	}
245 	/* TX/RX are swapped compared to the caller */
246 	tx_pa = args->a2;
247 	rx_pa = args->a1;
248 
249 	if (rxtx_size) {
250 		rc = FFA_DENIED;
251 		goto out;
252 	}
253 
254 	rc = map_buf(tx_pa, sz, &tx);
255 	if (rc)
256 		goto out;
257 	rc = map_buf(rx_pa, sz, &rx);
258 	if (rc) {
259 		unmap_buf(tx, sz);
260 		goto out;
261 	}
262 
263 	tx_buf = tx;
264 	rx_buf = rx;
265 	rxtx_size = sz;
266 	tx_buf_is_mine = true;
267 	ret_fid = FFA_SUCCESS_32;
268 	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
269 	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
270 out:
271 	cpu_spin_unlock(&rxtx_spinlock);
272 	set_args(args, ret_fid, FFA_PARAM_MBZ, rc,
273 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
274 }
275 
276 static void handle_rxtx_unmap(struct thread_smc_args *args)
277 {
278 	uint32_t ret_fid = FFA_ERROR;
279 	int rc = FFA_INVALID_PARAMETERS;
280 
281 	cpu_spin_lock(&rxtx_spinlock);
282 
283 	if (!rxtx_size)
284 		goto out;
285 	unmap_buf(rx_buf, rxtx_size);
286 	unmap_buf(tx_buf, rxtx_size);
287 	rxtx_size = 0;
288 	rx_buf = NULL;
289 	tx_buf = NULL;
290 	ret_fid = FFA_SUCCESS_32;
291 	rc = 0;
292 out:
293 	cpu_spin_unlock(&rxtx_spinlock);
294 	set_args(args, ret_fid, FFA_PARAM_MBZ, rc,
295 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
296 }
297 
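/*
 * FFA_RX_RELEASE: the caller has consumed the data in its RX buffer
 * (our TX buffer) so ownership of the buffer is transferred back to us.
 */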
298 static void handle_rx_release(struct thread_smc_args *args)
299 {
300 	uint32_t ret_fid = 0;
301 	int rc = 0;
302 
303 	cpu_spin_lock(&rxtx_spinlock);
304 	/* The sender's RX buffer is our TX buffer */
305 	if (!rxtx_size || tx_buf_is_mine) {
306 		ret_fid = FFA_ERROR;
307 		rc = FFA_DENIED;
308 	} else {
309 		ret_fid = FFA_SUCCESS_32;
310 		rc = 0;
311 		tx_buf_is_mine = true;
312 	}
313 	cpu_spin_unlock(&rxtx_spinlock);
314 
315 	set_args(args, ret_fid, FFA_PARAM_MBZ, rc,
316 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
317 }
318 
319 static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
320 {
321 	return !w0 && !w1 && !w2 && !w3;
322 }
323 
324 static bool is_optee_os_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
325 {
326 	return w0 == OPTEE_MSG_OS_OPTEE_UUID_0 &&
327 	       w1 == OPTEE_MSG_OS_OPTEE_UUID_1 &&
328 	       w2 == OPTEE_MSG_OS_OPTEE_UUID_2 &&
329 	       w3 == OPTEE_MSG_OS_OPTEE_UUID_3;
330 }
331 
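/*
 * FFA_PARTITION_INFO_GET: describe this SPMC as the only partition by
 * writing a struct ffa_partition_info into the caller's RX buffer (our
 * TX buffer), which must be mapped and currently owned by us.
 */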
332 static void handle_partition_info_get(struct thread_smc_args *args)
333 {
334 	uint32_t ret_fid = 0;
335 	int rc = 0;
336 
337 	if (!is_nil_uuid(args->a1, args->a2, args->a3, args->a4) &&
338 	    !is_optee_os_uuid(args->a1, args->a2, args->a3, args->a4)) {
339 		ret_fid = FFA_ERROR;
340 		rc = FFA_INVALID_PARAMETERS;
341 		goto out;
342 	}
343 
344 	cpu_spin_lock(&rxtx_spinlock);
345 	if (rxtx_size && tx_buf_is_mine) {
346 		struct ffa_partition_info *fpi = tx_buf;
347 
348 		fpi->id = SPMC_ENDPOINT_ID;
349 		fpi->execution_context = CFG_TEE_CORE_NB_CORE;
350 		fpi->partition_properties = BIT(0) | BIT(1);
351 
352 		ret_fid = FFA_SUCCESS_32;
353 		rc = 1;
354 		tx_buf_is_mine = false;
355 	} else {
356 		ret_fid = FFA_ERROR;
357 		if (rxtx_size)
358 			rc = FFA_BUSY;
359 		else
360 			rc = FFA_DENIED; /* TX buffer not set up yet */
361 	}
362 	cpu_spin_unlock(&rxtx_spinlock);
363 
364 out:
365 	set_args(args, ret_fid, FFA_PARAM_MBZ, rc,
366 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
367 }
368 
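/*
 * A yielding call either resumes a thread previously suspended on an RPC
 * or allocates a new thread to enter the standard call path. The direct
 * response below is only returned if that fails.
 */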
369 static void handle_yielding_call(struct thread_smc_args *args)
370 {
371 	uint32_t ret_val = 0;
372 
373 	thread_check_canaries();
374 
375 	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
376 		/* Note connection to struct thread_rpc_arg::ret */
377 		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
378 				       0);
379 		ret_val = FFA_INVALID_PARAMETERS;
380 	} else {
381 		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5);
382 		ret_val = FFA_BUSY;
383 	}
384 	set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
385 		 swap_src_dst(args->a1), 0, ret_val, 0, 0);
386 }
387 
388 static void handle_blocking_call(struct thread_smc_args *args)
389 {
390 	switch (args->a3) {
391 	case OPTEE_FFA_GET_API_VERSION:
392 		set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
393 			 swap_src_dst(args->a1), 0, OPTEE_FFA_VERSION_MAJOR,
394 			 OPTEE_FFA_VERSION_MINOR, 0);
395 		break;
396 	case OPTEE_FFA_GET_OS_VERSION:
397 		set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
398 			 swap_src_dst(args->a1), 0, CFG_OPTEE_REVISION_MAJOR,
399 			 CFG_OPTEE_REVISION_MINOR, TEE_IMPL_GIT_SHA1);
400 		break;
401 	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
402 		set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
403 			 swap_src_dst(args->a1), 0, 0, 0, 0);
404 		break;
405 	default:
406 		EMSG("Unhandled blocking service ID %#"PRIx32,
407 		     (uint32_t)args->a3);
408 		panic();
409 	}
410 }
411 
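/*
 * Find the entry matching our endpoint ID in the endpoint memory access
 * descriptor array and return its access permissions together with the
 * offset of the composite memory region descriptor.
 */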
412 static int get_acc_perms(struct mem_accsess_descr *mem_acc,
413 			 unsigned int num_mem_accs, uint8_t *acc_perms,
414 			 unsigned int *region_offs)
415 {
416 	unsigned int n = 0;
417 
418 	for (n = 0; n < num_mem_accs; n++) {
419 		struct mem_access_perm_descr *descr =
420 			&mem_acc[n].mem_access_perm_descr;
421 
422 		if (READ_ONCE(descr->endpoint_id) == SPMC_ENDPOINT_ID) {
423 			*acc_perms = READ_ONCE(descr->access_perm);
424 			*region_offs = READ_ONCE(mem_acc[n].mem_region_offs);
425 			return 0;
426 		}
427 	}
428 
429 	return FFA_INVALID_PARAMETERS;
430 }
431 
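/*
 * Validate the memory transaction descriptor in @buf against @blen and
 * the expected attributes and permissions, then extract the total page
 * count, the number of address ranges and the offset of the first
 * constituent address range.
 */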
432 static int mem_share_init(void *buf, size_t blen, unsigned int *page_count,
433 			  unsigned int *region_count, size_t *addr_range_offs)
434 {
435 	struct mem_region_descr *region_descr = NULL;
436 	struct mem_transaction_descr *descr = NULL;
437 	const uint8_t exp_mem_acc_perm = 0x6; /* Not executable, Read-write */
438 	/* Normal memory, Write-Back cacheable, Inner shareable */
439 	const uint8_t exp_mem_reg_attr = 0x2f;
440 	unsigned int num_mem_accs = 0;
441 	uint8_t mem_acc_perm = 0;
442 	unsigned int region_descr_offs = 0;
443 	size_t n = 0;
444 
445 	if (!ALIGNMENT_IS_OK(buf, struct mem_transaction_descr) ||
446 	    blen < sizeof(struct mem_transaction_descr))
447 		return FFA_INVALID_PARAMETERS;
448 
449 	descr = buf;
450 
451 	/* Check that the endpoint memory access descriptor array fits */
452 	num_mem_accs = READ_ONCE(descr->mem_access_descr_count);
453 	if (MUL_OVERFLOW(sizeof(struct mem_accsess_descr), num_mem_accs, &n) ||
454 	    ADD_OVERFLOW(sizeof(*descr), n, &n) || n > blen)
455 		return FFA_INVALID_PARAMETERS;
456 
457 	if (READ_ONCE(descr->mem_reg_attr) != exp_mem_reg_attr)
458 		return FFA_INVALID_PARAMETERS;
459 
460 	/* Check that the access permissions match what's expected */
461 	if (get_acc_perms(descr->mem_accsess_descr_array,
462 			  num_mem_accs, &mem_acc_perm, &region_descr_offs) ||
463 	    mem_acc_perm != exp_mem_acc_perm)
464 		return FFA_INVALID_PARAMETERS;
465 
466 	/* Check that the Composite memory region descriptor fits */
467 	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
468 	    n > blen)
469 		return FFA_INVALID_PARAMETERS;
470 
471 	if (!ALIGNMENT_IS_OK((vaddr_t)descr + region_descr_offs,
472 			     struct mem_region_descr))
473 		return FFA_INVALID_PARAMETERS;
474 
475 	region_descr = (struct mem_region_descr *)((vaddr_t)descr +
476 						    region_descr_offs);
477 	*page_count = READ_ONCE(region_descr->total_page_count);
478 	*region_count = READ_ONCE(region_descr->address_range_count);
479 	*addr_range_offs = n;
480 	return 0;
481 }
482 
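/*
 * Add the constituent address ranges in @buf to the shared memory
 * object. Returns the number of consumed bytes if more ranges remain,
 * 0 once all pages are accounted for, or an FFA_* error code.
 */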
483 static int add_mem_share_helper(struct mem_share_state *s, void *buf,
484 				size_t flen)
485 {
486 	unsigned int region_count = flen /
487 				    sizeof(struct constituent_address_range);
488 	struct constituent_address_range *arange = NULL;
489 	unsigned int n = 0;
490 
491 	if (region_count > s->region_count)
492 		region_count = s->region_count;
493 
494 	if (!ALIGNMENT_IS_OK(buf, struct constituent_address_range))
495 		return FFA_INVALID_PARAMETERS;
496 	arange = buf;
497 
498 	for (n = 0; n < region_count; n++) {
499 		unsigned int page_count = READ_ONCE(arange[n].page_count);
500 		uint64_t addr = READ_ONCE(arange[n].address);
501 
502 		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
503 					  addr, page_count))
504 			return FFA_INVALID_PARAMETERS;
505 	}
506 
507 	s->region_count -= region_count;
508 	if (s->region_count)
509 		return region_count * sizeof(*arange);
510 
511 	if (s->current_page_idx != s->page_count)
512 		return FFA_INVALID_PARAMETERS;
513 
514 	return 0;
515 }
516 
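/*
 * Consume one fragment of a fragmented memory share. While more
 * fragments are expected the accumulated fragment offset is returned.
 * On completion or error the fragment state is removed and freed, and
 * the memory object is pushed to the inactive list or deleted.
 */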
517 static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
518 {
519 	int rc = 0;
520 
521 	rc = add_mem_share_helper(&s->share, buf, flen);
522 	if (rc >= 0) {
523 		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
524 			if (s->share.region_count)
525 				return s->frag_offset;
526 			/* We're done, return the number of consumed bytes */
527 			rc = s->frag_offset;
528 		} else {
529 			rc = FFA_INVALID_PARAMETERS;
530 		}
531 	}
532 
533 	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
534 	if (rc < 0)
535 		mobj_ffa_sel1_spmc_delete(s->share.mf);
536 	else
537 		mobj_ffa_push_to_inactive(s->share.mf);
538 	free(s);
539 
540 	return rc;
541 }
542 
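/*
 * Create a shared memory object from the transaction descriptor in
 * @buf. If only the first fragment was supplied (@flen < @blen) the
 * state needed to receive the remaining FFA_MEM_FRAG_TX fragments is
 * saved and a positive fragment offset is returned.
 */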
543 static int add_mem_share(tee_mm_entry_t *mm, void *buf, size_t blen,
544 			 size_t flen, uint64_t *global_handle)
545 {
546 	int rc = 0;
547 	struct mem_share_state share = { };
548 	size_t addr_range_offs = 0;
549 	size_t n = 0;
550 
551 	if (flen > blen)
552 		return FFA_INVALID_PARAMETERS;
553 
554 	rc = mem_share_init(buf, flen, &share.page_count, &share.region_count,
555 			    &addr_range_offs);
556 	if (rc)
557 		return rc;
558 
559 	if (MUL_OVERFLOW(share.region_count,
560 			 sizeof(struct constituent_address_range), &n) ||
561 	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
562 		return FFA_INVALID_PARAMETERS;
563 
564 	share.mf = mobj_ffa_sel1_spmc_new(share.page_count);
565 	if (!share.mf)
566 		return FFA_NO_MEMORY;
567 
568 	if (flen != blen) {
569 		struct mem_frag_state *s = calloc(1, sizeof(*s));
570 
571 		if (!s) {
572 			rc = FFA_NO_MEMORY;
573 			goto err;
574 		}
575 		s->share = share;
576 		s->mm = mm;
577 		s->frag_offset = addr_range_offs;
578 
579 		SLIST_INSERT_HEAD(&frag_state_head, s, link);
580 		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
581 					flen - addr_range_offs);
582 
583 		if (rc >= 0)
584 			*global_handle = mobj_ffa_get_cookie(share.mf);
585 
586 		return rc;
587 	}
588 
589 	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
590 				  flen - addr_range_offs);
591 	if (rc) {
592 		/*
593 		 * The helper returns a positive number of consumed bytes if
594 		 * regions remain; only 0 means the share completed here.
595 		 */
596 		rc = FFA_INVALID_PARAMETERS;
597 		goto err;
598 	}
599 
600 	*global_handle = mobj_ffa_push_to_inactive(share.mf);
601 
602 	return 0;
603 err:
604 	mobj_ffa_sel1_spmc_delete(share.mf);
605 	return rc;
606 }
607 
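/*
 * FFA_MEM_SHARE where the transaction descriptor is passed in a buffer
 * dynamically allocated by the caller: temporarily map the non-secure
 * pages at @pbuf and parse the descriptor from there.
 */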
608 static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
609 				 unsigned int page_count,
610 				 uint64_t *global_handle)
611 {
612 	int rc = 0;
613 	size_t len = 0;
614 	tee_mm_entry_t *mm = NULL;
615 	vaddr_t offs = pbuf & SMALL_PAGE_MASK;
616 
617 	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
618 		return FFA_INVALID_PARAMETERS;
619 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
620 		return FFA_INVALID_PARAMETERS;
621 
622 	/*
623 	 * Check that the length reported in blen fits within len even when
624 	 * the offset is taken into account.
625 	 */
626 	if (len < blen || len - offs < blen)
627 		return FFA_INVALID_PARAMETERS;
628 
629 	mm = tee_mm_alloc(&tee_mm_shm, len);
630 	if (!mm)
631 		return FFA_NO_MEMORY;
632 
633 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
634 					  page_count, MEM_AREA_NSEC_SHM)) {
635 		rc = FFA_INVALID_PARAMETERS;
636 		goto out;
637 	}
638 
639 	cpu_spin_lock(&rxtx_spinlock);
640 	rc = add_mem_share(mm, (void *)(tee_mm_get_smem(mm) + offs), blen, flen,
641 			   global_handle);
642 	cpu_spin_unlock(&rxtx_spinlock);
643 	if (rc > 0)
644 		return rc;
645 
646 	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
647 out:
648 	tee_mm_free(mm);
649 	return rc;
650 }
651 
652 static int handle_mem_share_rxbuf(size_t blen, size_t flen,
653 				  uint64_t *global_handle)
654 {
655 	int rc = FFA_DENIED;
656 
657 	cpu_spin_lock(&rxtx_spinlock);
658 
659 	if (rx_buf && flen <= rxtx_size)
660 		rc = add_mem_share(NULL, rx_buf, blen, flen, global_handle);
661 
662 	cpu_spin_unlock(&rxtx_spinlock);
663 
664 	return rc;
665 }
666 
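/*
 * FFA_MEM_SHARE: share normal world memory with the SPMC. The
 * transaction descriptor is read either from our RX buffer or from a
 * buffer described by w3/w4. Responds with FFA_MEM_FRAG_RX if more
 * fragments are expected, else FFA_SUCCESS with the global handle.
 */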
667 static void handle_mem_share(struct thread_smc_args *args)
668 {
669 	uint32_t ret_w1 = 0;
670 	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
671 	uint32_t ret_w3 = 0;
672 	uint32_t ret_fid = FFA_ERROR;
673 	uint64_t global_handle = 0;
674 	int rc = 0;
675 
676 	/* Check that the MBZs are indeed 0 */
677 	if (args->a5 || args->a6 || args->a7)
678 		goto out;
679 
680 	if (!args->a3) {
681 		/*
682 		 * The memory transaction descriptor is passed via our rx
683 		 * buffer.
684 		 */
685 		if (args->a4)
686 			goto out;
687 		rc = handle_mem_share_rxbuf(args->a1, args->a2, &global_handle);
688 	} else {
689 		rc = handle_mem_share_tmem(args->a3, args->a1, args->a2,
690 					   args->a4, &global_handle);
691 	}
692 	if (rc < 0) {
693 		ret_w2 = rc;
694 		goto out;
695 	}
696 	ret_fid = FFA_SUCCESS_32;
697 	reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
698 	if (rc > 0) {
699 		ret_fid = FFA_MEM_FRAG_RX;
700 		ret_w3 = rc;
701 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
702 	}
703 out:
704 	set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
705 }
706 
707 static struct mem_frag_state *get_frag_state(uint64_t global_handle)
708 {
709 	struct mem_frag_state *s = NULL;
710 
711 	SLIST_FOREACH(s, &frag_state_head, link)
712 		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
713 			return s;
714 
715 	return NULL;
716 }
717 
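/*
 * FFA_MEM_FRAG_TX: receive the next fragment of a fragmented memory
 * share, identified by the global handle in w1/w2 with the fragment
 * length in w3.
 */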
718 static void handle_mem_frag_tx(struct thread_smc_args *args)
719 {
720 	int rc = 0;
721 	uint64_t global_handle = reg_pair_to_64(READ_ONCE(args->a2),
722 						READ_ONCE(args->a1));
723 	size_t flen = READ_ONCE(args->a3);
724 	struct mem_frag_state *s = NULL;
725 	tee_mm_entry_t *mm = NULL;
726 	unsigned int page_count = 0;
727 	void *buf = NULL;
728 	uint32_t ret_w1 = 0;
729 	uint32_t ret_w2 = 0;
730 	uint32_t ret_w3 = 0;
731 	uint32_t ret_fid = 0;
732 
733 	/*
734 	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
735 	 * requests.
736 	 */
737 
738 	cpu_spin_lock(&rxtx_spinlock);
739 
740 	s = get_frag_state(global_handle);
741 	if (!s) {
742 		rc = FFA_INVALID_PARAMETERS;
743 		goto out;
744 	}
745 
746 	mm = s->mm;
747 	if (mm) {
748 		if (flen > tee_mm_get_bytes(mm)) {
749 			rc = FFA_INVALID_PARAMETERS;
750 			goto out;
751 		}
752 		page_count = s->share.page_count;
753 		buf = (void *)tee_mm_get_smem(mm);
754 	} else {
755 		if (flen > rxtx_size) {
756 			rc = FFA_INVALID_PARAMETERS;
757 			goto out;
758 		}
759 		buf = rx_buf;
760 	}
761 
762 	rc = add_mem_share_frag(s, buf, flen);
763 out:
764 	cpu_spin_unlock(&rxtx_spinlock);
765 
766 	if (rc <= 0 && mm) {
767 		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
768 		tee_mm_free(mm);
769 	}
770 
771 	if (rc < 0) {
772 		ret_fid = FFA_ERROR;
773 		ret_w2 = rc;
774 	} else if (rc > 0) {
775 		ret_fid = FFA_MEM_FRAG_RX;
776 		ret_w3 = rc;
777 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
778 	} else {
779 		ret_fid = FFA_SUCCESS_32;
780 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
781 	}
782 
783 	set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
784 }
785 
786 static void handle_mem_reclaim(struct thread_smc_args *args)
787 {
788 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
789 	uint32_t ret_fid = FFA_ERROR;
790 	uint64_t cookie = 0;
791 
792 	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
793 		goto out;
794 
795 	cookie = reg_pair_to_64(args->a2, args->a1);
796 	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
797 	case TEE_SUCCESS:
798 		ret_fid = FFA_SUCCESS_32;
799 		ret_val = 0;
800 		break;
801 	case TEE_ERROR_ITEM_NOT_FOUND:
802 		DMSG("cookie %#"PRIx64" not found", cookie);
803 		ret_val = FFA_INVALID_PARAMETERS;
804 		break;
805 	default:
806 		DMSG("cookie %#"PRIx64" busy", cookie);
807 		ret_val = FFA_DENIED;
808 		break;
809 	}
810 out:
811 	set_args(args, ret_fid, FFA_PARAM_MBZ, ret_val, 0, 0, 0);
812 }
813 
814 /* Only called from assembly */
815 void thread_spmc_msg_recv(struct thread_smc_args *args);
816 void thread_spmc_msg_recv(struct thread_smc_args *args)
817 {
818 	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
819 	switch (args->a0) {
820 	case FFA_VERSION:
821 		handle_version(args);
822 		break;
823 	case FFA_FEATURES:
824 		handle_features(args);
825 		break;
826 #ifdef ARM64
827 	case FFA_RXTX_MAP_64:
828 #endif
829 	case FFA_RXTX_MAP_32:
830 		handle_rxtx_map(args);
831 		break;
832 	case FFA_RXTX_UNMAP:
833 		handle_rxtx_unmap(args);
834 		break;
835 	case FFA_RX_RELEASE:
836 		handle_rx_release(args);
837 		break;
838 	case FFA_PARTITION_INFO_GET:
839 		handle_partition_info_get(args);
840 		break;
841 	case FFA_INTERRUPT:
842 		itr_core_handler();
843 		set_args(args, FFA_SUCCESS_32, args->a1, 0, 0, 0, 0);
844 		break;
845 	case FFA_MSG_SEND_DIRECT_REQ_32:
846 		if (IS_ENABLED(CFG_SECURE_PARTITION) &&
847 		    FFA_DST(args->a1) != SPMC_ENDPOINT_ID) {
848 			spmc_sp_start_thread(args);
849 			break;
850 		}
851 
852 		if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
853 			handle_yielding_call(args);
854 		else
855 			handle_blocking_call(args);
856 		break;
857 #ifdef ARM64
858 	case FFA_MEM_SHARE_64:
859 #endif
860 	case FFA_MEM_SHARE_32:
861 		handle_mem_share(args);
862 		break;
863 	case FFA_MEM_RECLAIM:
864 		handle_mem_reclaim(args);
865 		break;
866 	case FFA_MEM_FRAG_TX:
867 		handle_mem_frag_tx(args);
868 		break;
869 	default:
870 		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
871 		set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
872 			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
873 	}
874 }
875 
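/*
 * Handle OPTEE_FFA_YIELDING_CALL_WITH_ARG: look up the shared memory
 * object holding the OPTEE_MSG arguments by its cookie, map it and
 * pass the arguments to tee_entry_std().
 */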
876 static uint32_t yielding_call_with_arg(uint64_t cookie)
877 {
878 	uint32_t rv = TEE_ERROR_BAD_PARAMETERS;
879 	struct optee_msg_arg *arg = NULL;
880 	struct mobj *mobj = NULL;
881 	uint32_t num_params = 0;
882 
883 	mobj = mobj_ffa_get_by_cookie(cookie, 0);
884 	if (!mobj) {
885 		EMSG("Can't find cookie %#"PRIx64, cookie);
886 		return TEE_ERROR_BAD_PARAMETERS;
887 	}
888 
889 	rv = mobj_inc_map(mobj);
890 	if (rv)
891 		goto out_put_mobj;
892 
893 	rv = TEE_ERROR_BAD_PARAMETERS;
894 	arg = mobj_get_va(mobj, 0);
895 	if (!arg)
896 		goto out_dec_map;
897 
898 	if (!mobj_get_va(mobj, sizeof(*arg)))
899 		goto out_dec_map;
900 
901 	num_params = READ_ONCE(arg->num_params);
902 	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
903 		goto out_dec_map;
904 
905 	if (!mobj_get_va(mobj, OPTEE_MSG_GET_ARG_SIZE(num_params)))
906 		goto out_dec_map;
907 
908 	rv = tee_entry_std(arg, num_params);
909 
910 	thread_rpc_shm_cache_clear(&threads[thread_get_id()].shm_cache);
911 
912 out_dec_map:
913 	mobj_dec_map(mobj);
914 out_put_mobj:
915 	mobj_put(mobj);
916 	return rv;
917 }
918 
919 static uint32_t yielding_unregister_shm(uint64_t cookie)
920 {
921 	uint32_t res = mobj_ffa_unregister_by_cookie(cookie);
922 
923 	switch (res) {
924 	case TEE_SUCCESS:
925 	case TEE_ERROR_ITEM_NOT_FOUND:
926 		return 0;
927 	case TEE_ERROR_BUSY:
928 		EMSG("res %#"PRIx32, res);
929 		return FFA_BUSY;
930 	default:
931 		EMSG("res %#"PRIx32, res);
932 		return FFA_INVALID_PARAMETERS;
933 	}
934 }
935 
936 /*
937  * Helper routine for the assembly function thread_std_smc_entry()
938  *
939  * Note: this function is weak just to make it possible to exclude it from
940  * the unpaged area.
941  */
942 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
943 				       uint32_t a2, uint32_t a3)
944 {
945 	/*
946 	 * Arguments are supplied from handle_yielding_call() as:
947 	 * a0 <- w1
948 	 * a1 <- w3
949 	 * a2 <- w4
950 	 * a3 <- w5
951 	 */
952 	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
953 	switch (a1) {
954 	case OPTEE_FFA_YIELDING_CALL_WITH_ARG:
955 		return yielding_call_with_arg(reg_pair_to_64(a3, a2));
956 	case OPTEE_FFA_YIELDING_CALL_REGISTER_SHM:
957 		return FFA_NOT_SUPPORTED;
958 	case OPTEE_FFA_YIELDING_CALL_UNREGISTER_SHM:
959 		return yielding_unregister_shm(reg_pair_to_64(a3, a2));
960 	default:
961 		return FFA_DENIED;
962 	}
963 }
964 
965 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
966 {
967 	uint64_t offs = tpm->u.memref.offs;
968 
969 	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
970 		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
971 
972 	param->u.fmem.offs_low = offs;
973 	param->u.fmem.offs_high = offs >> 32;
974 	if (param->u.fmem.offs_high != offs >> 32)
975 		return false;
976 
977 	param->u.fmem.size = tpm->u.memref.size;
978 	if (tpm->u.memref.mobj) {
979 		param->u.fmem.global_id = mobj_get_cookie(tpm->u.memref.mobj);
980 		if (!param->u.fmem.global_id)
981 			return false;
982 	} else {
983 		param->u.fmem.global_id = 0;
984 	}
985 
986 	return true;
987 }
988 
989 static void thread_rpc_free(uint32_t type, uint64_t cookie, struct mobj *mobj)
990 {
991 	TEE_Result res = TEE_SUCCESS;
992 	struct thread_rpc_arg rpc_arg = { .call = {
993 			.w1 = thread_get_tsd()->rpc_target_info,
994 			.w4 = type,
995 		},
996 	};
997 
998 	reg_pair_from_64(cookie, &rpc_arg.call.w6, &rpc_arg.call.w5);
999 	mobj_put(mobj);
1000 	res = mobj_ffa_unregister_by_cookie(cookie);
1001 	if (res)
1002 		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): res %#"PRIx32,
1003 		     cookie, res);
1004 	thread_rpc(&rpc_arg);
1005 }
1006 
1007 static struct mobj *thread_rpc_alloc(size_t size, uint32_t type)
1008 {
1009 	struct mobj *mobj = NULL;
1010 	unsigned int page_count = ROUNDUP(size, SMALL_PAGE_SIZE) /
1011 				  SMALL_PAGE_SIZE;
1012 	struct thread_rpc_arg rpc_arg = { .call = {
1013 			.w1 = thread_get_tsd()->rpc_target_info,
1014 			.w4 = type,
1015 			.w5 = page_count,
1016 		},
1017 	};
1018 	unsigned int internal_offset = 0;
1019 	uint64_t cookie = 0;
1020 
1021 	thread_rpc(&rpc_arg);
1022 
1023 	cookie = reg_pair_to_64(rpc_arg.ret.w5, rpc_arg.ret.w4);
1024 	if (!cookie)
1025 		return NULL;
1026 	internal_offset = rpc_arg.ret.w6;
1027 
1028 	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
1029 	if (!mobj) {
1030 		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
1031 		     cookie, internal_offset);
1032 		return NULL;
1033 	}
1034 
1035 	assert(mobj_is_nonsec(mobj));
1036 
1037 	if (mobj_inc_map(mobj)) {
1038 		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
1039 		mobj_put(mobj);
1040 		return NULL;
1041 	}
1042 
1043 	return mobj;
1044 }
1045 
1046 struct mobj *thread_rpc_alloc_payload(size_t size)
1047 {
1048 	return thread_rpc_alloc(size,
1049 				OPTEE_FFA_YIELDING_CALL_RETURN_ALLOC_SUPPL_SHM);
1050 }
1051 
1052 void thread_rpc_free_payload(struct mobj *mobj)
1053 {
1054 	thread_rpc_free(OPTEE_FFA_YIELDING_CALL_RETURN_FREE_SUPPL_SHM,
1055 			mobj_get_cookie(mobj), mobj);
1056 }
1057 
1058 struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
1059 {
1060 	return thread_rpc_alloc(size,
1061 				OPTEE_FFA_YIELDING_CALL_RETURN_ALLOC_KERN_SHM);
1062 }
1063 
1064 void thread_rpc_free_kernel_payload(struct mobj *mobj)
1065 {
1066 	thread_rpc_free(OPTEE_FFA_YIELDING_CALL_RETURN_FREE_KERN_SHM,
1067 			mobj_get_cookie(mobj), mobj);
1068 }
1069 
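/*
 * Build an OPTEE_MSG argument struct for an RPC in the per-thread RPC
 * shared memory buffer, allocating that buffer on first use. Returns
 * the argument struct in @arg_ret and its cookie in @carg_ret.
 */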
1070 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
1071 			    struct thread_param *params,
1072 			    struct optee_msg_arg **arg_ret,
1073 			    uint64_t *carg_ret)
1074 {
1075 	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1076 	struct thread_ctx *thr = threads + thread_get_id();
1077 	struct optee_msg_arg *arg = thr->rpc_arg;
1078 
1079 	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
1080 		return TEE_ERROR_BAD_PARAMETERS;
1081 
1082 	if (!arg) {
1083 		struct mobj *mobj = thread_rpc_alloc_kernel_payload(sz);
1084 
1085 		if (!mobj)
1086 			return TEE_ERROR_OUT_OF_MEMORY;
1087 
1088 		arg = mobj_get_va(mobj, 0);
1089 		if (!arg) {
1090 			thread_rpc_free_kernel_payload(mobj);
1091 			return TEE_ERROR_OUT_OF_MEMORY;
1092 		}
1093 
1094 		thr->rpc_arg = arg;
1095 		thr->rpc_mobj = mobj;
1096 	}
1097 
1098 	memset(arg, 0, sz);
1099 	arg->cmd = cmd;
1100 	arg->num_params = num_params;
1101 	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
1102 
1103 	for (size_t n = 0; n < num_params; n++) {
1104 		switch (params[n].attr) {
1105 		case THREAD_PARAM_ATTR_NONE:
1106 			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
1107 			break;
1108 		case THREAD_PARAM_ATTR_VALUE_IN:
1109 		case THREAD_PARAM_ATTR_VALUE_OUT:
1110 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1111 			arg->params[n].attr = params[n].attr -
1112 					      THREAD_PARAM_ATTR_VALUE_IN +
1113 					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
1114 			arg->params[n].u.value.a = params[n].u.value.a;
1115 			arg->params[n].u.value.b = params[n].u.value.b;
1116 			arg->params[n].u.value.c = params[n].u.value.c;
1117 			break;
1118 		case THREAD_PARAM_ATTR_MEMREF_IN:
1119 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1120 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1121 			if (!set_fmem(arg->params + n, params + n))
1122 				return TEE_ERROR_BAD_PARAMETERS;
1123 			break;
1124 		default:
1125 			return TEE_ERROR_BAD_PARAMETERS;
1126 		}
1127 	}
1128 
1129 	*arg_ret = arg;
1130 	*carg_ret = mobj_get_cookie(thr->rpc_mobj);
1131 
1132 	return TEE_SUCCESS;
1133 }
1134 
1135 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
1136 				struct thread_param *params)
1137 {
1138 	for (size_t n = 0; n < num_params; n++) {
1139 		switch (params[n].attr) {
1140 		case THREAD_PARAM_ATTR_VALUE_OUT:
1141 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1142 			params[n].u.value.a = arg->params[n].u.value.a;
1143 			params[n].u.value.b = arg->params[n].u.value.b;
1144 			params[n].u.value.c = arg->params[n].u.value.c;
1145 			break;
1146 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1147 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1148 			params[n].u.memref.size = arg->params[n].u.fmem.size;
1149 			break;
1150 		default:
1151 			break;
1152 		}
1153 	}
1154 
1155 	return arg->ret;
1156 }
1157 
1158 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
1159 			struct thread_param *params)
1160 {
1161 	struct thread_rpc_arg rpc_arg = { .call = {
1162 			.w1 = thread_get_tsd()->rpc_target_info,
1163 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1164 		},
1165 	};
1166 	uint64_t carg = 0;
1167 	struct optee_msg_arg *arg = NULL;
1168 	uint32_t ret = 0;
1169 
1170 	ret = get_rpc_arg(cmd, num_params, params, &arg, &carg);
1171 	if (ret)
1172 		return ret;
1173 
1174 	reg_pair_from_64(carg, &rpc_arg.call.w6, &rpc_arg.call.w5);
1175 	thread_rpc(&rpc_arg);
1176 
1177 	return get_rpc_arg_res(arg, num_params, params);
1178 }
1179 
1180 struct mobj *thread_rpc_alloc_global_payload(size_t size __unused)
1181 {
1182 	return NULL;
1183 }
1184 
1185 void thread_rpc_free_global_payload(struct mobj *mobj __unused)
1186 {
1187 	/*
1188 	 * "can't happen" since thread_rpc_alloc_global_payload() always
1189 	 * returns NULL.
1190 	 */
1191 	volatile bool cant_happen __maybe_unused = true;
1192 
1193 	assert(!cant_happen);
1194 }
1195