xref: /optee_os/core/arch/arm/kernel/thread_spmc.c (revision c185655eafaab6b4b27759812cd6633e1da9db12)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2020, Linaro Limited.
4  * Copyright (c) 2019, Arm Limited. All rights reserved.
5  */
6 
7 #include <assert.h>
8 #include <ffa.h>
9 #include <io.h>
10 #include <kernel/interrupt.h>
11 #include <kernel/panic.h>
12 #include <kernel/spinlock.h>
13 #include <kernel/tee_misc.h>
14 #include <kernel/thread.h>
15 #include <kernel/thread_spmc.h>
16 #include <mm/core_mmu.h>
17 #include <mm/mobj.h>
18 #include <optee_ffa.h>
19 #include <optee_msg.h>
20 #include <optee_rpc_cmd.h>
21 #include <string.h>
22 #include <sys/queue.h>
23 #include <tee/entry_std.h>
24 #include <util.h>
25 
26 #include "thread_private.h"
27 
28 /* Table 39: Constituent memory region descriptor */
29 struct constituent_address_range {
30 	uint64_t address;
31 	uint32_t page_count;
32 	uint32_t reserved;
33 };
34 
35 /* Table 38: Composite memory region descriptor */
36 struct mem_region_descr {
37 	uint32_t total_page_count;
38 	uint32_t address_range_count;
39 	uint64_t reserved;
40 	struct constituent_address_range address_range_array[];
41 };
42 
43 /* Table 40: Memory access permissions descriptor */
44 struct mem_access_perm_descr {
45 	uint16_t endpoint_id;
46 	uint8_t access_perm;
47 	uint8_t flags;
48 };
49 
50 /* Table 41: Endpoint memory access descriptor */
51 struct mem_accsess_descr {
52 	struct mem_access_perm_descr mem_access_perm_descr;
53 	uint32_t mem_region_offs;
54 	uint64_t reserved;
55 };
56 
57 /* Table 44: Lend, donate or share memory transaction descriptor */
58 struct mem_transaction_descr {
59 	uint16_t sender_id;
60 	uint8_t mem_reg_attr;
61 	uint8_t reserved0;
62 	uint32_t flags;
63 	uint64_t global_handle;
64 	uint64_t tag;
65 	uint32_t reserved1;
66 	uint32_t mem_access_descr_count;
67 	struct mem_accsess_descr mem_accsess_descr_array[];
68 };
69 
70 struct ffa_partition_info {
71 	uint16_t id;
72 	uint16_t execution_context;
73 	uint32_t partition_properties;
74 };
75 
76 struct mem_share_state {
77 	struct mobj_ffa *mf;
78 	unsigned int page_count;
79 	unsigned int region_count;
80 	unsigned int current_page_idx;
81 };
82 
83 struct mem_frag_state {
84 	struct mem_share_state share;
85 	tee_mm_entry_t *mm;
86 	unsigned int frag_offset;
87 	SLIST_ENTRY(mem_frag_state) link;
88 };
89 
90 /*
91  * If @rxtx_size is 0 RX/TX buffers are not mapped or initialized.
92  *
93  * @rxtx_spinlock protects the variables below from concurrent access
94  * this includes the use of content of @rx_buf and @frag_state_head.
95  *
96  * @tx_buf_is_mine is true when we may write to tx_buf and false when it is
97  * owned by normal world.
98  *
99  * Note that we can't prevent normal world from updating the content of
100  * these buffers so we must always be careful when reading. while we hold
101  * the lock.
102  */
103 static void *rx_buf;
104 static void *tx_buf;
105 static unsigned int rxtx_size;
106 static unsigned int rxtx_spinlock;
107 static bool tx_buf_is_mine;
108 
109 static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
110 	SLIST_HEAD_INITIALIZER(&frag_state_head);
111 
112 static uint32_t swap_src_dst(uint32_t src_dst)
113 {
114 	return (src_dst >> 16) | (src_dst << 16);
115 }
116 
117 static void set_args(struct thread_smc_args *args, uint32_t fid,
118 		     uint32_t src_dst, uint32_t w2, uint32_t w3, uint32_t w4,
119 		     uint32_t w5)
120 {
121 	*args = (struct thread_smc_args){ .a0 = fid,
122 					  .a1 = src_dst,
123 					  .a2 = w2,
124 					  .a3 = w3,
125 					  .a4 = w4,
126 					  .a5 = w5, };
127 }
128 
129 static void handle_version(struct thread_smc_args *args)
130 {
131 	/*
132 	 * We currently only support one version, 1.0 so let's keep it
133 	 * simple.
134 	 */
135 	set_args(args, MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR),
136 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
137 		 FFA_PARAM_MBZ);
138 }
139 
140 static void handle_features(struct thread_smc_args *args)
141 {
142 	uint32_t ret_fid = 0;
143 	uint32_t ret_w2 = FFA_PARAM_MBZ;
144 
145 	switch (args->a1) {
146 #ifdef ARM64
147 	case FFA_RXTX_MAP_64:
148 #endif
149 	case FFA_RXTX_MAP_32:
150 		ret_fid = FFA_SUCCESS_32;
151 		ret_w2 = 0; /* 4kB Minimum buffer size and alignment boundary */
152 		break;
153 #ifdef ARM64
154 	case FFA_MEM_SHARE_64:
155 #endif
156 	case FFA_MEM_SHARE_32:
157 		ret_fid = FFA_SUCCESS_32;
158 		/*
159 		 * Partition manager supports transmission of a memory
160 		 * transaction descriptor in a buffer dynamically allocated
161 		 * by the endpoint.
162 		 */
163 		ret_w2 = BIT(0);
164 		break;
165 
166 	case FFA_ERROR:
167 	case FFA_VERSION:
168 	case FFA_SUCCESS_32:
169 #ifdef ARM64
170 	case FFA_SUCCESS_64:
171 #endif
172 	case FFA_MEM_FRAG_TX:
173 	case FFA_MEM_RECLAIM:
174 	case FFA_MSG_SEND_DIRECT_REQ_32:
175 	case FFA_INTERRUPT:
176 	case FFA_PARTITION_INFO_GET:
177 	case FFA_RX_RELEASE:
178 		ret_fid = FFA_SUCCESS_32;
179 		break;
180 	default:
181 		ret_fid = FFA_ERROR;
182 		ret_w2 = FFA_NOT_SUPPORTED;
183 		break;
184 	}
185 
186 	set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2,
187 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
188 }
189 
190 static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
191 {
192 	tee_mm_entry_t *mm = NULL;
193 
194 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
195 		return FFA_INVALID_PARAMETERS;
196 
197 	mm = tee_mm_alloc(&tee_mm_shm, sz);
198 	if (!mm)
199 		return FFA_NO_MEMORY;
200 
201 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
202 					  sz / SMALL_PAGE_SIZE,
203 					  MEM_AREA_NSEC_SHM)) {
204 		tee_mm_free(mm);
205 		return FFA_INVALID_PARAMETERS;
206 	}
207 
208 	*va_ret = (void *)tee_mm_get_smem(mm);
209 	return 0;
210 }
211 
212 static void unmap_buf(void *va, size_t sz)
213 {
214 	tee_mm_entry_t *mm = tee_mm_find(&tee_mm_shm, (vaddr_t)va);
215 
216 	assert(mm);
217 	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
218 	tee_mm_free(mm);
219 }
220 
221 static void handle_rxtx_map(struct thread_smc_args *args)
222 {
223 	int rc = 0;
224 	uint32_t ret_fid = FFA_ERROR;
225 	unsigned int sz = 0;
226 	paddr_t rx_pa = 0;
227 	paddr_t tx_pa = 0;
228 	void *rx = NULL;
229 	void *tx = NULL;
230 
231 	cpu_spin_lock(&rxtx_spinlock);
232 
233 	if (args->a3 & GENMASK_64(63, 6)) {
234 		rc = FFA_INVALID_PARAMETERS;
235 		goto out;
236 	}
237 
238 	sz = args->a3 * SMALL_PAGE_SIZE;
239 	if (!sz) {
240 		rc = FFA_INVALID_PARAMETERS;
241 		goto out;
242 	}
243 	/* TX/RX are swapped compared to the caller */
244 	tx_pa = args->a2;
245 	rx_pa = args->a1;
246 
247 	if (rxtx_size) {
248 		rc = FFA_DENIED;
249 		goto out;
250 	}
251 
252 	rc = map_buf(tx_pa, sz, &tx);
253 	if (rc)
254 		goto out;
255 	rc = map_buf(rx_pa, sz, &rx);
256 	if (rc) {
257 		unmap_buf(tx, sz);
258 		goto out;
259 	}
260 
261 	tx_buf = tx;
262 	rx_buf = rx;
263 	rxtx_size = sz;
264 	tx_buf_is_mine = true;
265 	ret_fid = FFA_SUCCESS_32;
266 	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
267 	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
268 out:
269 	cpu_spin_unlock(&rxtx_spinlock);
270 	set_args(args, ret_fid, FFA_PARAM_MBZ, rc,
271 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
272 }
273 
274 static void handle_rxtx_unmap(struct thread_smc_args *args)
275 {
276 	uint32_t ret_fid = FFA_ERROR;
277 	int rc = FFA_INVALID_PARAMETERS;
278 
279 	cpu_spin_lock(&rxtx_spinlock);
280 
281 	if (!rxtx_size)
282 		goto out;
283 	unmap_buf(rx_buf, rxtx_size);
284 	unmap_buf(tx_buf, rxtx_size);
285 	rxtx_size = 0;
286 	rx_buf = NULL;
287 	tx_buf = NULL;
288 	ret_fid = FFA_SUCCESS_32;
289 	rc = 0;
290 out:
291 	cpu_spin_unlock(&rxtx_spinlock);
292 	set_args(args, ret_fid, FFA_PARAM_MBZ, rc,
293 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
294 }
295 
296 static void handle_rx_release(struct thread_smc_args *args)
297 {
298 	uint32_t ret_fid = 0;
299 	int rc = 0;
300 
301 	cpu_spin_lock(&rxtx_spinlock);
302 	/* The senders RX is our TX */
303 	if (!rxtx_size || tx_buf_is_mine) {
304 		ret_fid = FFA_ERROR;
305 		rc = FFA_DENIED;
306 	} else {
307 		ret_fid = FFA_SUCCESS_32;
308 		rc = 0;
309 		tx_buf_is_mine = true;
310 	}
311 	cpu_spin_unlock(&rxtx_spinlock);
312 
313 	set_args(args, ret_fid, FFA_PARAM_MBZ, rc,
314 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
315 }
316 
317 static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
318 {
319 	return !w0 && !w1 && !w2 && !w3;
320 }
321 
322 static bool is_optee_os_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
323 {
324 	return w0 == OPTEE_MSG_OS_OPTEE_UUID_0 &&
325 	       w1 == OPTEE_MSG_OS_OPTEE_UUID_1 &&
326 	       w2 == OPTEE_MSG_OS_OPTEE_UUID_2 &&
327 	       w3 == OPTEE_MSG_OS_OPTEE_UUID_3;
328 }
329 
330 static void handle_partition_info_get(struct thread_smc_args *args)
331 {
332 	uint32_t ret_fid = 0;
333 	int rc = 0;
334 
335 	if (!is_nil_uuid(args->a1, args->a2, args->a3, args->a4) &&
336 	    !is_optee_os_uuid(args->a1, args->a2, args->a3, args->a4)) {
337 		ret_fid = FFA_ERROR;
338 		rc = FFA_INVALID_PARAMETERS;
339 		goto out;
340 	}
341 
342 	cpu_spin_lock(&rxtx_spinlock);
343 	if (rxtx_size && tx_buf_is_mine) {
344 		struct ffa_partition_info *fpi = tx_buf;
345 
346 		fpi->id = SPMC_ENDPOINT_ID;
347 		fpi->execution_context = CFG_TEE_CORE_NB_CORE;
348 		fpi->partition_properties = BIT(0) | BIT(1);
349 
350 		ret_fid = FFA_SUCCESS_32;
351 		rc = 1;
352 		tx_buf_is_mine = false;
353 	} else {
354 		ret_fid = FFA_ERROR;
355 		if (rxtx_size)
356 			rc = FFA_BUSY;
357 		else
358 			rc = FFA_DENIED; /* TX buffer not setup yet */
359 	}
360 	cpu_spin_unlock(&rxtx_spinlock);
361 
362 out:
363 	set_args(args, ret_fid, FFA_PARAM_MBZ, rc,
364 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
365 }
366 
367 static void handle_yielding_call(struct thread_smc_args *args)
368 {
369 	uint32_t ret_val = 0;
370 
371 	thread_check_canaries();
372 
373 	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
374 		/* Note connection to struct thread_rpc_arg::ret */
375 		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
376 				       0);
377 		ret_val = FFA_INVALID_PARAMETERS;
378 	} else {
379 		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5);
380 		ret_val = FFA_BUSY;
381 	}
382 	set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
383 		 swap_src_dst(args->a1), 0, ret_val, 0, 0);
384 }
385 
386 static void handle_blocking_call(struct thread_smc_args *args)
387 {
388 	switch (args->a3) {
389 	case OPTEE_FFA_GET_API_VERSION:
390 		set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
391 			 swap_src_dst(args->a1), 0, OPTEE_FFA_VERSION_MAJOR,
392 			 OPTEE_FFA_VERSION_MINOR, 0);
393 		break;
394 	case OPTEE_FFA_GET_OS_VERSION:
395 		set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
396 			 swap_src_dst(args->a1), 0, CFG_OPTEE_REVISION_MAJOR,
397 			 CFG_OPTEE_REVISION_MINOR, TEE_IMPL_GIT_SHA1);
398 		break;
399 	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
400 		set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
401 			 swap_src_dst(args->a1), 0, 0, 0, 0);
402 		break;
403 	default:
404 		EMSG("Unhandled blocking service ID %#"PRIx32,
405 		     (uint32_t)args->a3);
406 		panic();
407 	}
408 }
409 
410 static int get_acc_perms(struct mem_accsess_descr *mem_acc,
411 			 unsigned int num_mem_accs, uint8_t *acc_perms,
412 			 unsigned int *region_offs)
413 {
414 	unsigned int n = 0;
415 
416 	for (n = 0; n < num_mem_accs; n++) {
417 		struct mem_access_perm_descr *descr =
418 			&mem_acc[n].mem_access_perm_descr;
419 
420 		if (READ_ONCE(descr->endpoint_id) == SPMC_ENDPOINT_ID) {
421 			*acc_perms = READ_ONCE(descr->access_perm);
422 			*region_offs = READ_ONCE(mem_acc[n].mem_region_offs);
423 			return 0;
424 		}
425 	}
426 
427 	return FFA_INVALID_PARAMETERS;
428 }
429 
430 static int mem_share_init(void *buf, size_t blen, unsigned int *page_count,
431 			  unsigned int *region_count, size_t *addr_range_offs)
432 {
433 	struct mem_region_descr *region_descr = NULL;
434 	struct mem_transaction_descr *descr = NULL;
435 	const uint8_t exp_mem_acc_perm = 0x6; /* Not executable, Read-write */
436 	/* Normal memory, Write-Back cacheable, Inner shareable */
437 	const uint8_t exp_mem_reg_attr = 0x2f;
438 	unsigned int num_mem_accs = 0;
439 	uint8_t mem_acc_perm = 0;
440 	unsigned int region_descr_offs = 0;
441 	size_t n = 0;
442 
443 	if (!ALIGNMENT_IS_OK(buf, struct mem_transaction_descr) ||
444 	    blen < sizeof(struct mem_transaction_descr))
445 		return FFA_INVALID_PARAMETERS;
446 
447 	descr = buf;
448 
449 	/* Check that the endpoint memory access descriptor array fits */
450 	num_mem_accs = READ_ONCE(descr->mem_access_descr_count);
451 	if (MUL_OVERFLOW(sizeof(struct mem_accsess_descr), num_mem_accs, &n) ||
452 	    ADD_OVERFLOW(sizeof(*descr), n, &n) || n > blen)
453 		return FFA_INVALID_PARAMETERS;
454 
455 	if (READ_ONCE(descr->mem_reg_attr) != exp_mem_reg_attr)
456 		return FFA_INVALID_PARAMETERS;
457 
458 	/* Check that the access permissions matches what's expected */
459 	if (get_acc_perms(descr->mem_accsess_descr_array,
460 			  num_mem_accs, &mem_acc_perm, &region_descr_offs) ||
461 	    mem_acc_perm != exp_mem_acc_perm)
462 		return FFA_INVALID_PARAMETERS;
463 
464 	/* Check that the Composite memory region descriptor fits */
465 	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
466 	    n > blen)
467 		return FFA_INVALID_PARAMETERS;
468 
469 	if (!ALIGNMENT_IS_OK((vaddr_t)descr + region_descr_offs,
470 			     struct mem_region_descr))
471 		return FFA_INVALID_PARAMETERS;
472 
473 	region_descr = (struct mem_region_descr *)((vaddr_t)descr +
474 						    region_descr_offs);
475 	*page_count = READ_ONCE(region_descr->total_page_count);
476 	*region_count = READ_ONCE(region_descr->address_range_count);
477 	*addr_range_offs = n;
478 	return 0;
479 }
480 
481 static int add_mem_share_helper(struct mem_share_state *s, void *buf,
482 				size_t flen)
483 {
484 	unsigned int region_count = flen /
485 				    sizeof(struct constituent_address_range);
486 	struct constituent_address_range *arange = NULL;
487 	unsigned int n = 0;
488 
489 	if (region_count > s->region_count)
490 		region_count = s->region_count;
491 
492 	if (!ALIGNMENT_IS_OK(buf, struct constituent_address_range))
493 		return FFA_INVALID_PARAMETERS;
494 	arange = buf;
495 
496 	for (n = 0; n < region_count; n++) {
497 		unsigned int page_count = READ_ONCE(arange[n].page_count);
498 		uint64_t addr = READ_ONCE(arange[n].address);
499 
500 		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
501 					  addr, page_count))
502 			return FFA_INVALID_PARAMETERS;
503 	}
504 
505 	s->region_count -= region_count;
506 	if (s->region_count)
507 		return region_count * sizeof(*arange);
508 
509 	if (s->current_page_idx != s->page_count)
510 		return FFA_INVALID_PARAMETERS;
511 
512 	return 0;
513 }
514 
515 static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
516 {
517 	int rc = 0;
518 
519 	rc = add_mem_share_helper(&s->share, buf, flen);
520 	if (rc >= 0) {
521 		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
522 			if (s->share.region_count)
523 				return s->frag_offset;
524 			/* We're done, return the number of consumed bytes */
525 			rc = s->frag_offset;
526 		} else {
527 			rc = FFA_INVALID_PARAMETERS;
528 		}
529 	}
530 
531 	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
532 	if (rc < 0)
533 		mobj_ffa_sel1_spmc_delete(s->share.mf);
534 	else
535 		mobj_ffa_push_to_inactive(s->share.mf);
536 	free(s);
537 
538 	return rc;
539 }
540 
541 static int add_mem_share(tee_mm_entry_t *mm, void *buf, size_t blen,
542 			 size_t flen, uint64_t *global_handle)
543 {
544 	int rc = 0;
545 	struct mem_share_state share = { };
546 	size_t addr_range_offs = 0;
547 	size_t n = 0;
548 
549 	if (flen > blen)
550 		return FFA_INVALID_PARAMETERS;
551 
552 	rc = mem_share_init(buf, flen, &share.page_count, &share.region_count,
553 			    &addr_range_offs);
554 	if (rc)
555 		return rc;
556 
557 	if (MUL_OVERFLOW(share.region_count,
558 			 sizeof(struct constituent_address_range), &n) ||
559 	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
560 		return FFA_INVALID_PARAMETERS;
561 
562 	share.mf = mobj_ffa_sel1_spmc_new(share.page_count);
563 	if (!share.mf)
564 		return FFA_NO_MEMORY;
565 
566 	if (flen != blen) {
567 		struct mem_frag_state *s = calloc(sizeof(*s), 1);
568 
569 		if (!s) {
570 			rc = FFA_NO_MEMORY;
571 			goto err;
572 		}
573 		s->share = share;
574 		s->mm = mm;
575 		s->frag_offset = addr_range_offs;
576 
577 		SLIST_INSERT_HEAD(&frag_state_head, s, link);
578 		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
579 					flen - addr_range_offs);
580 
581 		if (rc >= 0)
582 			*global_handle = mobj_ffa_get_cookie(share.mf);
583 
584 		return rc;
585 	}
586 
587 	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
588 				  flen - addr_range_offs);
589 	if (rc) {
590 		/*
591 		 * Number of consumed bytes may be returned instead of 0 for
592 		 * done.
593 		 */
594 		rc = FFA_INVALID_PARAMETERS;
595 		goto err;
596 	}
597 
598 	*global_handle = mobj_ffa_push_to_inactive(share.mf);
599 
600 	return 0;
601 err:
602 	mobj_ffa_sel1_spmc_delete(share.mf);
603 	return rc;
604 }
605 
606 static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
607 				 unsigned int page_count,
608 				 uint64_t *global_handle)
609 {
610 	int rc = 0;
611 	size_t len = 0;
612 	tee_mm_entry_t *mm = NULL;
613 	vaddr_t offs = pbuf & SMALL_PAGE_MASK;
614 
615 	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
616 		return FFA_INVALID_PARAMETERS;
617 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
618 		return FFA_INVALID_PARAMETERS;
619 
620 	/*
621 	 * Check that the length reported in blen is covered by len even
622 	 * if the offset is taken into account.
623 	 */
624 	if (len < blen || len - offs < blen)
625 		return FFA_INVALID_PARAMETERS;
626 
627 	mm = tee_mm_alloc(&tee_mm_shm, len);
628 	if (!mm)
629 		return FFA_NO_MEMORY;
630 
631 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
632 					  page_count, MEM_AREA_NSEC_SHM)) {
633 		rc = FFA_INVALID_PARAMETERS;
634 		goto out;
635 	}
636 
637 	cpu_spin_lock(&rxtx_spinlock);
638 	rc = add_mem_share(mm, (void *)(tee_mm_get_smem(mm) + offs), blen, flen,
639 			   global_handle);
640 	cpu_spin_unlock(&rxtx_spinlock);
641 	if (rc > 0)
642 		return rc;
643 
644 	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
645 out:
646 	tee_mm_free(mm);
647 	return rc;
648 }
649 
650 static int handle_mem_share_rxbuf(size_t blen, size_t flen,
651 				  uint64_t *global_handle)
652 {
653 	int rc = FFA_DENIED;
654 
655 	cpu_spin_lock(&rxtx_spinlock);
656 
657 	if (rx_buf && flen <= rxtx_size)
658 		rc = add_mem_share(NULL, rx_buf, blen, flen, global_handle);
659 
660 	cpu_spin_unlock(&rxtx_spinlock);
661 
662 	return rc;
663 }
664 
665 static void handle_mem_share(struct thread_smc_args *args)
666 {
667 	uint32_t ret_w1 = 0;
668 	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
669 	uint32_t ret_w3 = 0;
670 	uint32_t ret_fid = FFA_ERROR;
671 	uint64_t global_handle = 0;
672 	int rc = 0;
673 
674 	/* Check that the MBZs are indeed 0 */
675 	if (args->a5 || args->a6 || args->a7)
676 		goto out;
677 
678 	if (!args->a3) {
679 		/*
680 		 * The memory transaction descriptor is passed via our rx
681 		 * buffer.
682 		 */
683 		if (args->a4)
684 			goto out;
685 		rc = handle_mem_share_rxbuf(args->a1, args->a2, &global_handle);
686 	} else {
687 		rc = handle_mem_share_tmem(args->a3, args->a1, args->a2,
688 					   args->a4, &global_handle);
689 	}
690 	if (rc < 0) {
691 		ret_w2 = rc;
692 		goto out;
693 	}
694 	if (rc > 0) {
695 		ret_fid = FFA_MEM_FRAG_RX;
696 		ret_w3 = rc;
697 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
698 	}
699 	ret_fid = FFA_SUCCESS_32;
700 	reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
701 out:
702 	set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
703 }
704 
705 static struct mem_frag_state *get_frag_state(uint64_t global_handle)
706 {
707 	struct mem_frag_state *s = NULL;
708 
709 	SLIST_FOREACH(s, &frag_state_head, link)
710 		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
711 			return s;
712 
713 	return NULL;
714 }
715 
716 static void handle_mem_frag_tx(struct thread_smc_args *args)
717 {
718 	int rc = 0;
719 	uint64_t global_handle = reg_pair_to_64(READ_ONCE(args->a2),
720 						READ_ONCE(args->a1));
721 	size_t flen = READ_ONCE(args->a3);
722 	struct mem_frag_state *s = NULL;
723 	tee_mm_entry_t *mm = NULL;
724 	unsigned int page_count = 0;
725 	void *buf = NULL;
726 	uint32_t ret_w1 = 0;
727 	uint32_t ret_w2 = 0;
728 	uint32_t ret_w3 = 0;
729 	uint32_t ret_fid = 0;
730 
731 	/*
732 	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
733 	 * requests.
734 	 */
735 
736 	cpu_spin_lock(&rxtx_spinlock);
737 
738 	s = get_frag_state(global_handle);
739 	if (!s) {
740 		rc = FFA_INVALID_PARAMETERS;
741 		goto out;
742 	}
743 
744 	mm = s->mm;
745 	if (mm) {
746 		if (flen > tee_mm_get_bytes(mm)) {
747 			rc = FFA_INVALID_PARAMETERS;
748 			goto out;
749 		}
750 		page_count = s->share.page_count;
751 		buf = (void *)tee_mm_get_smem(mm);
752 	} else {
753 		if (flen > rxtx_size) {
754 			rc = FFA_INVALID_PARAMETERS;
755 			goto out;
756 		}
757 		buf = rx_buf;
758 	}
759 
760 	rc = add_mem_share_frag(s, buf, flen);
761 out:
762 	cpu_spin_unlock(&rxtx_spinlock);
763 
764 	if (rc <= 0 && mm) {
765 		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
766 		tee_mm_free(mm);
767 	}
768 
769 	if (rc < 0) {
770 		ret_fid = FFA_ERROR;
771 		ret_w2 = rc;
772 	} else if (rc > 0) {
773 		ret_fid = FFA_MEM_FRAG_RX;
774 		ret_w3 = rc;
775 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
776 	} else {
777 		ret_fid = FFA_SUCCESS_32;
778 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
779 	}
780 
781 	set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
782 }
783 
784 static void handle_mem_reclaim(struct thread_smc_args *args)
785 {
786 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
787 	uint32_t ret_fid = FFA_ERROR;
788 	uint64_t cookie = 0;
789 
790 	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
791 		goto out;
792 
793 	cookie = reg_pair_to_64(args->a2, args->a1);
794 	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
795 	case TEE_SUCCESS:
796 		ret_fid = FFA_SUCCESS_32;
797 		ret_val = 0;
798 		break;
799 	case TEE_ERROR_ITEM_NOT_FOUND:
800 		DMSG("cookie %#"PRIx64" not found", cookie);
801 		ret_val = FFA_INVALID_PARAMETERS;
802 		break;
803 	default:
804 		DMSG("cookie %#"PRIx64" busy", cookie);
805 		ret_val = FFA_DENIED;
806 		break;
807 	}
808 out:
809 	set_args(args, ret_fid, ret_val, 0, 0, 0, 0);
810 }
811 
812 /* Only called from assembly */
813 void thread_spmc_msg_recv(struct thread_smc_args *args);
814 void thread_spmc_msg_recv(struct thread_smc_args *args)
815 {
816 	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
817 	switch (args->a0) {
818 	case FFA_VERSION:
819 		handle_version(args);
820 		break;
821 	case FFA_FEATURES:
822 		handle_features(args);
823 		break;
824 #ifdef ARM64
825 	case FFA_RXTX_MAP_64:
826 #endif
827 	case FFA_RXTX_MAP_32:
828 		handle_rxtx_map(args);
829 		break;
830 	case FFA_RXTX_UNMAP:
831 		handle_rxtx_unmap(args);
832 		break;
833 	case FFA_RX_RELEASE:
834 		handle_rx_release(args);
835 		break;
836 	case FFA_PARTITION_INFO_GET:
837 		handle_partition_info_get(args);
838 		break;
839 	case FFA_INTERRUPT:
840 		itr_core_handler();
841 		set_args(args, FFA_SUCCESS_32, args->a1, 0, 0, 0, 0);
842 		break;
843 	case FFA_MSG_SEND_DIRECT_REQ_32:
844 		if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
845 			handle_yielding_call(args);
846 		else
847 			handle_blocking_call(args);
848 		break;
849 #ifdef ARM64
850 	case FFA_MEM_SHARE_64:
851 #endif
852 	case FFA_MEM_SHARE_32:
853 		handle_mem_share(args);
854 		break;
855 	case FFA_MEM_RECLAIM:
856 		handle_mem_reclaim(args);
857 		break;
858 	case FFA_MEM_FRAG_TX:
859 		handle_mem_frag_tx(args);
860 		break;
861 	default:
862 		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
863 		set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
864 			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
865 	}
866 }
867 
868 static uint32_t yielding_call_with_arg(uint64_t cookie)
869 {
870 	uint32_t rv = TEE_ERROR_BAD_PARAMETERS;
871 	struct optee_msg_arg *arg = NULL;
872 	struct mobj *mobj = NULL;
873 	uint32_t num_params = 0;
874 
875 	mobj = mobj_ffa_get_by_cookie(cookie, 0);
876 	if (!mobj) {
877 		EMSG("Can't find cookie %#"PRIx64, cookie);
878 		return TEE_ERROR_BAD_PARAMETERS;
879 	}
880 
881 	rv = mobj_inc_map(mobj);
882 	if (rv)
883 		goto out_put_mobj;
884 
885 	rv = TEE_ERROR_BAD_PARAMETERS;
886 	arg = mobj_get_va(mobj, 0);
887 	if (!arg)
888 		goto out_dec_map;
889 
890 	if (!mobj_get_va(mobj, sizeof(*arg)))
891 		goto out_dec_map;
892 
893 	num_params = READ_ONCE(arg->num_params);
894 	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
895 		goto out_dec_map;
896 
897 	if (!mobj_get_va(mobj, OPTEE_MSG_GET_ARG_SIZE(num_params)))
898 		goto out_dec_map;
899 
900 	rv = tee_entry_std(arg, num_params);
901 
902 	thread_rpc_shm_cache_clear(&threads[thread_get_id()].shm_cache);
903 
904 out_dec_map:
905 	mobj_dec_map(mobj);
906 out_put_mobj:
907 	mobj_put(mobj);
908 	return rv;
909 }
910 
911 static uint32_t yielding_unregister_shm(uint64_t cookie)
912 {
913 	uint32_t res = mobj_ffa_unregister_by_cookie(cookie);
914 
915 	switch (res) {
916 	case TEE_SUCCESS:
917 	case TEE_ERROR_ITEM_NOT_FOUND:
918 		return 0;
919 	case TEE_ERROR_BUSY:
920 		EMSG("res %#"PRIx32, res);
921 		return FFA_BUSY;
922 	default:
923 		EMSG("res %#"PRIx32, res);
924 		return FFA_INVALID_PARAMETERS;
925 	}
926 }
927 
928 /*
929  * Helper routine for the assembly function thread_std_smc_entry()
930  *
931  * Note: this function is weak just to make it possible to exclude it from
932  * the unpaged area.
933  */
934 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
935 				       uint32_t a2, uint32_t a3)
936 {
937 	/*
938 	 * Arguments are supplied from handle_yielding_call() as:
939 	 * a0 <- w1
940 	 * a1 <- w3
941 	 * a2 <- w4
942 	 * a3 <- w5
943 	 */
944 	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
945 	switch (a1) {
946 	case OPTEE_FFA_YIELDING_CALL_WITH_ARG:
947 		return yielding_call_with_arg(reg_pair_to_64(a3, a2));
948 	case OPTEE_FFA_YIELDING_CALL_REGISTER_SHM:
949 		return FFA_NOT_SUPPORTED;
950 	case OPTEE_FFA_YIELDING_CALL_UNREGISTER_SHM:
951 		return yielding_unregister_shm(reg_pair_to_64(a3, a2));
952 	default:
953 		return FFA_DENIED;
954 	}
955 }
956 
957 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
958 {
959 	uint64_t offs = tpm->u.memref.offs;
960 
961 	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
962 		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
963 
964 	param->u.fmem.offs_low = offs;
965 	param->u.fmem.offs_high = offs >> 32;
966 	if (param->u.fmem.offs_high != offs >> 32)
967 		return false;
968 
969 	param->u.fmem.size = tpm->u.memref.size;
970 	if (tpm->u.memref.mobj) {
971 		param->u.fmem.global_id = mobj_get_cookie(tpm->u.memref.mobj);
972 		if (!param->u.fmem.global_id)
973 			return false;
974 	} else {
975 		param->u.fmem.global_id = 0;
976 	}
977 
978 	return true;
979 }
980 
981 static void thread_rpc_free(uint32_t type, uint64_t cookie, struct mobj *mobj)
982 {
983 	TEE_Result res = TEE_SUCCESS;
984 	struct thread_rpc_arg rpc_arg = { .call = {
985 			.w1 = thread_get_tsd()->rpc_target_info,
986 			.w4 = type,
987 		},
988 	};
989 
990 	reg_pair_from_64(cookie, &rpc_arg.call.w6, &rpc_arg.call.w5);
991 	mobj_put(mobj);
992 	res = mobj_ffa_unregister_by_cookie(cookie);
993 	if (res)
994 		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): res %#"PRIx32,
995 		     cookie, res);
996 	thread_rpc(&rpc_arg);
997 }
998 
999 static struct mobj *thread_rpc_alloc(size_t size, uint32_t type)
1000 {
1001 	struct mobj *mobj = NULL;
1002 	unsigned int page_count = ROUNDUP(size, SMALL_PAGE_SIZE) /
1003 				  SMALL_PAGE_SIZE;
1004 	struct thread_rpc_arg rpc_arg = { .call = {
1005 			.w1 = thread_get_tsd()->rpc_target_info,
1006 			.w4 = type,
1007 			.w5 = page_count,
1008 		},
1009 	};
1010 	unsigned int internal_offset = 0;
1011 	uint64_t cookie = 0;
1012 
1013 	thread_rpc(&rpc_arg);
1014 
1015 	cookie = reg_pair_to_64(rpc_arg.ret.w5, rpc_arg.ret.w4);
1016 	if (!cookie)
1017 		return NULL;
1018 	internal_offset = rpc_arg.ret.w6;
1019 
1020 	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
1021 	if (!mobj) {
1022 		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
1023 		     cookie, internal_offset);
1024 		return NULL;
1025 	}
1026 
1027 	assert(mobj_is_nonsec(mobj));
1028 
1029 	if (mobj_inc_map(mobj)) {
1030 		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
1031 		mobj_put(mobj);
1032 		return NULL;
1033 	}
1034 
1035 	return mobj;
1036 }
1037 
1038 struct mobj *thread_rpc_alloc_payload(size_t size)
1039 {
1040 	return thread_rpc_alloc(size,
1041 				OPTEE_FFA_YIELDING_CALL_RETURN_ALLOC_SUPPL_SHM);
1042 }
1043 
1044 void thread_rpc_free_payload(struct mobj *mobj)
1045 {
1046 	thread_rpc_free(OPTEE_FFA_YIELDING_CALL_RETURN_FREE_SUPPL_SHM,
1047 			mobj_get_cookie(mobj), mobj);
1048 }
1049 
1050 struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
1051 {
1052 	return thread_rpc_alloc(size,
1053 				OPTEE_FFA_YIELDING_CALL_RETURN_ALLOC_KERN_SHM);
1054 }
1055 
1056 void thread_rpc_free_kernel_payload(struct mobj *mobj)
1057 {
1058 	thread_rpc_free(OPTEE_FFA_YIELDING_CALL_RETURN_FREE_KERN_SHM,
1059 			mobj_get_cookie(mobj), mobj);
1060 }
1061 
1062 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
1063 			    struct thread_param *params,
1064 			    struct optee_msg_arg **arg_ret,
1065 			    uint64_t *carg_ret)
1066 {
1067 	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1068 	struct thread_ctx *thr = threads + thread_get_id();
1069 	struct optee_msg_arg *arg = thr->rpc_arg;
1070 
1071 	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
1072 		return TEE_ERROR_BAD_PARAMETERS;
1073 
1074 	if (!arg) {
1075 		struct mobj *mobj = thread_rpc_alloc_kernel_payload(sz);
1076 
1077 		if (!mobj)
1078 			return TEE_ERROR_OUT_OF_MEMORY;
1079 
1080 		arg = mobj_get_va(mobj, 0);
1081 		if (!arg) {
1082 			thread_rpc_free_kernel_payload(mobj);
1083 			return TEE_ERROR_OUT_OF_MEMORY;
1084 		}
1085 
1086 		thr->rpc_arg = arg;
1087 		thr->rpc_mobj = mobj;
1088 	}
1089 
1090 	memset(arg, 0, sz);
1091 	arg->cmd = cmd;
1092 	arg->num_params = num_params;
1093 	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
1094 
1095 	for (size_t n = 0; n < num_params; n++) {
1096 		switch (params[n].attr) {
1097 		case THREAD_PARAM_ATTR_NONE:
1098 			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
1099 			break;
1100 		case THREAD_PARAM_ATTR_VALUE_IN:
1101 		case THREAD_PARAM_ATTR_VALUE_OUT:
1102 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1103 			arg->params[n].attr = params[n].attr -
1104 					      THREAD_PARAM_ATTR_VALUE_IN +
1105 					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
1106 			arg->params[n].u.value.a = params[n].u.value.a;
1107 			arg->params[n].u.value.b = params[n].u.value.b;
1108 			arg->params[n].u.value.c = params[n].u.value.c;
1109 			break;
1110 		case THREAD_PARAM_ATTR_MEMREF_IN:
1111 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1112 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1113 			if (!set_fmem(arg->params + n, params + n))
1114 				return TEE_ERROR_BAD_PARAMETERS;
1115 			break;
1116 		default:
1117 			return TEE_ERROR_BAD_PARAMETERS;
1118 		}
1119 	}
1120 
1121 	*arg_ret = arg;
1122 	*carg_ret = mobj_get_cookie(thr->rpc_mobj);
1123 
1124 	return TEE_SUCCESS;
1125 }
1126 
1127 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
1128 				struct thread_param *params)
1129 {
1130 	for (size_t n = 0; n < num_params; n++) {
1131 		switch (params[n].attr) {
1132 		case THREAD_PARAM_ATTR_VALUE_OUT:
1133 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1134 			params[n].u.value.a = arg->params[n].u.value.a;
1135 			params[n].u.value.b = arg->params[n].u.value.b;
1136 			params[n].u.value.c = arg->params[n].u.value.c;
1137 			break;
1138 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1139 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1140 			params[n].u.memref.size = arg->params[n].u.fmem.size;
1141 			break;
1142 		default:
1143 			break;
1144 		}
1145 	}
1146 
1147 	return arg->ret;
1148 }
1149 
1150 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
1151 			struct thread_param *params)
1152 {
1153 	struct thread_rpc_arg rpc_arg = { .call = {
1154 			.w1 = thread_get_tsd()->rpc_target_info,
1155 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1156 		},
1157 	};
1158 	uint64_t carg = 0;
1159 	struct optee_msg_arg *arg = NULL;
1160 	uint32_t ret = 0;
1161 
1162 	ret = get_rpc_arg(cmd, num_params, params, &arg, &carg);
1163 	if (ret)
1164 		return ret;
1165 
1166 	reg_pair_from_64(carg, &rpc_arg.call.w6, &rpc_arg.call.w5);
1167 	thread_rpc(&rpc_arg);
1168 
1169 	return get_rpc_arg_res(arg, num_params, params);
1170 }
1171 
1172 struct mobj *thread_rpc_alloc_global_payload(size_t size __unused)
1173 {
1174 	return NULL;
1175 }
1176 
1177 void thread_rpc_free_global_payload(struct mobj *mobj __unused)
1178 {
1179 	/*
1180 	 * "can't happen" since thread_rpc_alloc_global_payload() always
1181 	 * returns NULL.
1182 	 */
1183 	volatile bool cant_happen __maybe_unused = true;
1184 
1185 	assert(!cant_happen);
1186 }
1187