xref: /optee_os/core/arch/arm/kernel/thread_spmc.c (revision 4107d2f93e3ecd34707f1f8768950b4a61a02cbe)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2020, Linaro Limited.
 * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
 */

#include <assert.h>
#include <ffa.h>
#include <io.h>
#include <kernel/interrupt.h>
#include <kernel/panic.h>
#include <kernel/secure_partition.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/tee_misc.h>
#include <kernel/thread.h>
#include <kernel/thread_spmc.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <optee_ffa.h>
#include <optee_msg.h>
#include <optee_rpc_cmd.h>
#include <string.h>
#include <sys/queue.h>
#include <tee/entry_std.h>
#include <util.h>

#include "thread_private.h"

/* Table 39: Constituent memory region descriptor */
struct constituent_address_range {
	uint64_t address;
	uint32_t page_count;
	uint32_t reserved;
};

/* Table 38: Composite memory region descriptor */
struct mem_region_descr {
	uint32_t total_page_count;
	uint32_t address_range_count;
	uint64_t reserved;
	struct constituent_address_range address_range_array[];
};

/* Table 40: Memory access permissions descriptor */
struct mem_access_perm_descr {
	uint16_t endpoint_id;
	uint8_t access_perm;
	uint8_t flags;
};

/* Table 41: Endpoint memory access descriptor */
struct mem_access_descr {
	struct mem_access_perm_descr mem_access_perm_descr;
	uint32_t mem_region_offs;
	uint64_t reserved;
};

/* Table 44: Lend, donate or share memory transaction descriptor */
struct mem_transaction_descr {
	uint16_t sender_id;
	uint8_t mem_reg_attr;
	uint8_t reserved0;
	uint32_t flags;
	uint64_t global_handle;
	uint64_t tag;
	uint32_t reserved1;
	uint32_t mem_access_descr_count;
	struct mem_access_descr mem_access_descr_array[];
};

struct ffa_partition_info {
	uint16_t id;
	uint16_t execution_context;
	uint32_t partition_properties;
};

struct mem_share_state {
	struct mobj_ffa *mf;
	unsigned int page_count;
	unsigned int region_count;
	unsigned int current_page_idx;
};

struct mem_frag_state {
	struct mem_share_state share;
	tee_mm_entry_t *mm;
	unsigned int frag_offset;
	SLIST_ENTRY(mem_frag_state) link;
};

/*
 * If struct ffa_rxtx::size is 0 the RX/TX buffers are not mapped or
 * initialized.
 *
 * struct ffa_rxtx::spinlock protects the variables below from concurrent
 * access; this includes the use of the content of struct ffa_rxtx::rx and
 * @frag_state_head.
 *
 * struct ffa_rxtx::tx_is_mine is true when we may write to struct
 * ffa_rxtx::tx and false when it is owned by normal world.
 *
 * Note that we can't prevent normal world from updating the content of
 * these buffers so we must always be careful when reading, even while we
 * hold the lock.
 */

static struct ffa_rxtx nw_rxtx;

static bool is_nw_buf(struct ffa_rxtx *rxtx)
{
	return rxtx == &nw_rxtx;
}

static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
	SLIST_HEAD_INITIALIZER(frag_state_head);

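/*
 * Swap the source and destination endpoint IDs of a direct message: FF-A
 * packs them in w1 with the sender in bits [31:16] and the receiver in
 * bits [15:0], so rotating the word by 16 bits turns a request's src/dst
 * pair into the one expected in the response.
 */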
static uint32_t swap_src_dst(uint32_t src_dst)
{
	return (src_dst >> 16) | (src_dst << 16);
}

void spmc_set_args(struct thread_smc_args *args, uint32_t fid, uint32_t src_dst,
		   uint32_t w2, uint32_t w3, uint32_t w4, uint32_t w5)
{
	*args = (struct thread_smc_args){ .a0 = fid,
					  .a1 = src_dst,
					  .a2 = w2,
					  .a3 = w3,
					  .a4 = w4,
					  .a5 = w5, };
}

void spmc_handle_version(struct thread_smc_args *args)
{
	/*
	 * We currently only support one version, 1.0, so let's keep it
	 * simple.
	 */
	spmc_set_args(args,
		      MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR),
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

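/*
 * Handle FFA_FEATURES: report whether the FF-A function ID in w1 is
 * implemented and, where defined, its interface properties in w2 (for
 * instance the minimum RX/TX buffer size or support for dynamically
 * allocated transaction buffers).
 */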
static void handle_features(struct thread_smc_args *args)
{
	uint32_t ret_fid = 0;
	uint32_t ret_w2 = FFA_PARAM_MBZ;

	switch (args->a1) {
#ifdef ARM64
	case FFA_RXTX_MAP_64:
#endif
	case FFA_RXTX_MAP_32:
		ret_fid = FFA_SUCCESS_32;
		ret_w2 = 0; /* 4kB minimum buffer size and alignment boundary */
		break;
#ifdef ARM64
	case FFA_MEM_SHARE_64:
#endif
	case FFA_MEM_SHARE_32:
		ret_fid = FFA_SUCCESS_32;
		/*
		 * Partition manager supports transmission of a memory
		 * transaction descriptor in a buffer dynamically allocated
		 * by the endpoint.
		 */
		ret_w2 = BIT(0);
		break;

	case FFA_ERROR:
	case FFA_VERSION:
	case FFA_SUCCESS_32:
#ifdef ARM64
	case FFA_SUCCESS_64:
#endif
	case FFA_MEM_FRAG_TX:
	case FFA_MEM_RECLAIM:
	case FFA_MSG_SEND_DIRECT_REQ_32:
	case FFA_INTERRUPT:
	case FFA_PARTITION_INFO_GET:
	case FFA_RX_RELEASE:
		ret_fid = FFA_SUCCESS_32;
		break;
	default:
		ret_fid = FFA_ERROR;
		ret_w2 = FFA_NOT_SUPPORTED;
		break;
	}

	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

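/*
 * Map a physically contiguous, non-secure buffer of @sz bytes into the
 * non-secure shared memory area. Returns 0 on success or an FFA_* error
 * code, with the virtual address passed back via @va_ret.
 */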
static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
{
	tee_mm_entry_t *mm = NULL;

	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
		return FFA_INVALID_PARAMETERS;

	mm = tee_mm_alloc(&tee_mm_shm, sz);
	if (!mm)
		return FFA_NO_MEMORY;

	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
					  sz / SMALL_PAGE_SIZE,
					  MEM_AREA_NSEC_SHM)) {
		tee_mm_free(mm);
		return FFA_INVALID_PARAMETERS;
	}

	*va_ret = (void *)tee_mm_get_smem(mm);
	return 0;
}

static void unmap_buf(void *va, size_t sz)
{
	tee_mm_entry_t *mm = tee_mm_find(&tee_mm_shm, (vaddr_t)va);

	assert(mm);
	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
	tee_mm_free(mm);
}

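/*
 * Handle FFA_RXTX_MAP: w1 holds the TX buffer address, w2 the RX buffer
 * address and w3 the buffer size as a page count, all from the caller's
 * point of view.
 */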
void spmc_handle_rxtx_map(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
{
	int rc = 0;
	uint32_t ret_fid = FFA_ERROR;
	unsigned int sz = 0;
	paddr_t rx_pa = 0;
	paddr_t tx_pa = 0;
	void *rx = NULL;
	void *tx = NULL;

	cpu_spin_lock(&rxtx->spinlock);

	if (args->a3 & GENMASK_64(63, 6)) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	sz = args->a3 * SMALL_PAGE_SIZE;
	if (!sz) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}
	/* TX/RX are swapped compared to the caller */
	tx_pa = args->a2;
	rx_pa = args->a1;

	if (rxtx->size) {
		rc = FFA_DENIED;
		goto out;
	}

	/*
	 * If the buffer comes from an SP the address is virtual and already
	 * mapped.
	 */
	if (is_nw_buf(rxtx)) {
		rc = map_buf(tx_pa, sz, &tx);
		if (rc)
			goto out;
		rc = map_buf(rx_pa, sz, &rx);
		if (rc) {
			unmap_buf(tx, sz);
			goto out;
		}
		rxtx->tx = tx;
		rxtx->rx = rx;
	} else {
		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}

		if (!virt_to_phys((void *)tx_pa) ||
		    !virt_to_phys((void *)rx_pa)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}

		rxtx->tx = (void *)tx_pa;
		rxtx->rx = (void *)rx_pa;
	}

	rxtx->size = sz;
	rxtx->tx_is_mine = true;
	ret_fid = FFA_SUCCESS_32;
	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
out:
	cpu_spin_unlock(&rxtx->spinlock);
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

void spmc_handle_rxtx_unmap(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
{
	uint32_t ret_fid = FFA_ERROR;
	int rc = FFA_INVALID_PARAMETERS;

	cpu_spin_lock(&rxtx->spinlock);

	if (!rxtx->size)
		goto out;

	/* We don't unmap the SP memory as the SP might still use it */
	if (is_nw_buf(rxtx)) {
		unmap_buf(rxtx->rx, rxtx->size);
		unmap_buf(rxtx->tx, rxtx->size);
	}
	rxtx->size = 0;
	rxtx->rx = NULL;
	rxtx->tx = NULL;
	ret_fid = FFA_SUCCESS_32;
	rc = 0;
out:
	cpu_spin_unlock(&rxtx->spinlock);
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

void spmc_handle_rx_release(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
{
	uint32_t ret_fid = 0;
	int rc = 0;

	cpu_spin_lock(&rxtx->spinlock);
	/* The sender's RX is our TX */
	if (!rxtx->size || rxtx->tx_is_mine) {
		ret_fid = FFA_ERROR;
		rc = FFA_DENIED;
	} else {
		ret_fid = FFA_SUCCESS_32;
		rc = 0;
		rxtx->tx_is_mine = true;
	}
	cpu_spin_unlock(&rxtx->spinlock);

	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
{
	return !w0 && !w1 && !w2 && !w3;
}

static bool is_optee_os_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
{
	return w0 == OPTEE_MSG_OS_OPTEE_UUID_0 &&
	       w1 == OPTEE_MSG_OS_OPTEE_UUID_1 &&
	       w2 == OPTEE_MSG_OS_OPTEE_UUID_2 &&
	       w3 == OPTEE_MSG_OS_OPTEE_UUID_3;
}

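/*
 * Handle FFA_PARTITION_INFO_GET for the nil UUID or the OP-TEE OS UUID:
 * a single struct ffa_partition_info describing the SPMC endpoint is
 * written to the caller's RX buffer and w2 carries the partition count
 * (1). Ownership of the RX buffer passes to the caller, who hands it back
 * with FFA_RX_RELEASE.
 */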
static void handle_partition_info_get(struct thread_smc_args *args,
				      struct ffa_rxtx *rxtx)
{
	uint32_t ret_fid = 0;
	int rc = 0;

	if (!is_nil_uuid(args->a1, args->a2, args->a3, args->a4) &&
	    !is_optee_os_uuid(args->a1, args->a2, args->a3, args->a4)) {
		ret_fid = FFA_ERROR;
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	cpu_spin_lock(&rxtx->spinlock);
	if (rxtx->size && rxtx->tx_is_mine) {
		struct ffa_partition_info *fpi = rxtx->tx;

		fpi->id = SPMC_ENDPOINT_ID;
		fpi->execution_context = CFG_TEE_CORE_NB_CORE;
		fpi->partition_properties = BIT(0) | BIT(1);

		ret_fid = FFA_SUCCESS_32;
		rc = 1;
		rxtx->tx_is_mine = false;
	} else {
		ret_fid = FFA_ERROR;
		if (rxtx->size)
			rc = FFA_BUSY;
		else
			rc = FFA_DENIED; /* TX buffer not set up yet */
	}
	cpu_spin_unlock(&rxtx->spinlock);

out:
	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

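/*
 * A yielding call either resumes a thread suspended by an earlier RPC or
 * allocates a new thread for a fresh request. thread_resume_from_rpc()
 * and thread_alloc_and_run() are expected to return here only on
 * failure, so the error codes below take effect only when the thread
 * couldn't be resumed or allocated.
 */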
static void handle_yielding_call(struct thread_smc_args *args)
{
	uint32_t ret_val = 0;

	thread_check_canaries();

	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
		/* Note connection to struct thread_rpc_arg::ret */
		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
				       0);
		ret_val = FFA_INVALID_PARAMETERS;
	} else {
		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
				     args->a6, args->a7);
		ret_val = FFA_BUSY;
	}
	spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32, swap_src_dst(args->a1),
		      0, ret_val, 0, 0);
}

static void handle_blocking_call(struct thread_smc_args *args)
{
	switch (args->a3) {
	case OPTEE_FFA_GET_API_VERSION:
		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
			      swap_src_dst(args->a1), 0,
			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
			      0);
		break;
	case OPTEE_FFA_GET_OS_VERSION:
		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
			      swap_src_dst(args->a1), 0,
			      CFG_OPTEE_REVISION_MAJOR,
			      CFG_OPTEE_REVISION_MINOR, TEE_IMPL_GIT_SHA1);
		break;
	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
		spmc_set_args(args, FFA_MSG_SEND_DIRECT_RESP_32,
			      swap_src_dst(args->a1), 0, 0, 0, 0);
		break;
	default:
		EMSG("Unhandled blocking service ID %#"PRIx32,
		     (uint32_t)args->a3);
		panic();
	}
}

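/*
 * Scan the endpoint memory access descriptor array for the entry naming
 * the SPMC endpoint and return its access permissions together with the
 * offset of the composite memory region descriptor.
 */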
static int get_acc_perms(struct mem_access_descr *mem_acc,
			 unsigned int num_mem_accs, uint8_t *acc_perms,
			 unsigned int *region_offs)
{
	unsigned int n = 0;

	for (n = 0; n < num_mem_accs; n++) {
		struct mem_access_perm_descr *descr =
			&mem_acc[n].mem_access_perm_descr;

		if (READ_ONCE(descr->endpoint_id) == SPMC_ENDPOINT_ID) {
			*acc_perms = READ_ONCE(descr->access_perm);
			*region_offs = READ_ONCE(mem_acc[n].mem_region_offs);
			return 0;
		}
	}

	return FFA_INVALID_PARAMETERS;
}

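/*
 * Validate the memory transaction descriptor at @buf, which per the
 * tables above is laid out as:
 *
 *   struct mem_transaction_descr
 *   struct mem_access_descr[mem_access_descr_count]
 *   ... at mem_region_offs:
 *   struct mem_region_descr
 *   struct constituent_address_range[address_range_count]
 *
 * On success the total page count, the number of constituent regions and
 * the offset of the first address range are returned.
 */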
static int mem_share_init(void *buf, size_t blen, unsigned int *page_count,
			  unsigned int *region_count, size_t *addr_range_offs)
{
	struct mem_region_descr *region_descr = NULL;
	struct mem_transaction_descr *descr = NULL;
	const uint8_t exp_mem_acc_perm = 0x6; /* Not executable, Read-write */
	/* Normal memory, Write-Back cacheable, Inner shareable */
	const uint8_t exp_mem_reg_attr = 0x2f;
	unsigned int num_mem_accs = 0;
	uint8_t mem_acc_perm = 0;
	unsigned int region_descr_offs = 0;
	size_t n = 0;

	if (!ALIGNMENT_IS_OK(buf, struct mem_transaction_descr) ||
	    blen < sizeof(struct mem_transaction_descr))
		return FFA_INVALID_PARAMETERS;

	descr = buf;

	/* Check that the endpoint memory access descriptor array fits */
	num_mem_accs = READ_ONCE(descr->mem_access_descr_count);
	if (MUL_OVERFLOW(sizeof(struct mem_access_descr), num_mem_accs, &n) ||
	    ADD_OVERFLOW(sizeof(*descr), n, &n) || n > blen)
		return FFA_INVALID_PARAMETERS;

	if (READ_ONCE(descr->mem_reg_attr) != exp_mem_reg_attr)
		return FFA_INVALID_PARAMETERS;

	/* Check that the access permissions match what's expected */
	if (get_acc_perms(descr->mem_access_descr_array,
			  num_mem_accs, &mem_acc_perm, &region_descr_offs) ||
	    mem_acc_perm != exp_mem_acc_perm)
		return FFA_INVALID_PARAMETERS;

	/* Check that the composite memory region descriptor fits */
	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
	    n > blen)
		return FFA_INVALID_PARAMETERS;

	if (!ALIGNMENT_IS_OK((vaddr_t)descr + region_descr_offs,
			     struct mem_region_descr))
		return FFA_INVALID_PARAMETERS;

	region_descr = (struct mem_region_descr *)((vaddr_t)descr +
						    region_descr_offs);
	*page_count = READ_ONCE(region_descr->total_page_count);
	*region_count = READ_ONCE(region_descr->address_range_count);
	*addr_range_offs = n;
	return 0;
}

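/*
 * Register the constituent address ranges in @buf with the shared mobj.
 * Returns the number of consumed bytes if more fragments are expected,
 * 0 when the last range has been added, or a negative FFA_* error code.
 */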
static int add_mem_share_helper(struct mem_share_state *s, void *buf,
				size_t flen)
{
	unsigned int region_count = flen /
				    sizeof(struct constituent_address_range);
	struct constituent_address_range *arange = NULL;
	unsigned int n = 0;

	if (region_count > s->region_count)
		region_count = s->region_count;

	if (!ALIGNMENT_IS_OK(buf, struct constituent_address_range))
		return FFA_INVALID_PARAMETERS;
	arange = buf;

	for (n = 0; n < region_count; n++) {
		unsigned int page_count = READ_ONCE(arange[n].page_count);
		uint64_t addr = READ_ONCE(arange[n].address);

		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
					  addr, page_count))
			return FFA_INVALID_PARAMETERS;
	}

	s->region_count -= region_count;
	if (s->region_count)
		return region_count * sizeof(*arange);

	if (s->current_page_idx != s->page_count)
		return FFA_INVALID_PARAMETERS;

	return 0;
}

static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
{
	int rc = 0;

	rc = add_mem_share_helper(&s->share, buf, flen);
	if (rc >= 0) {
		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
			if (s->share.region_count)
				return s->frag_offset;
			/* We're done, return the number of consumed bytes */
			rc = s->frag_offset;
		} else {
			rc = FFA_INVALID_PARAMETERS;
		}
	}

	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
	if (rc < 0)
		mobj_ffa_sel1_spmc_delete(s->share.mf);
	else
		mobj_ffa_push_to_inactive(s->share.mf);
	free(s);

	return rc;
}

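/*
 * Create a shared mobj from the transaction descriptor in @buf. If only
 * a first fragment of @blen total bytes is supplied (@flen < @blen), a
 * struct mem_frag_state is queued so the remaining constituent ranges
 * can arrive via FFA_MEM_FRAG_TX.
 */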
static int add_mem_share(tee_mm_entry_t *mm, void *buf, size_t blen,
			 size_t flen, uint64_t *global_handle)
{
	int rc = 0;
	struct mem_share_state share = { };
	size_t addr_range_offs = 0;
	size_t n = 0;

	if (flen > blen)
		return FFA_INVALID_PARAMETERS;

	rc = mem_share_init(buf, flen, &share.page_count, &share.region_count,
			    &addr_range_offs);
	if (rc)
		return rc;

	if (MUL_OVERFLOW(share.region_count,
			 sizeof(struct constituent_address_range), &n) ||
	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
		return FFA_INVALID_PARAMETERS;

	share.mf = mobj_ffa_sel1_spmc_new(share.page_count);
	if (!share.mf)
		return FFA_NO_MEMORY;

	if (flen != blen) {
		struct mem_frag_state *s = calloc(1, sizeof(*s));

		if (!s) {
			rc = FFA_NO_MEMORY;
			goto err;
		}
		s->share = share;
		s->mm = mm;
		s->frag_offset = addr_range_offs;

		SLIST_INSERT_HEAD(&frag_state_head, s, link);
		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
					flen - addr_range_offs);

		if (rc >= 0)
			*global_handle = mobj_ffa_get_cookie(share.mf);

		return rc;
	}

	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
				  flen - addr_range_offs);
	if (rc) {
		/*
		 * The helper returns the number of consumed bytes if the
		 * transaction isn't complete. The entire descriptor was
		 * supplied above so anything but 0 is an error here.
		 */
		rc = FFA_INVALID_PARAMETERS;
		goto err;
	}

	*global_handle = mobj_ffa_push_to_inactive(share.mf);

	return 0;
err:
	mobj_ffa_sel1_spmc_delete(share.mf);
	return rc;
}

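/*
 * Share memory described by a transaction descriptor that normal world
 * has placed in a dynamically allocated buffer at physical address @pbuf
 * spanning @page_count pages. The buffer is mapped temporarily; the
 * mapping is kept only while a fragmented share is still in progress.
 */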
static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
				 unsigned int page_count,
				 uint64_t *global_handle, struct ffa_rxtx *rxtx)
{
	int rc = 0;
	size_t len = 0;
	tee_mm_entry_t *mm = NULL;
	vaddr_t offs = pbuf & SMALL_PAGE_MASK;

	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
		return FFA_INVALID_PARAMETERS;
	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
		return FFA_INVALID_PARAMETERS;

	/*
	 * Check that the length reported in blen is covered by len even
	 * if the offset is taken into account.
	 */
	if (len < blen || len - offs < blen)
		return FFA_INVALID_PARAMETERS;

	mm = tee_mm_alloc(&tee_mm_shm, len);
	if (!mm)
		return FFA_NO_MEMORY;

	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
					  page_count, MEM_AREA_NSEC_SHM)) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	cpu_spin_lock(&rxtx->spinlock);
	rc = add_mem_share(mm, (void *)(tee_mm_get_smem(mm) + offs), blen, flen,
			   global_handle);
	cpu_spin_unlock(&rxtx->spinlock);
	if (rc > 0)
		return rc;

	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
out:
	tee_mm_free(mm);
	return rc;
}

static int handle_mem_share_rxbuf(size_t blen, size_t flen,
				  uint64_t *global_handle,
				  struct ffa_rxtx *rxtx)
{
	int rc = FFA_DENIED;

	cpu_spin_lock(&rxtx->spinlock);

	if (rxtx->rx && flen <= rxtx->size)
		rc = add_mem_share(NULL, rxtx->rx, blen, flen, global_handle);

	cpu_spin_unlock(&rxtx->spinlock);

	return rc;
}

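/*
 * Handle FFA_MEM_SHARE: w1 holds the total length of the transaction
 * descriptor, w2 the length of this fragment, w3 the address of a
 * dynamically allocated buffer (0 means the RX buffer is used) and w4
 * its page count. A positive return from the helpers is the number of
 * bytes consumed so far and turns into an FFA_MEM_FRAG_RX request for
 * the next fragment.
 */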
static void handle_mem_share(struct thread_smc_args *args,
			     struct ffa_rxtx *rxtx)
{
	uint32_t ret_w1 = 0;
	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
	uint32_t ret_w3 = 0;
	uint32_t ret_fid = FFA_ERROR;
	uint64_t global_handle = 0;
	int rc = 0;

	/* Check that the MBZs are indeed 0 */
	if (args->a5 || args->a6 || args->a7)
		goto out;

	if (!args->a3) {
		/*
		 * The memory transaction descriptor is passed via our rx
		 * buffer.
		 */
		if (args->a4)
			goto out;
		rc = handle_mem_share_rxbuf(args->a1, args->a2, &global_handle,
					    rxtx);
	} else {
		rc = handle_mem_share_tmem(args->a3, args->a1, args->a2,
					   args->a4, &global_handle, rxtx);
	}
	if (rc < 0) {
		ret_w2 = rc;
		goto out;
	}
	if (rc > 0) {
		ret_fid = FFA_MEM_FRAG_RX;
		ret_w3 = rc;
		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
		goto out;
	}
	ret_fid = FFA_SUCCESS_32;
	reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
out:
	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
}

static struct mem_frag_state *get_frag_state(uint64_t global_handle)
{
	struct mem_frag_state *s = NULL;

	SLIST_FOREACH(s, &frag_state_head, link)
		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
			return s;

	return NULL;
}

static void handle_mem_frag_tx(struct thread_smc_args *args,
			       struct ffa_rxtx *rxtx)
{
	int rc = 0;
	uint64_t global_handle = reg_pair_to_64(READ_ONCE(args->a2),
						READ_ONCE(args->a1));
	size_t flen = READ_ONCE(args->a3);
	struct mem_frag_state *s = NULL;
	tee_mm_entry_t *mm = NULL;
	unsigned int page_count = 0;
	void *buf = NULL;
	uint32_t ret_w1 = 0;
	uint32_t ret_w2 = 0;
	uint32_t ret_w3 = 0;
	uint32_t ret_fid = 0;

	/*
	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
	 * requests.
	 */

	cpu_spin_lock(&rxtx->spinlock);

	s = get_frag_state(global_handle);
	if (!s) {
		rc = FFA_INVALID_PARAMETERS;
		goto out;
	}

	mm = s->mm;
	if (mm) {
		if (flen > tee_mm_get_bytes(mm)) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
		page_count = s->share.page_count;
		buf = (void *)tee_mm_get_smem(mm);
	} else {
		if (flen > rxtx->size) {
			rc = FFA_INVALID_PARAMETERS;
			goto out;
		}
		buf = rxtx->rx;
	}

	rc = add_mem_share_frag(s, buf, flen);
out:
	cpu_spin_unlock(&rxtx->spinlock);

	if (rc <= 0 && mm) {
		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
		tee_mm_free(mm);
	}

	if (rc < 0) {
		ret_fid = FFA_ERROR;
		ret_w2 = rc;
	} else if (rc > 0) {
		ret_fid = FFA_MEM_FRAG_RX;
		ret_w3 = rc;
		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
	} else {
		ret_fid = FFA_SUCCESS_32;
		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
	}

	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
}

static void handle_mem_reclaim(struct thread_smc_args *args)
{
	uint32_t ret_val = FFA_INVALID_PARAMETERS;
	uint32_t ret_fid = FFA_ERROR;
	uint64_t cookie = 0;

	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
		goto out;

	cookie = reg_pair_to_64(args->a2, args->a1);
	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
	case TEE_SUCCESS:
		ret_fid = FFA_SUCCESS_32;
		ret_val = 0;
		break;
	case TEE_ERROR_ITEM_NOT_FOUND:
		DMSG("cookie %#"PRIx64" not found", cookie);
		ret_val = FFA_INVALID_PARAMETERS;
		break;
	default:
		DMSG("cookie %#"PRIx64" busy", cookie);
		ret_val = FFA_DENIED;
		break;
	}
out:
	spmc_set_args(args, ret_fid, ret_val, 0, 0, 0, 0);
}

/* Only called from assembly */
void thread_spmc_msg_recv(struct thread_smc_args *args);
void thread_spmc_msg_recv(struct thread_smc_args *args)
{
	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
	switch (args->a0) {
	case FFA_VERSION:
		spmc_handle_version(args);
		break;
	case FFA_FEATURES:
		handle_features(args);
		break;
#ifdef ARM64
	case FFA_RXTX_MAP_64:
#endif
	case FFA_RXTX_MAP_32:
		spmc_handle_rxtx_map(args, &nw_rxtx);
		break;
	case FFA_RXTX_UNMAP:
		spmc_handle_rxtx_unmap(args, &nw_rxtx);
		break;
	case FFA_RX_RELEASE:
		spmc_handle_rx_release(args, &nw_rxtx);
		break;
	case FFA_PARTITION_INFO_GET:
		handle_partition_info_get(args, &nw_rxtx);
		break;
	case FFA_INTERRUPT:
		itr_core_handler();
		spmc_set_args(args, FFA_SUCCESS_32, args->a1, 0, 0, 0, 0);
		break;
	case FFA_MSG_SEND_DIRECT_REQ_32:
		if (IS_ENABLED(CFG_SECURE_PARTITION) &&
		    FFA_DST(args->a1) != SPMC_ENDPOINT_ID) {
			spmc_sp_start_thread(args);
			break;
		}

		if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
			handle_yielding_call(args);
		else
			handle_blocking_call(args);
		break;
#ifdef ARM64
	case FFA_MEM_SHARE_64:
#endif
	case FFA_MEM_SHARE_32:
		handle_mem_share(args, &nw_rxtx);
		break;
	case FFA_MEM_RECLAIM:
		handle_mem_reclaim(args);
		break;
	case FFA_MEM_FRAG_TX:
		handle_mem_frag_tx(args, &nw_rxtx);
		break;
	default:
		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
	}
}

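/*
 * Execute a standard call with the argument struct residing in
 * previously shared memory identified by @cookie. The struct
 * optee_msg_arg and all of its parameters must fit inside the shared
 * object, which the mobj_get_va() probes below verify.
 */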
static uint32_t yielding_call_with_arg(uint64_t cookie)
{
	uint32_t rv = TEE_ERROR_BAD_PARAMETERS;
	struct optee_msg_arg *arg = NULL;
	struct mobj *mobj = NULL;
	uint32_t num_params = 0;

	mobj = mobj_ffa_get_by_cookie(cookie, 0);
	if (!mobj) {
		EMSG("Can't find cookie %#"PRIx64, cookie);
		return TEE_ERROR_BAD_PARAMETERS;
	}

	rv = mobj_inc_map(mobj);
	if (rv)
		goto out_put_mobj;

	rv = TEE_ERROR_BAD_PARAMETERS;
	arg = mobj_get_va(mobj, 0);
	if (!arg)
		goto out_dec_map;

	if (!mobj_get_va(mobj, sizeof(*arg)))
		goto out_dec_map;

	num_params = READ_ONCE(arg->num_params);
	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
		goto out_dec_map;

	if (!mobj_get_va(mobj, OPTEE_MSG_GET_ARG_SIZE(num_params)))
		goto out_dec_map;

	rv = tee_entry_std(arg, num_params);

	thread_rpc_shm_cache_clear(&threads[thread_get_id()].shm_cache);

out_dec_map:
	mobj_dec_map(mobj);
out_put_mobj:
	mobj_put(mobj);
	return rv;
}

static uint32_t yielding_unregister_shm(uint64_t cookie)
{
	uint32_t res = mobj_ffa_unregister_by_cookie(cookie);

	switch (res) {
	case TEE_SUCCESS:
	case TEE_ERROR_ITEM_NOT_FOUND:
		return 0;
	case TEE_ERROR_BUSY:
		EMSG("res %#"PRIx32, res);
		return FFA_BUSY;
	default:
		EMSG("res %#"PRIx32, res);
		return FFA_INVALID_PARAMETERS;
	}
}

/*
 * Helper routine for the assembly function thread_std_smc_entry()
 *
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
				       uint32_t a2, uint32_t a3,
				       uint32_t a4 __unused,
				       uint32_t a5 __unused)
{
	/*
	 * Arguments are supplied from handle_yielding_call() as:
	 * a0 <- w1
	 * a1 <- w3
	 * a2 <- w4
	 * a3 <- w5
	 * a4 <- w6
	 * a5 <- w7
	 */
	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
	switch (a1) {
	case OPTEE_FFA_YIELDING_CALL_WITH_ARG:
		return yielding_call_with_arg(reg_pair_to_64(a3, a2));
	case OPTEE_FFA_YIELDING_CALL_REGISTER_SHM:
		return FFA_NOT_SUPPORTED;
	case OPTEE_FFA_YIELDING_CALL_UNREGISTER_SHM:
		return yielding_unregister_shm(reg_pair_to_64(a3, a2));
	default:
		return FFA_DENIED;
	}
}

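/*
 * Convert a struct thread_param memref into the OPTEE_MSG FMEM
 * representation, splitting the 64-bit offset and translating the mobj
 * into its global cookie.
 */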
static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
{
	uint64_t offs = tpm->u.memref.offs;

	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;

	param->u.fmem.offs_low = offs;
	param->u.fmem.offs_high = offs >> 32;
	if (param->u.fmem.offs_high != offs >> 32)
		return false;

	param->u.fmem.size = tpm->u.memref.size;
	if (tpm->u.memref.mobj) {
		param->u.fmem.global_id = mobj_get_cookie(tpm->u.memref.mobj);
		if (!param->u.fmem.global_id)
			return false;
	} else {
		param->u.fmem.global_id = 0;
	}

	return true;
}

static void thread_rpc_free(uint32_t type, uint64_t cookie, struct mobj *mobj)
{
	TEE_Result res = TEE_SUCCESS;
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = type,
		},
	};

	reg_pair_from_64(cookie, &rpc_arg.call.w6, &rpc_arg.call.w5);
	mobj_put(mobj);
	res = mobj_ffa_unregister_by_cookie(cookie);
	if (res)
		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): res %#"PRIx32,
		     cookie, res);
	thread_rpc(&rpc_arg);
}

static struct mobj *thread_rpc_alloc(size_t size, uint32_t type)
{
	struct mobj *mobj = NULL;
	unsigned int page_count = ROUNDUP(size, SMALL_PAGE_SIZE) /
				  SMALL_PAGE_SIZE;
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = type,
			.w5 = page_count,
		},
	};
	unsigned int internal_offset = 0;
	uint64_t cookie = 0;

	thread_rpc(&rpc_arg);

	cookie = reg_pair_to_64(rpc_arg.ret.w5, rpc_arg.ret.w4);
	if (!cookie)
		return NULL;
	internal_offset = rpc_arg.ret.w6;

	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
	if (!mobj) {
		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
		     cookie, internal_offset);
		return NULL;
	}

	assert(mobj_is_nonsec(mobj));

	if (mobj_inc_map(mobj)) {
		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
		mobj_put(mobj);
		return NULL;
	}

	return mobj;
}

struct mobj *thread_rpc_alloc_payload(size_t size)
{
	return thread_rpc_alloc(size,
				OPTEE_FFA_YIELDING_CALL_RETURN_ALLOC_SUPPL_SHM);
}

void thread_rpc_free_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_FFA_YIELDING_CALL_RETURN_FREE_SUPPL_SHM,
			mobj_get_cookie(mobj), mobj);
}

struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
{
	return thread_rpc_alloc(size,
				OPTEE_FFA_YIELDING_CALL_RETURN_ALLOC_KERN_SHM);
}

void thread_rpc_free_kernel_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_FFA_YIELDING_CALL_RETURN_FREE_KERN_SHM,
			mobj_get_cookie(mobj), mobj);
}

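/*
 * Build the RPC argument struct in this thread's RPC shared memory
 * buffer, allocating the buffer from normal world on first use. The
 * buffer is sized for THREAD_RPC_MAX_NUM_PARAMS parameters and reused
 * for subsequent RPCs on the same thread.
 */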
static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
			    struct thread_param *params,
			    struct optee_msg_arg **arg_ret,
			    uint64_t *carg_ret)
{
	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
	struct thread_ctx *thr = threads + thread_get_id();
	struct optee_msg_arg *arg = thr->rpc_arg;

	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!arg) {
		struct mobj *mobj = thread_rpc_alloc_kernel_payload(sz);

		if (!mobj)
			return TEE_ERROR_OUT_OF_MEMORY;

		arg = mobj_get_va(mobj, 0);
		if (!arg) {
			thread_rpc_free_kernel_payload(mobj);
			return TEE_ERROR_OUT_OF_MEMORY;
		}

		thr->rpc_arg = arg;
		thr->rpc_mobj = mobj;
	}

	memset(arg, 0, sz);
	arg->cmd = cmd;
	arg->num_params = num_params;
	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */

	for (size_t n = 0; n < num_params; n++) {
		switch (params[n].attr) {
		case THREAD_PARAM_ATTR_NONE:
			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
			break;
		case THREAD_PARAM_ATTR_VALUE_IN:
		case THREAD_PARAM_ATTR_VALUE_OUT:
		case THREAD_PARAM_ATTR_VALUE_INOUT:
			arg->params[n].attr = params[n].attr -
					      THREAD_PARAM_ATTR_VALUE_IN +
					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
			arg->params[n].u.value.a = params[n].u.value.a;
			arg->params[n].u.value.b = params[n].u.value.b;
			arg->params[n].u.value.c = params[n].u.value.c;
			break;
		case THREAD_PARAM_ATTR_MEMREF_IN:
		case THREAD_PARAM_ATTR_MEMREF_OUT:
		case THREAD_PARAM_ATTR_MEMREF_INOUT:
			if (!set_fmem(arg->params + n, params + n))
				return TEE_ERROR_BAD_PARAMETERS;
			break;
		default:
			return TEE_ERROR_BAD_PARAMETERS;
		}
	}

	*arg_ret = arg;
	*carg_ret = mobj_get_cookie(thr->rpc_mobj);

	return TEE_SUCCESS;
}

static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
				struct thread_param *params)
{
	for (size_t n = 0; n < num_params; n++) {
		switch (params[n].attr) {
		case THREAD_PARAM_ATTR_VALUE_OUT:
		case THREAD_PARAM_ATTR_VALUE_INOUT:
			params[n].u.value.a = arg->params[n].u.value.a;
			params[n].u.value.b = arg->params[n].u.value.b;
			params[n].u.value.c = arg->params[n].u.value.c;
			break;
		case THREAD_PARAM_ATTR_MEMREF_OUT:
		case THREAD_PARAM_ATTR_MEMREF_INOUT:
			params[n].u.memref.size = arg->params[n].u.fmem.size;
			break;
		default:
			break;
		}
	}

	return arg->ret;
}

uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
			struct thread_param *params)
{
	struct thread_rpc_arg rpc_arg = { .call = {
			.w1 = thread_get_tsd()->rpc_target_info,
			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
		},
	};
	uint64_t carg = 0;
	struct optee_msg_arg *arg = NULL;
	uint32_t ret = 0;

	ret = get_rpc_arg(cmd, num_params, params, &arg, &carg);
	if (ret)
		return ret;

	reg_pair_from_64(carg, &rpc_arg.call.w6, &rpc_arg.call.w5);
	thread_rpc(&rpc_arg);

	return get_rpc_arg_res(arg, num_params, params);
}

struct mobj *thread_rpc_alloc_global_payload(size_t size __unused)
{
	return NULL;
}

void thread_rpc_free_global_payload(struct mobj *mobj __unused)
{
	/*
	 * "can't happen" since thread_rpc_alloc_global_payload() always
	 * returns NULL.
	 */
	volatile bool cant_happen __maybe_unused = true;

	assert(!cant_happen);
}
1234