xref: /optee_os/core/arch/arm/kernel/thread_spmc.c (revision 5f7f88c6b9d618d1e068166bbf2b07757350791d)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2020-2023, Linaro Limited.
4  * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
5  */
6 
7 #include <assert.h>
8 #include <ffa.h>
9 #include <initcall.h>
10 #include <io.h>
11 #include <kernel/interrupt.h>
12 #include <kernel/notif.h>
13 #include <kernel/panic.h>
14 #include <kernel/secure_partition.h>
15 #include <kernel/spinlock.h>
16 #include <kernel/spmc_sp_handler.h>
17 #include <kernel/tee_misc.h>
18 #include <kernel/thread.h>
19 #include <kernel/thread_private.h>
20 #include <kernel/thread_spmc.h>
21 #include <kernel/virtualization.h>
22 #include <mm/core_mmu.h>
23 #include <mm/mobj.h>
24 #include <optee_ffa.h>
25 #include <optee_msg.h>
26 #include <optee_rpc_cmd.h>
27 #include <sm/optee_smc.h>
28 #include <string.h>
29 #include <sys/queue.h>
30 #include <tee/entry_std.h>
31 #include <tee/uuid.h>
32 #include <util.h>
33 
34 #if defined(CFG_CORE_SEL1_SPMC)
35 struct mem_share_state {
36 	struct mobj_ffa *mf;
37 	unsigned int page_count;
38 	unsigned int region_count;
39 	unsigned int current_page_idx;
40 };
41 
42 struct mem_frag_state {
43 	struct mem_share_state share;
44 	tee_mm_entry_t *mm;
45 	unsigned int frag_offset;
46 	SLIST_ENTRY(mem_frag_state) link;
47 };
48 #endif
49 
50 static unsigned int spmc_notif_lock = SPINLOCK_UNLOCK;
51 static int do_bottom_half_value = -1;
52 static uint16_t notif_vm_id;
53 static bool spmc_notif_is_ready;
54 
55 /* Initialized in spmc_init() below */
56 static uint16_t my_endpoint_id __nex_bss;
57 #ifdef CFG_CORE_SEL1_SPMC
58 static const uint32_t my_part_props = FFA_PART_PROP_DIRECT_REQ_RECV |
59 				      FFA_PART_PROP_DIRECT_REQ_SEND |
60 #ifdef CFG_NS_VIRTUALIZATION
61 				      FFA_PART_PROP_NOTIF_CREATED |
62 				      FFA_PART_PROP_NOTIF_DESTROYED |
63 #endif
64 #ifdef ARM64
65 				      FFA_PART_PROP_AARCH64_STATE |
66 #endif
67 				      FFA_PART_PROP_IS_PE_ID;
68 
69 static uint32_t my_uuid_words[] = {
70 	/*
71 	 * - if the SPMC is in S-EL2 this UUID describes OP-TEE as a S-EL1
72 	 *   SP, or
73 	 * - if the SPMC is in S-EL1 then this UUID is for OP-TEE as a
74 	 *   logical partition, residing in the same exception level as the
75 	 *   SPMC
76 	 * UUID 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
77 	 */
78 	0xe0786148, 0xe311f8e7, 0x02005ebc, 0x1bc5d5a5,
79 };
80 
81 /*
82  * If struct ffa_rxtx::size is 0 RX/TX buffers are not mapped or initialized.
83  *
84  * struct ffa_rxtx::spin_lock protects the variables below from concurrent
85  * access this includes the use of content of struct ffa_rxtx::rx and
86  * @frag_state_head.
87  *
88  * struct ffa_rxtx::tx_buf_is_mine is true when we may write to struct
89  * ffa_rxtx::tx and false when it is owned by normal world.
90  *
91  * Note that we can't prevent normal world from updating the content of
92  * these buffers so we must always be careful when reading. while we hold
93  * the lock.
94  */
95 
96 static struct ffa_rxtx my_rxtx __nex_bss;
97 
98 static bool is_nw_buf(struct ffa_rxtx *rxtx)
99 {
100 	return rxtx == &my_rxtx;
101 }
102 
103 static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
104 	SLIST_HEAD_INITIALIZER(&frag_state_head);
105 
106 static uint64_t notif_pending_bitmap;
107 static uint64_t notif_bound_bitmap;
108 static bool notif_vm_id_valid;
109 static int notif_intid = -1;
110 #else
111 static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
112 static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
113 static struct ffa_rxtx my_rxtx = {
114 	.rx = __rx_buf,
115 	.tx = __tx_buf,
116 	.size = sizeof(__rx_buf),
117 };
118 #endif
119 
120 static uint32_t swap_src_dst(uint32_t src_dst)
121 {
122 	return (src_dst >> 16) | (src_dst << 16);
123 }
124 
125 static uint16_t get_sender_id(uint32_t src_dst)
126 {
127 	return src_dst >> 16;
128 }
129 
130 void spmc_set_args(struct thread_smc_args *args, uint32_t fid, uint32_t src_dst,
131 		   uint32_t w2, uint32_t w3, uint32_t w4, uint32_t w5)
132 {
133 	*args = (struct thread_smc_args){ .a0 = fid,
134 					  .a1 = src_dst,
135 					  .a2 = w2,
136 					  .a3 = w3,
137 					  .a4 = w4,
138 					  .a5 = w5, };
139 }
140 
141 uint32_t spmc_exchange_version(uint32_t vers, struct ffa_rxtx *rxtx)
142 {
143 	/*
144 	 * No locking, if the caller does concurrent calls to this it's
145 	 * only making a mess for itself. We must be able to renegotiate
146 	 * the FF-A version in order to support differing versions between
147 	 * the loader and the driver.
148 	 */
149 	if (vers < FFA_VERSION_1_1)
150 		rxtx->ffa_vers = FFA_VERSION_1_0;
151 	else
152 		rxtx->ffa_vers = FFA_VERSION_1_1;
153 
154 	return rxtx->ffa_vers;
155 }
156 
157 #if defined(CFG_CORE_SEL1_SPMC)
158 static void handle_features(struct thread_smc_args *args)
159 {
160 	uint32_t ret_fid = FFA_ERROR;
161 	uint32_t ret_w2 = FFA_NOT_SUPPORTED;
162 
163 	switch (args->a1) {
164 	case FFA_FEATURE_SCHEDULE_RECV_INTR:
165 		if (spmc_notif_is_ready) {
166 			ret_fid = FFA_SUCCESS_32;
167 			ret_w2 = notif_intid;
168 		}
169 		break;
170 
171 #ifdef ARM64
172 	case FFA_RXTX_MAP_64:
173 #endif
174 	case FFA_RXTX_MAP_32:
175 		ret_fid = FFA_SUCCESS_32;
176 		ret_w2 = 0; /* 4kB Minimum buffer size and alignment boundary */
177 		break;
178 #ifdef ARM64
179 	case FFA_MEM_SHARE_64:
180 #endif
181 	case FFA_MEM_SHARE_32:
182 		ret_fid = FFA_SUCCESS_32;
183 		/*
184 		 * Partition manager supports transmission of a memory
185 		 * transaction descriptor in a buffer dynamically allocated
186 		 * by the endpoint.
187 		 */
188 		ret_w2 = BIT(0);
189 		break;
190 
191 	case FFA_ERROR:
192 	case FFA_VERSION:
193 	case FFA_SUCCESS_32:
194 #ifdef ARM64
195 	case FFA_SUCCESS_64:
196 #endif
197 	case FFA_FEATURES:
198 	case FFA_SPM_ID_GET:
199 	case FFA_MEM_FRAG_TX:
200 	case FFA_MEM_RECLAIM:
201 	case FFA_MSG_SEND_DIRECT_REQ_64:
202 	case FFA_MSG_SEND_DIRECT_REQ_32:
203 	case FFA_INTERRUPT:
204 	case FFA_PARTITION_INFO_GET:
205 	case FFA_RXTX_UNMAP:
206 	case FFA_RX_RELEASE:
207 	case FFA_FEATURE_MANAGED_EXIT_INTR:
208 	case FFA_NOTIFICATION_BITMAP_CREATE:
209 	case FFA_NOTIFICATION_BITMAP_DESTROY:
210 	case FFA_NOTIFICATION_BIND:
211 	case FFA_NOTIFICATION_UNBIND:
212 	case FFA_NOTIFICATION_SET:
213 	case FFA_NOTIFICATION_GET:
214 	case FFA_NOTIFICATION_INFO_GET_32:
215 #ifdef ARM64
216 	case FFA_NOTIFICATION_INFO_GET_64:
217 #endif
218 		ret_fid = FFA_SUCCESS_32;
219 		ret_w2 = FFA_PARAM_MBZ;
220 		break;
221 	default:
222 		break;
223 	}
224 
225 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
226 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
227 }
228 
229 static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
230 {
231 	tee_mm_entry_t *mm = NULL;
232 
233 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
234 		return FFA_INVALID_PARAMETERS;
235 
236 	mm = tee_mm_alloc(&tee_mm_shm, sz);
237 	if (!mm)
238 		return FFA_NO_MEMORY;
239 
240 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
241 					  sz / SMALL_PAGE_SIZE,
242 					  MEM_AREA_NSEC_SHM)) {
243 		tee_mm_free(mm);
244 		return FFA_INVALID_PARAMETERS;
245 	}
246 
247 	*va_ret = (void *)tee_mm_get_smem(mm);
248 	return 0;
249 }
250 
251 static void handle_spm_id_get(struct thread_smc_args *args)
252 {
253 	spmc_set_args(args, FFA_SUCCESS_32, FFA_PARAM_MBZ, my_endpoint_id,
254 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
255 }
256 
257 static void unmap_buf(void *va, size_t sz)
258 {
259 	tee_mm_entry_t *mm = tee_mm_find(&tee_mm_shm, (vaddr_t)va);
260 
261 	assert(mm);
262 	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
263 	tee_mm_free(mm);
264 }
265 
266 void spmc_handle_rxtx_map(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
267 {
268 	int rc = 0;
269 	uint32_t ret_fid = FFA_ERROR;
270 	unsigned int sz = 0;
271 	paddr_t rx_pa = 0;
272 	paddr_t tx_pa = 0;
273 	void *rx = NULL;
274 	void *tx = NULL;
275 
276 	cpu_spin_lock(&rxtx->spinlock);
277 
278 	if (args->a3 & GENMASK_64(63, 6)) {
279 		rc = FFA_INVALID_PARAMETERS;
280 		goto out;
281 	}
282 
283 	sz = args->a3 * SMALL_PAGE_SIZE;
284 	if (!sz) {
285 		rc = FFA_INVALID_PARAMETERS;
286 		goto out;
287 	}
288 	/* TX/RX are swapped compared to the caller */
289 	tx_pa = args->a2;
290 	rx_pa = args->a1;
291 
292 	if (rxtx->size) {
293 		rc = FFA_DENIED;
294 		goto out;
295 	}
296 
297 	/*
298 	 * If the buffer comes from a SP the address is virtual and already
299 	 * mapped.
300 	 */
301 	if (is_nw_buf(rxtx)) {
302 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
303 			enum teecore_memtypes mt = MEM_AREA_NEX_NSEC_SHM;
304 			bool tx_alloced = false;
305 
306 			/*
307 			 * With virtualization we establish this mapping in
308 			 * the nexus mapping which then is replicated to
309 			 * each partition.
310 			 *
311 			 * This means that this mapping must be done before
312 			 * any partition is created and then must not be
313 			 * changed.
314 			 */
315 
316 			/*
317 			 * core_mmu_add_mapping() may reuse previous
318 			 * mappings. First check if there's any mappings to
319 			 * reuse so we know how to clean up in case of
320 			 * failure.
321 			 */
322 			tx = phys_to_virt(tx_pa, mt, sz);
323 			rx = phys_to_virt(rx_pa, mt, sz);
324 			if (!tx) {
325 				tx = core_mmu_add_mapping(mt, tx_pa, sz);
326 				if (!tx) {
327 					rc = FFA_NO_MEMORY;
328 					goto out;
329 				}
330 				tx_alloced = true;
331 			}
332 			if (!rx)
333 				rx = core_mmu_add_mapping(mt, rx_pa, sz);
334 
335 			if (!rx) {
336 				if (tx_alloced && tx)
337 					core_mmu_remove_mapping(mt, tx, sz);
338 				rc = FFA_NO_MEMORY;
339 				goto out;
340 			}
341 		} else {
342 			rc = map_buf(tx_pa, sz, &tx);
343 			if (rc)
344 				goto out;
345 			rc = map_buf(rx_pa, sz, &rx);
346 			if (rc) {
347 				unmap_buf(tx, sz);
348 				goto out;
349 			}
350 		}
351 		rxtx->tx = tx;
352 		rxtx->rx = rx;
353 	} else {
354 		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
355 			rc = FFA_INVALID_PARAMETERS;
356 			goto out;
357 		}
358 
359 		if (!virt_to_phys((void *)tx_pa) ||
360 		    !virt_to_phys((void *)rx_pa)) {
361 			rc = FFA_INVALID_PARAMETERS;
362 			goto out;
363 		}
364 
365 		rxtx->tx = (void *)tx_pa;
366 		rxtx->rx = (void *)rx_pa;
367 	}
368 
369 	rxtx->size = sz;
370 	rxtx->tx_is_mine = true;
371 	ret_fid = FFA_SUCCESS_32;
372 	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
373 	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
374 out:
375 	cpu_spin_unlock(&rxtx->spinlock);
376 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
377 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
378 }
379 
380 void spmc_handle_rxtx_unmap(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
381 {
382 	uint32_t ret_fid = FFA_ERROR;
383 	int rc = FFA_INVALID_PARAMETERS;
384 
385 	cpu_spin_lock(&rxtx->spinlock);
386 
387 	if (!rxtx->size)
388 		goto out;
389 
390 	/* We don't unmap the SP memory as the SP might still use it */
391 	if (is_nw_buf(rxtx)) {
392 		unmap_buf(rxtx->rx, rxtx->size);
393 		unmap_buf(rxtx->tx, rxtx->size);
394 	}
395 	rxtx->size = 0;
396 	rxtx->rx = NULL;
397 	rxtx->tx = NULL;
398 	ret_fid = FFA_SUCCESS_32;
399 	rc = 0;
400 out:
401 	cpu_spin_unlock(&rxtx->spinlock);
402 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
403 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
404 }
405 
406 void spmc_handle_rx_release(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
407 {
408 	uint32_t ret_fid = 0;
409 	int rc = 0;
410 
411 	cpu_spin_lock(&rxtx->spinlock);
412 	/* The senders RX is our TX */
413 	if (!rxtx->size || rxtx->tx_is_mine) {
414 		ret_fid = FFA_ERROR;
415 		rc = FFA_DENIED;
416 	} else {
417 		ret_fid = FFA_SUCCESS_32;
418 		rc = 0;
419 		rxtx->tx_is_mine = true;
420 	}
421 	cpu_spin_unlock(&rxtx->spinlock);
422 
423 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
424 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
425 }
426 
427 static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
428 {
429 	return !w0 && !w1 && !w2 && !w3;
430 }
431 
432 static bool is_my_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
433 {
434 	/*
435 	 * This depends on which UUID we have been assigned.
436 	 * TODO add a generic mechanism to obtain our UUID.
437 	 *
438 	 * The test below is for the hard coded UUID
439 	 * 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
440 	 */
441 	return w0 == my_uuid_words[0] && w1 == my_uuid_words[1] &&
442 	       w2 == my_uuid_words[2] && w3 == my_uuid_words[3];
443 }
444 
445 TEE_Result spmc_fill_partition_entry(uint32_t ffa_vers, void *buf, size_t blen,
446 				     size_t idx, uint16_t endpoint_id,
447 				     uint16_t execution_context,
448 				     uint32_t part_props,
449 				     const uint32_t uuid_words[4])
450 {
451 	struct ffa_partition_info_x *fpi = NULL;
452 	size_t fpi_size = sizeof(*fpi);
453 
454 	if (ffa_vers >= FFA_VERSION_1_1)
455 		fpi_size += FFA_UUID_SIZE;
456 
457 	if ((idx + 1) * fpi_size > blen)
458 		return TEE_ERROR_OUT_OF_MEMORY;
459 
460 	fpi = (void *)((vaddr_t)buf + idx * fpi_size);
461 	fpi->id = endpoint_id;
462 	/* Number of execution contexts implemented by this partition */
463 	fpi->execution_context = execution_context;
464 
465 	fpi->partition_properties = part_props;
466 
467 	if (ffa_vers >= FFA_VERSION_1_1) {
468 		if (uuid_words)
469 			memcpy(fpi->uuid, uuid_words, FFA_UUID_SIZE);
470 		else
471 			memset(fpi->uuid, 0, FFA_UUID_SIZE);
472 	}
473 
474 	return TEE_SUCCESS;
475 }
476 
477 static int handle_partition_info_get_all(size_t *elem_count,
478 					 struct ffa_rxtx *rxtx, bool count_only)
479 {
480 	if (!count_only) {
481 		/* Add OP-TEE SP */
482 		if (spmc_fill_partition_entry(rxtx->ffa_vers, rxtx->tx,
483 					      rxtx->size, 0, my_endpoint_id,
484 					      CFG_TEE_CORE_NB_CORE,
485 					      my_part_props, my_uuid_words))
486 			return FFA_NO_MEMORY;
487 	}
488 	*elem_count = 1;
489 
490 	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
491 		if (sp_partition_info_get(rxtx->ffa_vers, rxtx->tx, rxtx->size,
492 					  NULL, elem_count, count_only))
493 			return FFA_NO_MEMORY;
494 	}
495 
496 	return FFA_OK;
497 }
498 
499 void spmc_handle_partition_info_get(struct thread_smc_args *args,
500 				    struct ffa_rxtx *rxtx)
501 {
502 	TEE_Result res = TEE_SUCCESS;
503 	uint32_t ret_fid = FFA_ERROR;
504 	uint32_t fpi_size = 0;
505 	uint32_t rc = 0;
506 	bool count_only = args->a5 & FFA_PARTITION_INFO_GET_COUNT_FLAG;
507 
508 	if (!count_only) {
509 		cpu_spin_lock(&rxtx->spinlock);
510 
511 		if (!rxtx->size || !rxtx->tx_is_mine) {
512 			rc = FFA_BUSY;
513 			goto out;
514 		}
515 	}
516 
517 	if (is_nil_uuid(args->a1, args->a2, args->a3, args->a4)) {
518 		size_t elem_count = 0;
519 
520 		ret_fid = handle_partition_info_get_all(&elem_count, rxtx,
521 							count_only);
522 
523 		if (ret_fid) {
524 			rc = ret_fid;
525 			ret_fid = FFA_ERROR;
526 		} else {
527 			ret_fid = FFA_SUCCESS_32;
528 			rc = elem_count;
529 		}
530 
531 		goto out;
532 	}
533 
534 	if (is_my_uuid(args->a1, args->a2, args->a3, args->a4)) {
535 		if (!count_only) {
536 			res = spmc_fill_partition_entry(rxtx->ffa_vers,
537 							rxtx->tx, rxtx->size, 0,
538 							my_endpoint_id,
539 							CFG_TEE_CORE_NB_CORE,
540 							my_part_props,
541 							my_uuid_words);
542 			if (res) {
543 				ret_fid = FFA_ERROR;
544 				rc = FFA_INVALID_PARAMETERS;
545 				goto out;
546 			}
547 		}
548 		rc = 1;
549 	} else if (IS_ENABLED(CFG_SECURE_PARTITION)) {
550 		uint32_t uuid_array[4] = { 0 };
551 		TEE_UUID uuid = { };
552 		size_t count = 0;
553 
554 		uuid_array[0] = args->a1;
555 		uuid_array[1] = args->a2;
556 		uuid_array[2] = args->a3;
557 		uuid_array[3] = args->a4;
558 		tee_uuid_from_octets(&uuid, (uint8_t *)uuid_array);
559 
560 		res = sp_partition_info_get(rxtx->ffa_vers, rxtx->tx,
561 					    rxtx->size, &uuid, &count,
562 					    count_only);
563 		if (res != TEE_SUCCESS) {
564 			ret_fid = FFA_ERROR;
565 			rc = FFA_INVALID_PARAMETERS;
566 			goto out;
567 		}
568 		rc = count;
569 	} else {
570 		ret_fid = FFA_ERROR;
571 		rc = FFA_INVALID_PARAMETERS;
572 		goto out;
573 	}
574 
575 	ret_fid = FFA_SUCCESS_32;
576 
577 out:
578 	if (ret_fid == FFA_SUCCESS_32 && !count_only &&
579 	    rxtx->ffa_vers >= FFA_VERSION_1_1)
580 		fpi_size = sizeof(struct ffa_partition_info_x) + FFA_UUID_SIZE;
581 
582 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, fpi_size,
583 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
584 	if (!count_only) {
585 		rxtx->tx_is_mine = false;
586 		cpu_spin_unlock(&rxtx->spinlock);
587 	}
588 }
589 
590 static void spmc_handle_run(struct thread_smc_args *args)
591 {
592 	uint16_t endpoint = FFA_TARGET_INFO_GET_SP_ID(args->a1);
593 	uint16_t thread_id = FFA_TARGET_INFO_GET_VCPU_ID(args->a1);
594 	uint32_t rc = FFA_OK;
595 
596 	if (endpoint != my_endpoint_id) {
597 		/*
598 		 * The endpoint should be an SP, try to resume the SP from
599 		 * preempted into busy state.
600 		 */
601 		rc = spmc_sp_resume_from_preempted(endpoint);
602 		if (rc)
603 			goto out;
604 	}
605 
606 	thread_resume_from_rpc(thread_id, 0, 0, 0, 0);
607 
608 	/* thread_resume_from_rpc return only of the thread_id is invalid */
609 	rc = FFA_INVALID_PARAMETERS;
610 
611 out:
612 	spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
613 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
614 }
615 #endif /*CFG_CORE_SEL1_SPMC*/
616 
617 static uint32_t spmc_enable_async_notif(uint32_t bottom_half_value,
618 					uint16_t vm_id)
619 {
620 	uint32_t old_itr_status = 0;
621 
622 	if (!spmc_notif_is_ready) {
623 		/*
624 		 * This should never happen, not if normal world respects the
625 		 * exchanged capabilities.
626 		 */
627 		EMSG("Asynchronous notifications are not ready");
628 		return TEE_ERROR_NOT_IMPLEMENTED;
629 	}
630 
631 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
632 	do_bottom_half_value = bottom_half_value;
633 	if (!IS_ENABLED(CFG_CORE_SEL1_SPMC))
634 		notif_vm_id = vm_id;
635 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
636 
637 	notif_deliver_atomic_event(NOTIF_EVENT_STARTED);
638 	return TEE_SUCCESS;
639 }
640 
641 static void handle_yielding_call(struct thread_smc_args *args,
642 				 uint32_t direct_resp_fid)
643 {
644 	TEE_Result res = 0;
645 
646 	thread_check_canaries();
647 
648 #ifdef ARM64
649 	/* Saving this for an eventual RPC */
650 	thread_get_core_local()->direct_resp_fid = direct_resp_fid;
651 #endif
652 
653 	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
654 		/* Note connection to struct thread_rpc_arg::ret */
655 		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
656 				       0);
657 		res = TEE_ERROR_BAD_PARAMETERS;
658 	} else {
659 		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
660 				     args->a6, args->a7);
661 		res = TEE_ERROR_BUSY;
662 	}
663 	spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1),
664 		      0, res, 0, 0);
665 }
666 
667 static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
668 {
669 	uint64_t cookie = reg_pair_to_64(a5, a4);
670 	uint32_t res = 0;
671 
672 	res = mobj_ffa_unregister_by_cookie(cookie);
673 	switch (res) {
674 	case TEE_SUCCESS:
675 	case TEE_ERROR_ITEM_NOT_FOUND:
676 		return 0;
677 	case TEE_ERROR_BUSY:
678 		EMSG("res %#"PRIx32, res);
679 		return FFA_BUSY;
680 	default:
681 		EMSG("res %#"PRIx32, res);
682 		return FFA_INVALID_PARAMETERS;
683 	}
684 }
685 
686 static void handle_blocking_call(struct thread_smc_args *args,
687 				 uint32_t direct_resp_fid)
688 {
689 	uint32_t sec_caps = 0;
690 
691 	switch (args->a3) {
692 	case OPTEE_FFA_GET_API_VERSION:
693 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
694 			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
695 			      0);
696 		break;
697 	case OPTEE_FFA_GET_OS_VERSION:
698 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
699 			      CFG_OPTEE_REVISION_MAJOR,
700 			      CFG_OPTEE_REVISION_MINOR, TEE_IMPL_GIT_SHA1);
701 		break;
702 	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
703 		sec_caps = OPTEE_FFA_SEC_CAP_ARG_OFFSET;
704 		if (spmc_notif_is_ready)
705 			sec_caps |= OPTEE_FFA_SEC_CAP_ASYNC_NOTIF;
706 		spmc_set_args(args, direct_resp_fid,
707 			      swap_src_dst(args->a1), 0, 0,
708 			      THREAD_RPC_MAX_NUM_PARAMS, sec_caps);
709 		break;
710 	case OPTEE_FFA_UNREGISTER_SHM:
711 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
712 			      handle_unregister_shm(args->a4, args->a5), 0, 0);
713 		break;
714 	case OPTEE_FFA_ENABLE_ASYNC_NOTIF:
715 		spmc_set_args(args, direct_resp_fid,
716 			      swap_src_dst(args->a1), 0,
717 			      spmc_enable_async_notif(args->a4,
718 						      FFA_SRC(args->a1)),
719 			      0, 0);
720 		break;
721 	default:
722 		EMSG("Unhandled blocking service ID %#"PRIx32,
723 		     (uint32_t)args->a3);
724 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
725 			      TEE_ERROR_BAD_PARAMETERS, 0, 0);
726 	}
727 }
728 
729 static void handle_framework_direct_request(struct thread_smc_args *args,
730 					    struct ffa_rxtx *rxtx,
731 					    uint32_t direct_resp_fid)
732 {
733 	uint32_t w0 = FFA_ERROR;
734 	uint32_t w1 = FFA_PARAM_MBZ;
735 	uint32_t w2 = FFA_NOT_SUPPORTED;
736 	uint32_t w3 = FFA_PARAM_MBZ;
737 
738 	switch (args->a2 & FFA_MSG_TYPE_MASK) {
739 	case FFA_MSG_SEND_VM_CREATED:
740 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
741 			uint16_t guest_id = args->a5;
742 			TEE_Result res = virt_guest_created(guest_id);
743 
744 			w0 = direct_resp_fid;
745 			w1 = swap_src_dst(args->a1);
746 			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_CREATED;
747 			if (res == TEE_SUCCESS)
748 				w3 = FFA_OK;
749 			else if (res == TEE_ERROR_OUT_OF_MEMORY)
750 				w3 = FFA_DENIED;
751 			else
752 				w3 = FFA_INVALID_PARAMETERS;
753 		}
754 		break;
755 	case FFA_MSG_SEND_VM_DESTROYED:
756 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
757 			uint16_t guest_id = args->a5;
758 			TEE_Result res = virt_guest_destroyed(guest_id);
759 
760 			w0 = direct_resp_fid;
761 			w1 = swap_src_dst(args->a1);
762 			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_DESTROYED;
763 			if (res == TEE_SUCCESS)
764 				w3 = FFA_OK;
765 			else
766 				w3 = FFA_INVALID_PARAMETERS;
767 		}
768 		break;
769 	case FFA_MSG_VERSION_REQ:
770 		w0 = direct_resp_fid;
771 		w1 = swap_src_dst(args->a1);
772 		w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_VERSION_RESP;
773 		w3 = spmc_exchange_version(args->a3, rxtx);
774 		break;
775 	default:
776 		break;
777 	}
778 	spmc_set_args(args, w0, w1, w2, w3, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
779 }
780 
781 static void handle_direct_request(struct thread_smc_args *args,
782 				  struct ffa_rxtx *rxtx)
783 {
784 	uint32_t direct_resp_fid = 0;
785 
786 	if (IS_ENABLED(CFG_SECURE_PARTITION) &&
787 	    FFA_DST(args->a1) != my_endpoint_id) {
788 		spmc_sp_start_thread(args);
789 		return;
790 	}
791 
792 	if (OPTEE_SMC_IS_64(args->a0))
793 		direct_resp_fid = FFA_MSG_SEND_DIRECT_RESP_64;
794 	else
795 		direct_resp_fid = FFA_MSG_SEND_DIRECT_RESP_32;
796 
797 	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
798 		handle_framework_direct_request(args, rxtx, direct_resp_fid);
799 		return;
800 	}
801 
802 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
803 	    virt_set_guest(get_sender_id(args->a1))) {
804 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
805 			      TEE_ERROR_ITEM_NOT_FOUND, 0, 0);
806 		return;
807 	}
808 
809 	if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
810 		handle_yielding_call(args, direct_resp_fid);
811 	else
812 		handle_blocking_call(args, direct_resp_fid);
813 
814 	/*
815 	 * Note that handle_yielding_call() typically only returns if a
816 	 * thread cannot be allocated or found. virt_unset_guest() is also
817 	 * called from thread_state_suspend() and thread_state_free().
818 	 */
819 	virt_unset_guest();
820 }
821 
822 int spmc_read_mem_transaction(uint32_t ffa_vers, void *buf, size_t blen,
823 			      struct ffa_mem_transaction_x *trans)
824 {
825 	uint16_t mem_reg_attr = 0;
826 	uint32_t flags = 0;
827 	uint32_t count = 0;
828 	uint32_t offs = 0;
829 	uint32_t size = 0;
830 	size_t n = 0;
831 
832 	if (!IS_ALIGNED_WITH_TYPE(buf, uint64_t))
833 		return FFA_INVALID_PARAMETERS;
834 
835 	if (ffa_vers >= FFA_VERSION_1_1) {
836 		struct ffa_mem_transaction_1_1 *descr = NULL;
837 
838 		if (blen < sizeof(*descr))
839 			return FFA_INVALID_PARAMETERS;
840 
841 		descr = buf;
842 		trans->sender_id = READ_ONCE(descr->sender_id);
843 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
844 		flags = READ_ONCE(descr->flags);
845 		trans->global_handle = READ_ONCE(descr->global_handle);
846 		trans->tag = READ_ONCE(descr->tag);
847 
848 		count = READ_ONCE(descr->mem_access_count);
849 		size = READ_ONCE(descr->mem_access_size);
850 		offs = READ_ONCE(descr->mem_access_offs);
851 	} else {
852 		struct ffa_mem_transaction_1_0 *descr = NULL;
853 
854 		if (blen < sizeof(*descr))
855 			return FFA_INVALID_PARAMETERS;
856 
857 		descr = buf;
858 		trans->sender_id = READ_ONCE(descr->sender_id);
859 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
860 		flags = READ_ONCE(descr->flags);
861 		trans->global_handle = READ_ONCE(descr->global_handle);
862 		trans->tag = READ_ONCE(descr->tag);
863 
864 		count = READ_ONCE(descr->mem_access_count);
865 		size = sizeof(struct ffa_mem_access);
866 		offs = offsetof(struct ffa_mem_transaction_1_0,
867 				mem_access_array);
868 	}
869 
870 	if (mem_reg_attr > UINT8_MAX || flags > UINT8_MAX ||
871 	    size > UINT8_MAX || count > UINT8_MAX || offs > UINT16_MAX)
872 		return FFA_INVALID_PARAMETERS;
873 
874 	/* Check that the endpoint memory access descriptor array fits */
875 	if (MUL_OVERFLOW(size, count, &n) || ADD_OVERFLOW(offs, n, &n) ||
876 	    n > blen)
877 		return FFA_INVALID_PARAMETERS;
878 
879 	trans->mem_reg_attr = mem_reg_attr;
880 	trans->flags = flags;
881 	trans->mem_access_size = size;
882 	trans->mem_access_count = count;
883 	trans->mem_access_offs = offs;
884 	return 0;
885 }
886 
887 #if defined(CFG_CORE_SEL1_SPMC)
888 static int get_acc_perms(vaddr_t mem_acc_base, unsigned int mem_access_size,
889 			 unsigned int mem_access_count, uint8_t *acc_perms,
890 			 unsigned int *region_offs)
891 {
892 	struct ffa_mem_access_perm *descr = NULL;
893 	struct ffa_mem_access *mem_acc = NULL;
894 	unsigned int n = 0;
895 
896 	for (n = 0; n < mem_access_count; n++) {
897 		mem_acc = (void *)(mem_acc_base + mem_access_size * n);
898 		descr = &mem_acc->access_perm;
899 		if (READ_ONCE(descr->endpoint_id) == my_endpoint_id) {
900 			*acc_perms = READ_ONCE(descr->perm);
901 			*region_offs = READ_ONCE(mem_acc[n].region_offs);
902 			return 0;
903 		}
904 	}
905 
906 	return FFA_INVALID_PARAMETERS;
907 }
908 
909 static int mem_share_init(struct ffa_mem_transaction_x *mem_trans, void *buf,
910 			  size_t blen, unsigned int *page_count,
911 			  unsigned int *region_count, size_t *addr_range_offs)
912 {
913 	const uint16_t exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
914 	const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
915 	struct ffa_mem_region *region_descr = NULL;
916 	unsigned int region_descr_offs = 0;
917 	uint8_t mem_acc_perm = 0;
918 	size_t n = 0;
919 
920 	if (mem_trans->mem_reg_attr != exp_mem_reg_attr)
921 		return FFA_INVALID_PARAMETERS;
922 
923 	/* Check that the access permissions matches what's expected */
924 	if (get_acc_perms((vaddr_t)buf + mem_trans->mem_access_offs,
925 			  mem_trans->mem_access_size,
926 			  mem_trans->mem_access_count,
927 			  &mem_acc_perm, &region_descr_offs) ||
928 	    mem_acc_perm != exp_mem_acc_perm)
929 		return FFA_INVALID_PARAMETERS;
930 
931 	/* Check that the Composite memory region descriptor fits */
932 	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
933 	    n > blen)
934 		return FFA_INVALID_PARAMETERS;
935 
936 	if (!IS_ALIGNED_WITH_TYPE((vaddr_t)buf + region_descr_offs,
937 				  struct ffa_mem_region))
938 		return FFA_INVALID_PARAMETERS;
939 
940 	region_descr = (struct ffa_mem_region *)((vaddr_t)buf +
941 						 region_descr_offs);
942 	*page_count = READ_ONCE(region_descr->total_page_count);
943 	*region_count = READ_ONCE(region_descr->address_range_count);
944 	*addr_range_offs = n;
945 	return 0;
946 }
947 
948 static int add_mem_share_helper(struct mem_share_state *s, void *buf,
949 				size_t flen)
950 {
951 	unsigned int region_count = flen / sizeof(struct ffa_address_range);
952 	struct ffa_address_range *arange = NULL;
953 	unsigned int n = 0;
954 
955 	if (region_count > s->region_count)
956 		region_count = s->region_count;
957 
958 	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range))
959 		return FFA_INVALID_PARAMETERS;
960 	arange = buf;
961 
962 	for (n = 0; n < region_count; n++) {
963 		unsigned int page_count = READ_ONCE(arange[n].page_count);
964 		uint64_t addr = READ_ONCE(arange[n].address);
965 
966 		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
967 					  addr, page_count))
968 			return FFA_INVALID_PARAMETERS;
969 	}
970 
971 	s->region_count -= region_count;
972 	if (s->region_count)
973 		return region_count * sizeof(*arange);
974 
975 	if (s->current_page_idx != s->page_count)
976 		return FFA_INVALID_PARAMETERS;
977 
978 	return 0;
979 }
980 
981 static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
982 {
983 	int rc = 0;
984 
985 	rc = add_mem_share_helper(&s->share, buf, flen);
986 	if (rc >= 0) {
987 		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
988 			/* We're not at the end of the descriptor yet */
989 			if (s->share.region_count)
990 				return s->frag_offset;
991 
992 			/* We're done */
993 			rc = 0;
994 		} else {
995 			rc = FFA_INVALID_PARAMETERS;
996 		}
997 	}
998 
999 	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
1000 	if (rc < 0)
1001 		mobj_ffa_sel1_spmc_delete(s->share.mf);
1002 	else
1003 		mobj_ffa_push_to_inactive(s->share.mf);
1004 	free(s);
1005 
1006 	return rc;
1007 }
1008 
1009 static bool is_sp_share(struct ffa_mem_transaction_x *mem_trans,
1010 			void *buf)
1011 {
1012 	struct ffa_mem_access_perm *perm = NULL;
1013 	struct ffa_mem_access *mem_acc = NULL;
1014 
1015 	if (!IS_ENABLED(CFG_SECURE_PARTITION))
1016 		return false;
1017 
1018 	if (mem_trans->mem_access_count < 1)
1019 		return false;
1020 
1021 	mem_acc = (void *)((vaddr_t)buf + mem_trans->mem_access_offs);
1022 	perm = &mem_acc->access_perm;
1023 
1024 	/*
1025 	 * perm->endpoint_id is read here only to check if the endpoint is
1026 	 * OP-TEE. We do read it later on again, but there are some additional
1027 	 * checks there to make sure that the data is correct.
1028 	 */
1029 	return READ_ONCE(perm->endpoint_id) != my_endpoint_id;
1030 }
1031 
1032 static int add_mem_share(struct ffa_mem_transaction_x *mem_trans,
1033 			 tee_mm_entry_t *mm, void *buf, size_t blen,
1034 			 size_t flen, uint64_t *global_handle)
1035 {
1036 	int rc = 0;
1037 	struct mem_share_state share = { };
1038 	size_t addr_range_offs = 0;
1039 	uint64_t cookie = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
1040 	size_t n = 0;
1041 
1042 	rc = mem_share_init(mem_trans, buf, flen, &share.page_count,
1043 			    &share.region_count, &addr_range_offs);
1044 	if (rc)
1045 		return rc;
1046 
1047 	if (MUL_OVERFLOW(share.region_count,
1048 			 sizeof(struct ffa_address_range), &n) ||
1049 	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
1050 		return FFA_INVALID_PARAMETERS;
1051 
1052 	if (mem_trans->global_handle)
1053 		cookie = mem_trans->global_handle;
1054 	share.mf = mobj_ffa_sel1_spmc_new(cookie, share.page_count);
1055 	if (!share.mf)
1056 		return FFA_NO_MEMORY;
1057 
1058 	if (flen != blen) {
1059 		struct mem_frag_state *s = calloc(sizeof(*s), 1);
1060 
1061 		if (!s) {
1062 			rc = FFA_NO_MEMORY;
1063 			goto err;
1064 		}
1065 		s->share = share;
1066 		s->mm = mm;
1067 		s->frag_offset = addr_range_offs;
1068 
1069 		SLIST_INSERT_HEAD(&frag_state_head, s, link);
1070 		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
1071 					flen - addr_range_offs);
1072 
1073 		if (rc >= 0)
1074 			*global_handle = mobj_ffa_get_cookie(share.mf);
1075 
1076 		return rc;
1077 	}
1078 
1079 	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
1080 				  flen - addr_range_offs);
1081 	if (rc) {
1082 		/*
1083 		 * Number of consumed bytes may be returned instead of 0 for
1084 		 * done.
1085 		 */
1086 		rc = FFA_INVALID_PARAMETERS;
1087 		goto err;
1088 	}
1089 
1090 	*global_handle = mobj_ffa_push_to_inactive(share.mf);
1091 
1092 	return 0;
1093 err:
1094 	mobj_ffa_sel1_spmc_delete(share.mf);
1095 	return rc;
1096 }
1097 
1098 static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
1099 				 unsigned int page_count,
1100 				 uint64_t *global_handle, struct ffa_rxtx *rxtx)
1101 {
1102 	struct ffa_mem_transaction_x mem_trans = { };
1103 	int rc = 0;
1104 	size_t len = 0;
1105 	void *buf = NULL;
1106 	tee_mm_entry_t *mm = NULL;
1107 	vaddr_t offs = pbuf & SMALL_PAGE_MASK;
1108 
1109 	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
1110 		return FFA_INVALID_PARAMETERS;
1111 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
1112 		return FFA_INVALID_PARAMETERS;
1113 
1114 	/*
1115 	 * Check that the length reported in flen is covered by len even
1116 	 * if the offset is taken into account.
1117 	 */
1118 	if (len < flen || len - offs < flen)
1119 		return FFA_INVALID_PARAMETERS;
1120 
1121 	mm = tee_mm_alloc(&tee_mm_shm, len);
1122 	if (!mm)
1123 		return FFA_NO_MEMORY;
1124 
1125 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
1126 					  page_count, MEM_AREA_NSEC_SHM)) {
1127 		rc = FFA_INVALID_PARAMETERS;
1128 		goto out;
1129 	}
1130 	buf = (void *)(tee_mm_get_smem(mm) + offs);
1131 
1132 	cpu_spin_lock(&rxtx->spinlock);
1133 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, buf, flen, &mem_trans);
1134 	if (!rc && IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1135 	    virt_set_guest(mem_trans.sender_id))
1136 		rc = FFA_DENIED;
1137 	if (!rc)
1138 		rc = add_mem_share(&mem_trans, mm, buf, blen, flen,
1139 				   global_handle);
1140 	virt_unset_guest();
1141 	cpu_spin_unlock(&rxtx->spinlock);
1142 	if (rc > 0)
1143 		return rc;
1144 
1145 	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1146 out:
1147 	tee_mm_free(mm);
1148 	return rc;
1149 }
1150 
1151 static int handle_mem_share_rxbuf(size_t blen, size_t flen,
1152 				  uint64_t *global_handle,
1153 				  struct ffa_rxtx *rxtx)
1154 {
1155 	struct ffa_mem_transaction_x mem_trans = { };
1156 	int rc = FFA_DENIED;
1157 
1158 	cpu_spin_lock(&rxtx->spinlock);
1159 
1160 	if (!rxtx->rx || flen > rxtx->size)
1161 		goto out;
1162 
1163 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, flen,
1164 				       &mem_trans);
1165 	if (rc)
1166 		goto out;
1167 	if (is_sp_share(&mem_trans, rxtx->rx)) {
1168 		rc = spmc_sp_add_share(&mem_trans, rxtx, blen,
1169 				       global_handle, NULL);
1170 		goto out;
1171 	}
1172 
1173 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1174 	    virt_set_guest(mem_trans.sender_id))
1175 		goto out;
1176 
1177 	rc = add_mem_share(&mem_trans, NULL, rxtx->rx, blen, flen,
1178 			   global_handle);
1179 
1180 	virt_unset_guest();
1181 
1182 out:
1183 	cpu_spin_unlock(&rxtx->spinlock);
1184 
1185 	return rc;
1186 }
1187 
1188 static void handle_mem_share(struct thread_smc_args *args,
1189 			     struct ffa_rxtx *rxtx)
1190 {
1191 	uint32_t tot_len = args->a1;
1192 	uint32_t frag_len = args->a2;
1193 	uint64_t addr = args->a3;
1194 	uint32_t page_count = args->a4;
1195 	uint32_t ret_w1 = 0;
1196 	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
1197 	uint32_t ret_w3 = 0;
1198 	uint32_t ret_fid = FFA_ERROR;
1199 	uint64_t global_handle = 0;
1200 	int rc = 0;
1201 
1202 	/* Check that the MBZs are indeed 0 */
1203 	if (args->a5 || args->a6 || args->a7)
1204 		goto out;
1205 
1206 	/* Check that fragment length doesn't exceed total length */
1207 	if (frag_len > tot_len)
1208 		goto out;
1209 
1210 	/* Check for 32-bit calling convention */
1211 	if (args->a0 == FFA_MEM_SHARE_32)
1212 		addr &= UINT32_MAX;
1213 
1214 	if (!addr) {
1215 		/*
1216 		 * The memory transaction descriptor is passed via our rx
1217 		 * buffer.
1218 		 */
1219 		if (page_count)
1220 			goto out;
1221 		rc = handle_mem_share_rxbuf(tot_len, frag_len, &global_handle,
1222 					    rxtx);
1223 	} else {
1224 		rc = handle_mem_share_tmem(addr, tot_len, frag_len, page_count,
1225 					   &global_handle, rxtx);
1226 	}
1227 	if (rc < 0) {
1228 		ret_w2 = rc;
1229 	} else if (rc > 0) {
1230 		ret_fid = FFA_MEM_FRAG_RX;
1231 		ret_w3 = rc;
1232 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1233 	} else {
1234 		ret_fid = FFA_SUCCESS_32;
1235 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1236 	}
1237 out:
1238 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1239 }
1240 
1241 static struct mem_frag_state *get_frag_state(uint64_t global_handle)
1242 {
1243 	struct mem_frag_state *s = NULL;
1244 
1245 	SLIST_FOREACH(s, &frag_state_head, link)
1246 		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
1247 			return s;
1248 
1249 	return NULL;
1250 }
1251 
1252 static void handle_mem_frag_tx(struct thread_smc_args *args,
1253 			       struct ffa_rxtx *rxtx)
1254 {
1255 	uint64_t global_handle = reg_pair_to_64(args->a2, args->a1);
1256 	size_t flen = args->a3;
1257 	uint32_t endpoint_id = args->a4;
1258 	struct mem_frag_state *s = NULL;
1259 	tee_mm_entry_t *mm = NULL;
1260 	unsigned int page_count = 0;
1261 	void *buf = NULL;
1262 	uint32_t ret_w1 = 0;
1263 	uint32_t ret_w2 = 0;
1264 	uint32_t ret_w3 = 0;
1265 	uint32_t ret_fid = 0;
1266 	int rc = 0;
1267 
1268 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1269 		uint16_t guest_id = endpoint_id >> 16;
1270 
1271 		if (!guest_id || virt_set_guest(guest_id)) {
1272 			rc = FFA_INVALID_PARAMETERS;
1273 			goto out_set_rc;
1274 		}
1275 	}
1276 
1277 	/*
1278 	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
1279 	 * requests.
1280 	 */
1281 
1282 	cpu_spin_lock(&rxtx->spinlock);
1283 
1284 	s = get_frag_state(global_handle);
1285 	if (!s) {
1286 		rc = FFA_INVALID_PARAMETERS;
1287 		goto out;
1288 	}
1289 
1290 	mm = s->mm;
1291 	if (mm) {
1292 		if (flen > tee_mm_get_bytes(mm)) {
1293 			rc = FFA_INVALID_PARAMETERS;
1294 			goto out;
1295 		}
1296 		page_count = s->share.page_count;
1297 		buf = (void *)tee_mm_get_smem(mm);
1298 	} else {
1299 		if (flen > rxtx->size) {
1300 			rc = FFA_INVALID_PARAMETERS;
1301 			goto out;
1302 		}
1303 		buf = rxtx->rx;
1304 	}
1305 
1306 	rc = add_mem_share_frag(s, buf, flen);
1307 out:
1308 	virt_unset_guest();
1309 	cpu_spin_unlock(&rxtx->spinlock);
1310 
1311 	if (rc <= 0 && mm) {
1312 		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1313 		tee_mm_free(mm);
1314 	}
1315 
1316 out_set_rc:
1317 	if (rc < 0) {
1318 		ret_fid = FFA_ERROR;
1319 		ret_w2 = rc;
1320 	} else if (rc > 0) {
1321 		ret_fid = FFA_MEM_FRAG_RX;
1322 		ret_w3 = rc;
1323 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1324 	} else {
1325 		ret_fid = FFA_SUCCESS_32;
1326 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1327 	}
1328 
1329 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1330 }
1331 
1332 static void handle_mem_reclaim(struct thread_smc_args *args)
1333 {
1334 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1335 	uint32_t ret_fid = FFA_ERROR;
1336 	uint64_t cookie = 0;
1337 
1338 	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
1339 		goto out;
1340 
1341 	cookie = reg_pair_to_64(args->a2, args->a1);
1342 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1343 		uint16_t guest_id = 0;
1344 
1345 		if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
1346 			guest_id = virt_find_guest_by_cookie(cookie);
1347 		} else {
1348 			guest_id = (cookie >> FFA_MEMORY_HANDLE_PRTN_SHIFT) &
1349 				   FFA_MEMORY_HANDLE_PRTN_MASK;
1350 		}
1351 		if (!guest_id || virt_set_guest(guest_id))
1352 			goto out;
1353 	}
1354 
1355 	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
1356 	case TEE_SUCCESS:
1357 		ret_fid = FFA_SUCCESS_32;
1358 		ret_val = 0;
1359 		break;
1360 	case TEE_ERROR_ITEM_NOT_FOUND:
1361 		DMSG("cookie %#"PRIx64" not found", cookie);
1362 		ret_val = FFA_INVALID_PARAMETERS;
1363 		break;
1364 	default:
1365 		DMSG("cookie %#"PRIx64" busy", cookie);
1366 		ret_val = FFA_DENIED;
1367 		break;
1368 	}
1369 
1370 	virt_unset_guest();
1371 
1372 out:
1373 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1374 }
1375 
1376 static void handle_notification_bitmap_create(struct thread_smc_args *args)
1377 {
1378 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1379 	uint32_t ret_fid = FFA_ERROR;
1380 	uint32_t old_itr_status = 0;
1381 
1382 	if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
1383 	    !args->a5 && !args->a6 && !args->a7) {
1384 		uint16_t vm_id = args->a1;
1385 
1386 		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1387 
1388 		if (notif_vm_id_valid) {
1389 			if (vm_id == notif_vm_id)
1390 				ret_val = FFA_DENIED;
1391 			else
1392 				ret_val = FFA_NO_MEMORY;
1393 		} else {
1394 			notif_vm_id = vm_id;
1395 			notif_vm_id_valid = true;
1396 			ret_val = FFA_OK;
1397 			ret_fid = FFA_SUCCESS_32;
1398 		}
1399 
1400 		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1401 	}
1402 
1403 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1404 }
1405 
1406 static void handle_notification_bitmap_destroy(struct thread_smc_args *args)
1407 {
1408 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1409 	uint32_t ret_fid = FFA_ERROR;
1410 	uint32_t old_itr_status = 0;
1411 
1412 	if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
1413 	    !args->a5 && !args->a6 && !args->a7) {
1414 		uint16_t vm_id = args->a1;
1415 
1416 		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1417 
1418 		if (notif_vm_id_valid && vm_id == notif_vm_id) {
1419 			if (notif_pending_bitmap || notif_bound_bitmap) {
1420 				ret_val = FFA_DENIED;
1421 			} else {
1422 				notif_vm_id_valid = false;
1423 				ret_val = FFA_OK;
1424 				ret_fid = FFA_SUCCESS_32;
1425 			}
1426 		}
1427 
1428 		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1429 	}
1430 
1431 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1432 }
1433 
1434 static void handle_notification_bind(struct thread_smc_args *args)
1435 {
1436 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1437 	uint32_t ret_fid = FFA_ERROR;
1438 	uint32_t old_itr_status = 0;
1439 	uint64_t bitmap = 0;
1440 	uint16_t vm_id = 0;
1441 
1442 	if (args->a5 || args->a6 || args->a7)
1443 		goto out;
1444 	if (args->a2) {
1445 		/* We only deal with global notifications for now */
1446 		ret_val = FFA_NOT_SUPPORTED;
1447 		goto out;
1448 	}
1449 
1450 	/* The destination of the eventual notification */
1451 	vm_id = FFA_DST(args->a1);
1452 	bitmap = reg_pair_to_64(args->a4, args->a3);
1453 
1454 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1455 
1456 	if (notif_vm_id_valid && vm_id == notif_vm_id) {
1457 		if (bitmap & notif_bound_bitmap) {
1458 			ret_val = FFA_DENIED;
1459 		} else {
1460 			notif_bound_bitmap |= bitmap;
1461 			ret_val = FFA_OK;
1462 			ret_fid = FFA_SUCCESS_32;
1463 		}
1464 	}
1465 
1466 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1467 out:
1468 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1469 }
1470 
1471 static void handle_notification_unbind(struct thread_smc_args *args)
1472 {
1473 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1474 	uint32_t ret_fid = FFA_ERROR;
1475 	uint32_t old_itr_status = 0;
1476 	uint64_t bitmap = 0;
1477 	uint16_t vm_id = 0;
1478 
1479 	if (args->a2 || args->a5 || args->a6 || args->a7)
1480 		goto out;
1481 
1482 	/* The destination of the eventual notification */
1483 	vm_id = FFA_DST(args->a1);
1484 	bitmap = reg_pair_to_64(args->a4, args->a3);
1485 
1486 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1487 
1488 	if (notif_vm_id_valid && vm_id == notif_vm_id) {
1489 		/*
1490 		 * Spec says:
1491 		 * At least one notification is bound to another Sender or
1492 		 * is currently pending.
1493 		 *
1494 		 * Not sure what the intention is.
1495 		 */
1496 		if (bitmap & notif_pending_bitmap) {
1497 			ret_val = FFA_DENIED;
1498 		} else {
1499 			notif_bound_bitmap &= ~bitmap;
1500 			ret_val = FFA_OK;
1501 			ret_fid = FFA_SUCCESS_32;
1502 		}
1503 	}
1504 
1505 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1506 out:
1507 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1508 }
1509 
1510 static void handle_notification_get(struct thread_smc_args *args)
1511 {
1512 	uint32_t w2 = FFA_INVALID_PARAMETERS;
1513 	uint32_t ret_fid = FFA_ERROR;
1514 	uint32_t old_itr_status = 0;
1515 	uint16_t vm_id = 0;
1516 	uint32_t w3 = 0;
1517 
1518 	if (args->a5 || args->a6 || args->a7)
1519 		goto out;
1520 	if (!(args->a2 & 0x1)) {
1521 		ret_fid = FFA_SUCCESS_32;
1522 		w2 = 0;
1523 		goto out;
1524 	}
1525 	vm_id = FFA_DST(args->a1);
1526 
1527 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1528 
1529 	if (notif_vm_id_valid && vm_id == notif_vm_id) {
1530 		reg_pair_from_64(notif_pending_bitmap, &w3, &w2);
1531 		notif_pending_bitmap = 0;
1532 		ret_fid = FFA_SUCCESS_32;
1533 	}
1534 
1535 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1536 out:
1537 	spmc_set_args(args, ret_fid, 0, w2, w3, 0, 0);
1538 }
1539 
1540 static void handle_notification_info_get(struct thread_smc_args *args)
1541 {
1542 	uint32_t w2 = FFA_INVALID_PARAMETERS;
1543 	uint32_t ret_fid = FFA_ERROR;
1544 
1545 	if (args->a1 || args->a2 || args->a3 || args->a4 || args->a5 ||
1546 	    args->a6 || args->a7)
1547 		goto out;
1548 
1549 	if (OPTEE_SMC_IS_64(args->a0))
1550 		ret_fid = FFA_SUCCESS_64;
1551 	else
1552 		ret_fid = FFA_SUCCESS_32;
1553 
1554 	/*
1555 	 * Note, we're only supporting physical OS kernel in normal world
1556 	 * with Global Notifications.
1557 	 * So one list of ID list registers (BIT[11:7])
1558 	 * and one count of IDs (BIT[13:12] + 1)
1559 	 * and the VM is always 0.
1560 	 */
1561 	w2 = SHIFT_U32(1, 7);
1562 out:
1563 	spmc_set_args(args, ret_fid, 0, w2, 0, 0, 0);
1564 }
1565 
1566 void thread_spmc_set_async_notif_intid(int intid)
1567 {
1568 	assert(interrupt_can_raise_sgi(interrupt_get_main_chip()));
1569 	notif_intid = intid;
1570 	spmc_notif_is_ready = true;
1571 	DMSG("Asynchronous notifications are ready");
1572 }
1573 
1574 void notif_send_async(uint32_t value)
1575 {
1576 	uint32_t old_itr_status = 0;
1577 
1578 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1579 	assert(value == NOTIF_VALUE_DO_BOTTOM_HALF && spmc_notif_is_ready &&
1580 	       do_bottom_half_value >= 0 && notif_intid >= 0);
1581 	notif_pending_bitmap |= BIT64(do_bottom_half_value);
1582 	interrupt_raise_sgi(interrupt_get_main_chip(), notif_intid,
1583 			    ITR_CPU_MASK_TO_THIS_CPU);
1584 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1585 }
1586 #else
1587 void __noreturn notif_send_async(uint32_t value __unused)
1588 {
1589 	panic();
1590 }
1591 #endif
1592 
1593 /* Only called from assembly */
1594 void thread_spmc_msg_recv(struct thread_smc_args *args);
1595 void thread_spmc_msg_recv(struct thread_smc_args *args)
1596 {
1597 	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
1598 	switch (args->a0) {
1599 #if defined(CFG_CORE_SEL1_SPMC)
1600 	case FFA_FEATURES:
1601 		handle_features(args);
1602 		break;
1603 	case FFA_SPM_ID_GET:
1604 		handle_spm_id_get(args);
1605 		break;
1606 #ifdef ARM64
1607 	case FFA_RXTX_MAP_64:
1608 #endif
1609 	case FFA_RXTX_MAP_32:
1610 		spmc_handle_rxtx_map(args, &my_rxtx);
1611 		break;
1612 	case FFA_RXTX_UNMAP:
1613 		spmc_handle_rxtx_unmap(args, &my_rxtx);
1614 		break;
1615 	case FFA_RX_RELEASE:
1616 		spmc_handle_rx_release(args, &my_rxtx);
1617 		break;
1618 	case FFA_PARTITION_INFO_GET:
1619 		spmc_handle_partition_info_get(args, &my_rxtx);
1620 		break;
1621 	case FFA_RUN:
1622 		spmc_handle_run(args);
1623 		break;
1624 #endif /*CFG_CORE_SEL1_SPMC*/
1625 	case FFA_INTERRUPT:
1626 		interrupt_main_handler();
1627 		if (IS_ENABLED(CFG_CORE_SEL1_SPMC))
1628 			spmc_set_args(args, FFA_NORMAL_WORLD_RESUME, 0, 0, 0,
1629 				      0, 0);
1630 		else
1631 			spmc_set_args(args, FFA_MSG_WAIT, 0, 0, 0, 0, 0);
1632 		break;
1633 #ifdef ARM64
1634 	case FFA_MSG_SEND_DIRECT_REQ_64:
1635 #endif
1636 	case FFA_MSG_SEND_DIRECT_REQ_32:
1637 		handle_direct_request(args, &my_rxtx);
1638 		break;
1639 #if defined(CFG_CORE_SEL1_SPMC)
1640 #ifdef ARM64
1641 	case FFA_MEM_SHARE_64:
1642 #endif
1643 	case FFA_MEM_SHARE_32:
1644 		handle_mem_share(args, &my_rxtx);
1645 		break;
1646 	case FFA_MEM_RECLAIM:
1647 		if (!IS_ENABLED(CFG_SECURE_PARTITION) ||
1648 		    !ffa_mem_reclaim(args, NULL))
1649 			handle_mem_reclaim(args);
1650 		break;
1651 	case FFA_MEM_FRAG_TX:
1652 		handle_mem_frag_tx(args, &my_rxtx);
1653 		break;
1654 	case FFA_NOTIFICATION_BITMAP_CREATE:
1655 		handle_notification_bitmap_create(args);
1656 		break;
1657 	case FFA_NOTIFICATION_BITMAP_DESTROY:
1658 		handle_notification_bitmap_destroy(args);
1659 		break;
1660 	case FFA_NOTIFICATION_BIND:
1661 		handle_notification_bind(args);
1662 		break;
1663 	case FFA_NOTIFICATION_UNBIND:
1664 		handle_notification_unbind(args);
1665 		break;
1666 	case FFA_NOTIFICATION_GET:
1667 		handle_notification_get(args);
1668 		break;
1669 #ifdef ARM64
1670 	case FFA_NOTIFICATION_INFO_GET_64:
1671 #endif
1672 	case FFA_NOTIFICATION_INFO_GET_32:
1673 		handle_notification_info_get(args);
1674 		break;
1675 #endif /*CFG_CORE_SEL1_SPMC*/
1676 	case FFA_ERROR:
1677 		EMSG("Cannot handle FFA_ERROR(%d)", (int)args->a2);
1678 		if (!IS_ENABLED(CFG_CORE_SEL1_SPMC)) {
1679 			/*
1680 			 * The SPMC will return an FFA_ERROR back so better
1681 			 * panic() now than flooding the log.
1682 			 */
1683 			panic("FFA_ERROR from SPMC is fatal");
1684 		}
1685 		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
1686 			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1687 		break;
1688 	default:
1689 		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
1690 		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
1691 			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1692 	}
1693 }
1694 
1695 static TEE_Result yielding_call_with_arg(uint64_t cookie, uint32_t offset)
1696 {
1697 	size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1698 	struct thread_ctx *thr = threads + thread_get_id();
1699 	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
1700 	struct optee_msg_arg *arg = NULL;
1701 	struct mobj *mobj = NULL;
1702 	uint32_t num_params = 0;
1703 	size_t sz = 0;
1704 
1705 	mobj = mobj_ffa_get_by_cookie(cookie, 0);
1706 	if (!mobj) {
1707 		EMSG("Can't find cookie %#"PRIx64, cookie);
1708 		return TEE_ERROR_BAD_PARAMETERS;
1709 	}
1710 
1711 	res = mobj_inc_map(mobj);
1712 	if (res)
1713 		goto out_put_mobj;
1714 
1715 	res = TEE_ERROR_BAD_PARAMETERS;
1716 	arg = mobj_get_va(mobj, offset, sizeof(*arg));
1717 	if (!arg)
1718 		goto out_dec_map;
1719 
1720 	num_params = READ_ONCE(arg->num_params);
1721 	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
1722 		goto out_dec_map;
1723 
1724 	sz = OPTEE_MSG_GET_ARG_SIZE(num_params);
1725 
1726 	thr->rpc_arg = mobj_get_va(mobj, offset + sz, sz_rpc);
1727 	if (!thr->rpc_arg)
1728 		goto out_dec_map;
1729 
1730 	virt_on_stdcall();
1731 	res = tee_entry_std(arg, num_params);
1732 
1733 	thread_rpc_shm_cache_clear(&thr->shm_cache);
1734 	thr->rpc_arg = NULL;
1735 
1736 out_dec_map:
1737 	mobj_dec_map(mobj);
1738 out_put_mobj:
1739 	mobj_put(mobj);
1740 	return res;
1741 }
1742 
1743 /*
1744  * Helper routine for the assembly function thread_std_smc_entry()
1745  *
1746  * Note: this function is weak just to make link_dummies_paged.c happy.
1747  */
1748 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
1749 				       uint32_t a2, uint32_t a3,
1750 				       uint32_t a4, uint32_t a5 __unused)
1751 {
1752 	/*
1753 	 * Arguments are supplied from handle_yielding_call() as:
1754 	 * a0 <- w1
1755 	 * a1 <- w3
1756 	 * a2 <- w4
1757 	 * a3 <- w5
1758 	 * a4 <- w6
1759 	 * a5 <- w7
1760 	 */
1761 	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
1762 	if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG)
1763 		return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4);
1764 	return FFA_DENIED;
1765 }
1766 
1767 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
1768 {
1769 	uint64_t offs = tpm->u.memref.offs;
1770 
1771 	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
1772 		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
1773 
1774 	param->u.fmem.offs_low = offs;
1775 	param->u.fmem.offs_high = offs >> 32;
1776 	if (param->u.fmem.offs_high != offs >> 32)
1777 		return false;
1778 
1779 	param->u.fmem.size = tpm->u.memref.size;
1780 	if (tpm->u.memref.mobj) {
1781 		uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj);
1782 
1783 		/* If a mobj is passed it better be one with a valid cookie. */
1784 		if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
1785 			return false;
1786 		param->u.fmem.global_id = cookie;
1787 	} else {
1788 		param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
1789 	}
1790 
1791 	return true;
1792 }
1793 
1794 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
1795 			    struct thread_param *params,
1796 			    struct optee_msg_arg **arg_ret)
1797 {
1798 	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1799 	struct thread_ctx *thr = threads + thread_get_id();
1800 	struct optee_msg_arg *arg = thr->rpc_arg;
1801 
1802 	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
1803 		return TEE_ERROR_BAD_PARAMETERS;
1804 
1805 	if (!arg) {
1806 		EMSG("rpc_arg not set");
1807 		return TEE_ERROR_GENERIC;
1808 	}
1809 
1810 	memset(arg, 0, sz);
1811 	arg->cmd = cmd;
1812 	arg->num_params = num_params;
1813 	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
1814 
1815 	for (size_t n = 0; n < num_params; n++) {
1816 		switch (params[n].attr) {
1817 		case THREAD_PARAM_ATTR_NONE:
1818 			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
1819 			break;
1820 		case THREAD_PARAM_ATTR_VALUE_IN:
1821 		case THREAD_PARAM_ATTR_VALUE_OUT:
1822 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1823 			arg->params[n].attr = params[n].attr -
1824 					      THREAD_PARAM_ATTR_VALUE_IN +
1825 					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
1826 			arg->params[n].u.value.a = params[n].u.value.a;
1827 			arg->params[n].u.value.b = params[n].u.value.b;
1828 			arg->params[n].u.value.c = params[n].u.value.c;
1829 			break;
1830 		case THREAD_PARAM_ATTR_MEMREF_IN:
1831 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1832 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1833 			if (!set_fmem(arg->params + n, params + n))
1834 				return TEE_ERROR_BAD_PARAMETERS;
1835 			break;
1836 		default:
1837 			return TEE_ERROR_BAD_PARAMETERS;
1838 		}
1839 	}
1840 
1841 	if (arg_ret)
1842 		*arg_ret = arg;
1843 
1844 	return TEE_SUCCESS;
1845 }
1846 
1847 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
1848 				struct thread_param *params)
1849 {
1850 	for (size_t n = 0; n < num_params; n++) {
1851 		switch (params[n].attr) {
1852 		case THREAD_PARAM_ATTR_VALUE_OUT:
1853 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1854 			params[n].u.value.a = arg->params[n].u.value.a;
1855 			params[n].u.value.b = arg->params[n].u.value.b;
1856 			params[n].u.value.c = arg->params[n].u.value.c;
1857 			break;
1858 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1859 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1860 			params[n].u.memref.size = arg->params[n].u.fmem.size;
1861 			break;
1862 		default:
1863 			break;
1864 		}
1865 	}
1866 
1867 	return arg->ret;
1868 }
1869 
1870 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
1871 			struct thread_param *params)
1872 {
1873 	struct thread_rpc_arg rpc_arg = { .call = {
1874 			.w1 = thread_get_tsd()->rpc_target_info,
1875 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1876 		},
1877 	};
1878 	struct optee_msg_arg *arg = NULL;
1879 	uint32_t ret = 0;
1880 
1881 	ret = get_rpc_arg(cmd, num_params, params, &arg);
1882 	if (ret)
1883 		return ret;
1884 
1885 	thread_rpc(&rpc_arg);
1886 
1887 	return get_rpc_arg_res(arg, num_params, params);
1888 }
1889 
1890 static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
1891 {
1892 	struct thread_rpc_arg rpc_arg = { .call = {
1893 			.w1 = thread_get_tsd()->rpc_target_info,
1894 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1895 		},
1896 	};
1897 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
1898 	uint32_t res2 = 0;
1899 	uint32_t res = 0;
1900 
1901 	DMSG("freeing cookie %#"PRIx64, cookie);
1902 
1903 	res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);
1904 
1905 	mobj_put(mobj);
1906 	res2 = mobj_ffa_unregister_by_cookie(cookie);
1907 	if (res2)
1908 		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
1909 		     cookie, res2);
1910 	if (!res)
1911 		thread_rpc(&rpc_arg);
1912 }
1913 
1914 static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
1915 {
1916 	struct thread_rpc_arg rpc_arg = { .call = {
1917 			.w1 = thread_get_tsd()->rpc_target_info,
1918 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1919 		},
1920 	};
1921 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
1922 	struct optee_msg_arg *arg = NULL;
1923 	unsigned int internal_offset = 0;
1924 	struct mobj *mobj = NULL;
1925 	uint64_t cookie = 0;
1926 
1927 	if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
1928 		return NULL;
1929 
1930 	thread_rpc(&rpc_arg);
1931 
1932 	if (arg->num_params != 1 ||
1933 	    arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
1934 		return NULL;
1935 
1936 	internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs);
1937 	cookie = READ_ONCE(arg->params->u.fmem.global_id);
1938 	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
1939 	if (!mobj) {
1940 		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
1941 		     cookie, internal_offset);
1942 		return NULL;
1943 	}
1944 
1945 	assert(mobj_is_nonsec(mobj));
1946 
1947 	if (mobj->size < size) {
1948 		DMSG("Mobj %#"PRIx64": wrong size", cookie);
1949 		mobj_put(mobj);
1950 		return NULL;
1951 	}
1952 
1953 	if (mobj_inc_map(mobj)) {
1954 		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
1955 		mobj_put(mobj);
1956 		return NULL;
1957 	}
1958 
1959 	return mobj;
1960 }
1961 
1962 struct mobj *thread_rpc_alloc_payload(size_t size)
1963 {
1964 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
1965 }
1966 
1967 struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
1968 {
1969 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
1970 }
1971 
1972 void thread_rpc_free_kernel_payload(struct mobj *mobj)
1973 {
1974 	if (mobj)
1975 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL,
1976 				mobj_get_cookie(mobj), mobj);
1977 }
1978 
1979 void thread_rpc_free_payload(struct mobj *mobj)
1980 {
1981 	if (mobj)
1982 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
1983 				mobj);
1984 }
1985 
1986 struct mobj *thread_rpc_alloc_global_payload(size_t size)
1987 {
1988 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
1989 }
1990 
1991 void thread_rpc_free_global_payload(struct mobj *mobj)
1992 {
1993 	if (mobj)
1994 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL,
1995 				mobj_get_cookie(mobj), mobj);
1996 }
1997 
1998 void thread_spmc_register_secondary_ep(vaddr_t ep)
1999 {
2000 	unsigned long ret = 0;
2001 
2002 	/* Let the SPM know the entry point for secondary CPUs */
2003 	ret = thread_smc(FFA_SECONDARY_EP_REGISTER_64, ep, 0, 0);
2004 
2005 	if (ret != FFA_SUCCESS_32 && ret != FFA_SUCCESS_64)
2006 		EMSG("FFA_SECONDARY_EP_REGISTER_64 ret %#lx", ret);
2007 }
2008 
2009 #if defined(CFG_CORE_SEL1_SPMC)
2010 static TEE_Result spmc_init(void)
2011 {
2012 	my_endpoint_id = SPMC_ENDPOINT_ID;
2013 	DMSG("My endpoint ID %#x", my_endpoint_id);
2014 
2015 	/*
2016 	 * If SPMD think we are version 1.0 it will report version 1.0 to
2017 	 * normal world regardless of what version we query the SPM with.
2018 	 * However, if SPMD think we are version 1.1 it will forward
2019 	 * queries from normal world to let us negotiate version. So by
2020 	 * setting version 1.0 here we should be compatible.
2021 	 *
2022 	 * Note that disagreement on negotiated version means that we'll
2023 	 * have communication problems with normal world.
2024 	 */
2025 	my_rxtx.ffa_vers = FFA_VERSION_1_0;
2026 
2027 	return TEE_SUCCESS;
2028 }
2029 #else /* !defined(CFG_CORE_SEL1_SPMC) */
2030 static bool is_ffa_success(uint32_t fid)
2031 {
2032 #ifdef ARM64
2033 	if (fid == FFA_SUCCESS_64)
2034 		return true;
2035 #endif
2036 	return fid == FFA_SUCCESS_32;
2037 }
2038 
2039 static void spmc_rxtx_map(struct ffa_rxtx *rxtx)
2040 {
2041 	struct thread_smc_args args = {
2042 #ifdef ARM64
2043 		.a0 = FFA_RXTX_MAP_64,
2044 #else
2045 		.a0 = FFA_RXTX_MAP_32,
2046 #endif
2047 		.a1 = virt_to_phys(rxtx->tx),
2048 		.a2 = virt_to_phys(rxtx->rx),
2049 		.a3 = 1,
2050 	};
2051 
2052 	thread_smccc(&args);
2053 	if (!is_ffa_success(args.a0)) {
2054 		if (args.a0 == FFA_ERROR)
2055 			EMSG("rxtx map failed with error %ld", args.a2);
2056 		else
2057 			EMSG("rxtx map failed");
2058 		panic();
2059 	}
2060 }
2061 
2062 static uint16_t get_my_id(void)
2063 {
2064 	struct thread_smc_args args = {
2065 		.a0 = FFA_ID_GET,
2066 	};
2067 
2068 	thread_smccc(&args);
2069 	if (!is_ffa_success(args.a0)) {
2070 		if (args.a0 == FFA_ERROR)
2071 			EMSG("Get id failed with error %ld", args.a2);
2072 		else
2073 			EMSG("Get id failed");
2074 		panic();
2075 	}
2076 
2077 	return args.a2;
2078 }
2079 
2080 static uint32_t get_ffa_version(uint32_t my_version)
2081 {
2082 	struct thread_smc_args args = {
2083 		.a0 = FFA_VERSION,
2084 		.a1 = my_version,
2085 	};
2086 
2087 	thread_smccc(&args);
2088 	if (args.a0 & BIT(31)) {
2089 		EMSG("FF-A version failed with error %ld", args.a0);
2090 		panic();
2091 	}
2092 
2093 	return args.a0;
2094 }
2095 
2096 static void *spmc_retrieve_req(uint64_t cookie,
2097 			       struct ffa_mem_transaction_x *trans)
2098 {
2099 	struct ffa_mem_access *acc_descr_array = NULL;
2100 	struct ffa_mem_access_perm *perm_descr = NULL;
2101 	struct thread_smc_args args = {
2102 		.a0 = FFA_MEM_RETRIEVE_REQ_32,
2103 		.a3 =	0,	/* Address, Using TX -> MBZ */
2104 		.a4 =   0,	/* Using TX -> MBZ */
2105 	};
2106 	size_t size = 0;
2107 	int rc = 0;
2108 
2109 	if (my_rxtx.ffa_vers == FFA_VERSION_1_0) {
2110 		struct ffa_mem_transaction_1_0 *trans_descr = my_rxtx.tx;
2111 
2112 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
2113 		memset(trans_descr, 0, size);
2114 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
2115 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
2116 		trans_descr->global_handle = cookie;
2117 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
2118 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
2119 		trans_descr->mem_access_count = 1;
2120 		acc_descr_array = trans_descr->mem_access_array;
2121 	} else {
2122 		struct ffa_mem_transaction_1_1 *trans_descr = my_rxtx.tx;
2123 
2124 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
2125 		memset(trans_descr, 0, size);
2126 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
2127 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
2128 		trans_descr->global_handle = cookie;
2129 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
2130 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
2131 		trans_descr->mem_access_count = 1;
2132 		trans_descr->mem_access_offs = sizeof(*trans_descr);
2133 		trans_descr->mem_access_size = sizeof(struct ffa_mem_access);
2134 		acc_descr_array = (void *)((vaddr_t)my_rxtx.tx +
2135 					   sizeof(*trans_descr));
2136 	}
2137 	acc_descr_array->region_offs = 0;
2138 	acc_descr_array->reserved = 0;
2139 	perm_descr = &acc_descr_array->access_perm;
2140 	perm_descr->endpoint_id = my_endpoint_id;
2141 	perm_descr->perm = FFA_MEM_ACC_RW;
2142 	perm_descr->flags = 0;
2143 
2144 	args.a1 = size; /* Total Length */
2145 	args.a2 = size; /* Frag Length == Total length */
2146 	thread_smccc(&args);
2147 	if (args.a0 != FFA_MEM_RETRIEVE_RESP) {
2148 		if (args.a0 == FFA_ERROR)
2149 			EMSG("Failed to fetch cookie %#"PRIx64" error code %d",
2150 			     cookie, (int)args.a2);
2151 		else
2152 			EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64,
2153 			     cookie, args.a0);
2154 		return NULL;
2155 	}
2156 	rc = spmc_read_mem_transaction(my_rxtx.ffa_vers, my_rxtx.rx,
2157 				       my_rxtx.size, trans);
2158 	if (rc) {
2159 		EMSG("Memory transaction failure for cookie %#"PRIx64" rc %d",
2160 		     cookie, rc);
2161 		return NULL;
2162 	}
2163 
2164 	return my_rxtx.rx;
2165 }
2166 
2167 void thread_spmc_relinquish(uint64_t cookie)
2168 {
2169 	struct ffa_mem_relinquish *relinquish_desc = my_rxtx.tx;
2170 	struct thread_smc_args args = {
2171 		.a0 = FFA_MEM_RELINQUISH,
2172 	};
2173 
2174 	memset(relinquish_desc, 0, sizeof(*relinquish_desc));
2175 	relinquish_desc->handle = cookie;
2176 	relinquish_desc->flags = 0;
2177 	relinquish_desc->endpoint_count = 1;
2178 	relinquish_desc->endpoint_id_array[0] = my_endpoint_id;
2179 	thread_smccc(&args);
2180 	if (!is_ffa_success(args.a0))
2181 		EMSG("Failed to relinquish cookie %#"PRIx64, cookie);
2182 }
2183 
2184 static int set_pages(struct ffa_address_range *regions,
2185 		     unsigned int num_regions, unsigned int num_pages,
2186 		     struct mobj_ffa *mf)
2187 {
2188 	unsigned int n = 0;
2189 	unsigned int idx = 0;
2190 
2191 	for (n = 0; n < num_regions; n++) {
2192 		unsigned int page_count = READ_ONCE(regions[n].page_count);
2193 		uint64_t addr = READ_ONCE(regions[n].address);
2194 
2195 		if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count))
2196 			return FFA_INVALID_PARAMETERS;
2197 	}
2198 
2199 	if (idx != num_pages)
2200 		return FFA_INVALID_PARAMETERS;
2201 
2202 	return 0;
2203 }
2204 
2205 struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie)
2206 {
2207 	struct mobj_ffa *ret = NULL;
2208 	struct ffa_mem_transaction_x retrieve_desc = { };
2209 	struct ffa_mem_access *descr_array = NULL;
2210 	struct ffa_mem_region *descr = NULL;
2211 	struct mobj_ffa *mf = NULL;
2212 	unsigned int num_pages = 0;
2213 	unsigned int offs = 0;
2214 	void *buf = NULL;
2215 	struct thread_smc_args ffa_rx_release_args = {
2216 		.a0 = FFA_RX_RELEASE
2217 	};
2218 
2219 	/*
2220 	 * OP-TEE is only supporting a single mem_region while the
2221 	 * specification allows for more than one.
2222 	 */
2223 	buf = spmc_retrieve_req(cookie, &retrieve_desc);
2224 	if (!buf) {
2225 		EMSG("Failed to retrieve cookie from rx buffer %#"PRIx64,
2226 		     cookie);
2227 		return NULL;
2228 	}
2229 
2230 	descr_array = (void *)((vaddr_t)buf + retrieve_desc.mem_access_offs);
2231 	offs = READ_ONCE(descr_array->region_offs);
2232 	descr = (struct ffa_mem_region *)((vaddr_t)buf + offs);
2233 
2234 	num_pages = READ_ONCE(descr->total_page_count);
2235 	mf = mobj_ffa_spmc_new(cookie, num_pages);
2236 	if (!mf)
2237 		goto out;
2238 
2239 	if (set_pages(descr->address_range_array,
2240 		      READ_ONCE(descr->address_range_count), num_pages, mf)) {
2241 		mobj_ffa_spmc_delete(mf);
2242 		goto out;
2243 	}
2244 
2245 	ret = mf;
2246 
2247 out:
2248 	/* Release RX buffer after the mem retrieve request. */
2249 	thread_smccc(&ffa_rx_release_args);
2250 
2251 	return ret;
2252 }
2253 
2254 static TEE_Result spmc_init(void)
2255 {
2256 	unsigned int major = 0;
2257 	unsigned int minor __maybe_unused = 0;
2258 	uint32_t my_vers = 0;
2259 	uint32_t vers = 0;
2260 
2261 	my_vers = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR);
2262 	vers = get_ffa_version(my_vers);
2263 	major = (vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK;
2264 	minor = (vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK;
2265 	DMSG("SPMC reported version %u.%u", major, minor);
2266 	if (major != FFA_VERSION_MAJOR) {
2267 		EMSG("Incompatible major version %u, expected %u",
2268 		     major, FFA_VERSION_MAJOR);
2269 		panic();
2270 	}
2271 	if (vers < my_vers)
2272 		my_vers = vers;
2273 	DMSG("Using version %u.%u",
2274 	     (my_vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK,
2275 	     (my_vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK);
2276 	my_rxtx.ffa_vers = my_vers;
2277 
2278 	spmc_rxtx_map(&my_rxtx);
2279 	my_endpoint_id = get_my_id();
2280 	DMSG("My endpoint ID %#x", my_endpoint_id);
2281 
2282 	return TEE_SUCCESS;
2283 }
2284 #endif /* !defined(CFG_CORE_SEL1_SPMC) */
2285 
2286 /*
2287  * boot_final() is always done before exiting at end of boot
2288  * initialization.  In case of virtualization the init-calls are done only
2289  * once a OP-TEE partition has been created. So with virtualization we have
2290  * to initialize via boot_final() to make sure we have a value assigned
2291  * before it's used the first time.
2292  */
2293 #ifdef CFG_NS_VIRTUALIZATION
2294 boot_final(spmc_init);
2295 #else
2296 service_init(spmc_init);
2297 #endif
2298