xref: /optee_os/core/arch/arm/kernel/thread_spmc.c (revision 209c34dc03563af70f1e406f304008495dae7a5e)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2020-2023, Linaro Limited.
4  * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
5  */
6 
7 #include <assert.h>
8 #include <ffa.h>
9 #include <initcall.h>
10 #include <io.h>
11 #include <kernel/interrupt.h>
12 #include <kernel/notif.h>
13 #include <kernel/panic.h>
14 #include <kernel/secure_partition.h>
15 #include <kernel/spinlock.h>
16 #include <kernel/spmc_sp_handler.h>
17 #include <kernel/tee_misc.h>
18 #include <kernel/thread.h>
19 #include <kernel/thread_private.h>
20 #include <kernel/thread_spmc.h>
21 #include <kernel/virtualization.h>
22 #include <mm/core_mmu.h>
23 #include <mm/mobj.h>
24 #include <optee_ffa.h>
25 #include <optee_msg.h>
26 #include <optee_rpc_cmd.h>
27 #include <sm/optee_smc.h>
28 #include <string.h>
29 #include <sys/queue.h>
30 #include <tee/entry_std.h>
31 #include <tee/uuid.h>
32 #include <util.h>
33 
34 #if defined(CFG_CORE_SEL1_SPMC)
35 struct mem_share_state {
36 	struct mobj_ffa *mf;
37 	unsigned int page_count;
38 	unsigned int region_count;
39 	unsigned int current_page_idx;
40 };
41 
42 struct mem_frag_state {
43 	struct mem_share_state share;
44 	tee_mm_entry_t *mm;
45 	unsigned int frag_offset;
46 	SLIST_ENTRY(mem_frag_state) link;
47 };
48 #endif
49 
50 static unsigned int spmc_notif_lock = SPINLOCK_UNLOCK;
51 static int do_bottom_half_value = -1;
52 static uint16_t notif_vm_id;
53 static bool spmc_notif_is_ready;
54 
55 /* Initialized in spmc_init() below */
56 static uint16_t my_endpoint_id __nex_bss;
57 #ifdef CFG_CORE_SEL1_SPMC
58 static const uint32_t my_part_props = FFA_PART_PROP_DIRECT_REQ_RECV |
59 				      FFA_PART_PROP_DIRECT_REQ_SEND |
60 #ifdef CFG_NS_VIRTUALIZATION
61 				      FFA_PART_PROP_NOTIF_CREATED |
62 				      FFA_PART_PROP_NOTIF_DESTROYED |
63 #endif
64 #ifdef ARM64
65 				      FFA_PART_PROP_AARCH64_STATE |
66 #endif
67 				      FFA_PART_PROP_IS_PE_ID;
68 
69 static uint32_t my_uuid_words[] = {
70 	/*
71 	 * - if the SPMC is in S-EL2, this UUID describes OP-TEE as an S-EL1
72 	 *   SP, or
73 	 * - if the SPMC is in S-EL1, then this UUID is for OP-TEE as a
74 	 *   logical partition, residing in the same exception level as the
75 	 *   SPMC.
76 	 * UUID 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
77 	 */
78 	0xe0786148, 0xe311f8e7, 0x02005ebc, 0x1bc5d5a5,
79 };
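
/*
 * Worked example of the encoding above: the UUID octets are stored as
 * four 32-bit words loaded little-endian, so the leading octets
 * 48 61 78 e0 of 486178e0-... become the first word 0xe0786148, and
 * likewise for the remaining three words.
 */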
80 
81 /*
82  * If struct ffa_rxtx::size is 0, RX/TX buffers are not mapped or initialized.
83  *
84  * struct ffa_rxtx::spinlock protects the variables below from concurrent
85  * access. This includes the use of the content of struct ffa_rxtx::rx and
86  * @frag_state_head.
87  *
88  * struct ffa_rxtx::tx_is_mine is true when we may write to struct
89  * ffa_rxtx::tx and false when it is owned by normal world.
90  *
91  * Note that we can't prevent normal world from updating the content of
92  * these buffers so we must always be careful when reading, even while we
93  * hold the lock.
94  */
95 
96 static struct ffa_rxtx my_rxtx __nex_bss;
97 
98 static bool is_nw_buf(struct ffa_rxtx *rxtx)
99 {
100 	return rxtx == &my_rxtx;
101 }
102 
103 static SLIST_HEAD(mem_frag_state_head, mem_frag_state) frag_state_head =
104 	SLIST_HEAD_INITIALIZER(&frag_state_head);
105 
106 static uint64_t notif_pending_bitmap;
107 static uint64_t notif_bound_bitmap;
108 static bool notif_vm_id_valid;
109 static int notif_intid = -1;
110 #else
111 static uint8_t __rx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
112 static uint8_t __tx_buf[SMALL_PAGE_SIZE] __aligned(SMALL_PAGE_SIZE);
113 static struct ffa_rxtx my_rxtx = {
114 	.rx = __rx_buf,
115 	.tx = __tx_buf,
116 	.size = sizeof(__rx_buf),
117 };
118 #endif
119 
120 static uint32_t swap_src_dst(uint32_t src_dst)
121 {
122 	return (src_dst >> 16) | (src_dst << 16);
123 }
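
/*
 * Example with made-up endpoint IDs: for src_dst 0x80008001 (sender
 * 0x8000 in the top half, receiver 0x8001 in the bottom half),
 * swap_src_dst() yields 0x80018000 so a response travels in the
 * opposite direction of the request.
 */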
124 
125 static uint16_t get_sender_id(uint32_t src_dst)
126 {
127 	return src_dst >> 16;
128 }
129 
130 void spmc_set_args(struct thread_smc_args *args, uint32_t fid, uint32_t src_dst,
131 		   uint32_t w2, uint32_t w3, uint32_t w4, uint32_t w5)
132 {
133 	*args = (struct thread_smc_args){ .a0 = fid,
134 					  .a1 = src_dst,
135 					  .a2 = w2,
136 					  .a3 = w3,
137 					  .a4 = w4,
138 					  .a5 = w5, };
139 }
140 
141 uint32_t spmc_exchange_version(uint32_t vers, struct ffa_rxtx *rxtx)
142 {
143 	/*
144 	 * No locking needed: if the caller makes concurrent calls to this
145 	 * it's only making a mess for itself. We must be able to renegotiate
146 	 * the FF-A version in order to support differing versions between
147 	 * the loader and the driver.
148 	 */
149 	if (vers < FFA_VERSION_1_1)
150 		rxtx->ffa_vers = FFA_VERSION_1_0;
151 	else
152 		rxtx->ffa_vers = FFA_VERSION_1_1;
153 
154 	return rxtx->ffa_vers;
155 }
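
/*
 * Sketch of the negotiation above: a caller announcing FF-A 1.0
 * (0x10000) is pinned to FFA_VERSION_1_0 while 1.1 (0x10001) or newer
 * currently lands on FFA_VERSION_1_1; the agreed version is recorded in
 * the rxtx state and echoed back to the caller.
 */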
156 
157 static bool is_ffa_success(uint32_t fid)
158 {
159 #ifdef ARM64
160 	if (fid == FFA_SUCCESS_64)
161 		return true;
162 #endif
163 	return fid == FFA_SUCCESS_32;
164 }
165 
166 static int32_t get_ffa_ret_code(const struct thread_smc_args *args)
167 {
168 	if (is_ffa_success(args->a0))
169 		return FFA_OK;
170 	if (args->a0 == FFA_ERROR && args->a2)
171 		return args->a2;
172 	return FFA_NOT_SUPPORTED;
173 }
174 
175 static int ffa_simple_call(uint32_t fid, unsigned long a1, unsigned long a2,
176 			   unsigned long a3, unsigned long a4)
177 {
178 	struct thread_smc_args args = {
179 		.a0 = fid,
180 		.a1 = a1,
181 		.a2 = a2,
182 		.a3 = a3,
183 		.a4 = a4,
184 	};
185 
186 	thread_smccc(&args);
187 
188 	return get_ffa_ret_code(&args);
189 }
190 
191 static int __maybe_unused ffa_features(uint32_t id)
192 {
193 	return ffa_simple_call(FFA_FEATURES, id, 0, 0, 0);
194 }
195 
196 static int __maybe_unused ffa_set_notification(uint16_t dst, uint16_t src,
197 					       uint32_t flags, uint64_t bitmap)
198 {
199 	return ffa_simple_call(FFA_NOTIFICATION_SET,
200 			       SHIFT_U32(src, 16) | dst, flags,
201 			       low32_from_64(bitmap), high32_from_64(bitmap));
202 }
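
/*
 * Register packing example for the call above, with hypothetical IDs:
 * notifying VM 0x0001 from endpoint 0x8001 with notification bit 3 set
 * gives a1 = 0x80010001 (sender in the top half) and the 64-bit bitmap
 * BIT64(3) split as a3 = 0x8 (low word) and a4 = 0x0 (high word).
 */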
203 
204 #if defined(CFG_CORE_SEL1_SPMC)
205 static void handle_features(struct thread_smc_args *args)
206 {
207 	uint32_t ret_fid = FFA_ERROR;
208 	uint32_t ret_w2 = FFA_NOT_SUPPORTED;
209 
210 	switch (args->a1) {
211 	case FFA_FEATURE_SCHEDULE_RECV_INTR:
212 		if (spmc_notif_is_ready) {
213 			ret_fid = FFA_SUCCESS_32;
214 			ret_w2 = notif_intid;
215 		}
216 		break;
217 
218 #ifdef ARM64
219 	case FFA_RXTX_MAP_64:
220 #endif
221 	case FFA_RXTX_MAP_32:
222 		ret_fid = FFA_SUCCESS_32;
223 		ret_w2 = 0; /* Minimum buffer size and alignment boundary of 4KiB */
224 		break;
225 #ifdef ARM64
226 	case FFA_MEM_SHARE_64:
227 #endif
228 	case FFA_MEM_SHARE_32:
229 		ret_fid = FFA_SUCCESS_32;
230 		/*
231 		 * Partition manager supports transmission of a memory
232 		 * transaction descriptor in a buffer dynamically allocated
233 		 * by the endpoint.
234 		 */
235 		ret_w2 = BIT(0);
236 		break;
237 
238 	case FFA_ERROR:
239 	case FFA_VERSION:
240 	case FFA_SUCCESS_32:
241 #ifdef ARM64
242 	case FFA_SUCCESS_64:
243 #endif
244 	case FFA_FEATURES:
245 	case FFA_SPM_ID_GET:
246 	case FFA_MEM_FRAG_TX:
247 	case FFA_MEM_RECLAIM:
248 	case FFA_MSG_SEND_DIRECT_REQ_64:
249 	case FFA_MSG_SEND_DIRECT_REQ_32:
250 	case FFA_INTERRUPT:
251 	case FFA_PARTITION_INFO_GET:
252 	case FFA_RXTX_UNMAP:
253 	case FFA_RX_RELEASE:
254 	case FFA_FEATURE_MANAGED_EXIT_INTR:
255 	case FFA_NOTIFICATION_BITMAP_CREATE:
256 	case FFA_NOTIFICATION_BITMAP_DESTROY:
257 	case FFA_NOTIFICATION_BIND:
258 	case FFA_NOTIFICATION_UNBIND:
259 	case FFA_NOTIFICATION_SET:
260 	case FFA_NOTIFICATION_GET:
261 	case FFA_NOTIFICATION_INFO_GET_32:
262 #ifdef ARM64
263 	case FFA_NOTIFICATION_INFO_GET_64:
264 #endif
265 		ret_fid = FFA_SUCCESS_32;
266 		ret_w2 = FFA_PARAM_MBZ;
267 		break;
268 	default:
269 		break;
270 	}
271 
272 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, ret_w2, FFA_PARAM_MBZ,
273 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
274 }
275 
276 static int map_buf(paddr_t pa, unsigned int sz, void **va_ret)
277 {
278 	tee_mm_entry_t *mm = NULL;
279 
280 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
281 		return FFA_INVALID_PARAMETERS;
282 
283 	mm = tee_mm_alloc(&tee_mm_shm, sz);
284 	if (!mm)
285 		return FFA_NO_MEMORY;
286 
287 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pa,
288 					  sz / SMALL_PAGE_SIZE,
289 					  MEM_AREA_NSEC_SHM)) {
290 		tee_mm_free(mm);
291 		return FFA_INVALID_PARAMETERS;
292 	}
293 
294 	*va_ret = (void *)tee_mm_get_smem(mm);
295 	return 0;
296 }
297 
298 static void handle_spm_id_get(struct thread_smc_args *args)
299 {
300 	spmc_set_args(args, FFA_SUCCESS_32, FFA_PARAM_MBZ, my_endpoint_id,
301 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
302 }
303 
304 static void unmap_buf(void *va, size_t sz)
305 {
306 	tee_mm_entry_t *mm = tee_mm_find(&tee_mm_shm, (vaddr_t)va);
307 
308 	assert(mm);
309 	core_mmu_unmap_pages(tee_mm_get_smem(mm), sz / SMALL_PAGE_SIZE);
310 	tee_mm_free(mm);
311 }
312 
313 void spmc_handle_rxtx_map(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
314 {
315 	int rc = 0;
316 	uint32_t ret_fid = FFA_ERROR;
317 	unsigned int sz = 0;
318 	paddr_t rx_pa = 0;
319 	paddr_t tx_pa = 0;
320 	void *rx = NULL;
321 	void *tx = NULL;
322 
323 	cpu_spin_lock(&rxtx->spinlock);
324 
325 	if (args->a3 & GENMASK_64(63, 6)) {
326 		rc = FFA_INVALID_PARAMETERS;
327 		goto out;
328 	}
329 
330 	sz = args->a3 * SMALL_PAGE_SIZE;
331 	if (!sz) {
332 		rc = FFA_INVALID_PARAMETERS;
333 		goto out;
334 	}
335 	/* TX/RX are swapped compared to the caller */
336 	tx_pa = args->a2;
337 	rx_pa = args->a1;
338 
339 	if (rxtx->size) {
340 		rc = FFA_DENIED;
341 		goto out;
342 	}
343 
344 	/*
345 	 * If the buffer comes from an SP, the address is virtual and already
346 	 * mapped.
347 	 */
348 	if (is_nw_buf(rxtx)) {
349 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
350 			enum teecore_memtypes mt = MEM_AREA_NEX_NSEC_SHM;
351 			bool tx_alloced = false;
352 
353 			/*
354 			 * With virtualization we establish this mapping in
355 			 * the nexus mapping which then is replicated to
356 			 * each partition.
357 			 *
358 			 * This means that this mapping must be done before
359 			 * any partition is created and then must not be
360 			 * changed.
361 			 */
362 
363 			/*
364 			 * core_mmu_add_mapping() may reuse previous
365 			 * mappings. First check if there's any mappings to
366 			 * reuse so we know how to clean up in case of
367 			 * failure.
368 			 */
369 			tx = phys_to_virt(tx_pa, mt, sz);
370 			rx = phys_to_virt(rx_pa, mt, sz);
371 			if (!tx) {
372 				tx = core_mmu_add_mapping(mt, tx_pa, sz);
373 				if (!tx) {
374 					rc = FFA_NO_MEMORY;
375 					goto out;
376 				}
377 				tx_alloced = true;
378 			}
379 			if (!rx)
380 				rx = core_mmu_add_mapping(mt, rx_pa, sz);
381 
382 			if (!rx) {
383 				if (tx_alloced && tx)
384 					core_mmu_remove_mapping(mt, tx, sz);
385 				rc = FFA_NO_MEMORY;
386 				goto out;
387 			}
388 		} else {
389 			rc = map_buf(tx_pa, sz, &tx);
390 			if (rc)
391 				goto out;
392 			rc = map_buf(rx_pa, sz, &rx);
393 			if (rc) {
394 				unmap_buf(tx, sz);
395 				goto out;
396 			}
397 		}
398 		rxtx->tx = tx;
399 		rxtx->rx = rx;
400 	} else {
401 		if ((tx_pa & SMALL_PAGE_MASK) || (rx_pa & SMALL_PAGE_MASK)) {
402 			rc = FFA_INVALID_PARAMETERS;
403 			goto out;
404 		}
405 
406 		if (!virt_to_phys((void *)tx_pa) ||
407 		    !virt_to_phys((void *)rx_pa)) {
408 			rc = FFA_INVALID_PARAMETERS;
409 			goto out;
410 		}
411 
412 		rxtx->tx = (void *)tx_pa;
413 		rxtx->rx = (void *)rx_pa;
414 	}
415 
416 	rxtx->size = sz;
417 	rxtx->tx_is_mine = true;
418 	ret_fid = FFA_SUCCESS_32;
419 	DMSG("Mapped tx %#"PRIxPA" size %#x @ %p", tx_pa, sz, tx);
420 	DMSG("Mapped rx %#"PRIxPA" size %#x @ %p", rx_pa, sz, rx);
421 out:
422 	cpu_spin_unlock(&rxtx->spinlock);
423 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
424 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
425 }
426 
427 void spmc_handle_rxtx_unmap(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
428 {
429 	uint32_t ret_fid = FFA_ERROR;
430 	int rc = FFA_INVALID_PARAMETERS;
431 
432 	cpu_spin_lock(&rxtx->spinlock);
433 
434 	if (!rxtx->size)
435 		goto out;
436 
437 	/* We don't unmap the SP memory as the SP might still use it */
438 	if (is_nw_buf(rxtx)) {
439 		unmap_buf(rxtx->rx, rxtx->size);
440 		unmap_buf(rxtx->tx, rxtx->size);
441 	}
442 	rxtx->size = 0;
443 	rxtx->rx = NULL;
444 	rxtx->tx = NULL;
445 	ret_fid = FFA_SUCCESS_32;
446 	rc = 0;
447 out:
448 	cpu_spin_unlock(&rxtx->spinlock);
449 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
450 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
451 }
452 
453 void spmc_handle_rx_release(struct thread_smc_args *args, struct ffa_rxtx *rxtx)
454 {
455 	uint32_t ret_fid = 0;
456 	int rc = 0;
457 
458 	cpu_spin_lock(&rxtx->spinlock);
459 	/* The sender's RX is our TX */
460 	if (!rxtx->size || rxtx->tx_is_mine) {
461 		ret_fid = FFA_ERROR;
462 		rc = FFA_DENIED;
463 	} else {
464 		ret_fid = FFA_SUCCESS_32;
465 		rc = 0;
466 		rxtx->tx_is_mine = true;
467 	}
468 	cpu_spin_unlock(&rxtx->spinlock);
469 
470 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
471 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
472 }
473 
474 static bool is_nil_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
475 {
476 	return !w0 && !w1 && !w2 && !w3;
477 }
478 
479 static bool is_my_uuid(uint32_t w0, uint32_t w1, uint32_t w2, uint32_t w3)
480 {
481 	/*
482 	 * This depends on which UUID we have been assigned.
483 	 * TODO add a generic mechanism to obtain our UUID.
484 	 *
485 	 * The test below is for the hard coded UUID
486 	 * 486178e0-e7f8-11e3-bc5e-0002a5d5c51b
487 	 */
488 	return w0 == my_uuid_words[0] && w1 == my_uuid_words[1] &&
489 	       w2 == my_uuid_words[2] && w3 == my_uuid_words[3];
490 }
491 
492 TEE_Result spmc_fill_partition_entry(uint32_t ffa_vers, void *buf, size_t blen,
493 				     size_t idx, uint16_t endpoint_id,
494 				     uint16_t execution_context,
495 				     uint32_t part_props,
496 				     const uint32_t uuid_words[4])
497 {
498 	struct ffa_partition_info_x *fpi = NULL;
499 	size_t fpi_size = sizeof(*fpi);
500 
501 	if (ffa_vers >= FFA_VERSION_1_1)
502 		fpi_size += FFA_UUID_SIZE;
503 
504 	if ((idx + 1) * fpi_size > blen)
505 		return TEE_ERROR_OUT_OF_MEMORY;
506 
507 	fpi = (void *)((vaddr_t)buf + idx * fpi_size);
508 	fpi->id = endpoint_id;
509 	/* Number of execution contexts implemented by this partition */
510 	fpi->execution_context = execution_context;
511 
512 	fpi->partition_properties = part_props;
513 
514 	if (ffa_vers >= FFA_VERSION_1_1) {
515 		if (uuid_words)
516 			memcpy(fpi->uuid, uuid_words, FFA_UUID_SIZE);
517 		else
518 			memset(fpi->uuid, 0, FFA_UUID_SIZE);
519 	}
520 
521 	return TEE_SUCCESS;
522 }
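
/*
 * Layout sketch for the function above: an FF-A 1.0 entry is just
 * sizeof(struct ffa_partition_info_x) bytes while from FF-A 1.1 a
 * 16-byte UUID is appended to each entry, so entry idx starts at
 * idx * fpi_size and the (idx + 1) * fpi_size check bounds the whole
 * entry against blen.
 */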
523 
524 static int handle_partition_info_get_all(size_t *elem_count,
525 					 struct ffa_rxtx *rxtx, bool count_only)
526 {
527 	if (!count_only) {
528 		/* Add OP-TEE SP */
529 		if (spmc_fill_partition_entry(rxtx->ffa_vers, rxtx->tx,
530 					      rxtx->size, 0, my_endpoint_id,
531 					      CFG_TEE_CORE_NB_CORE,
532 					      my_part_props, my_uuid_words))
533 			return FFA_NO_MEMORY;
534 	}
535 	*elem_count = 1;
536 
537 	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
538 		if (sp_partition_info_get(rxtx->ffa_vers, rxtx->tx, rxtx->size,
539 					  NULL, elem_count, count_only))
540 			return FFA_NO_MEMORY;
541 	}
542 
543 	return FFA_OK;
544 }
545 
546 void spmc_handle_partition_info_get(struct thread_smc_args *args,
547 				    struct ffa_rxtx *rxtx)
548 {
549 	TEE_Result res = TEE_SUCCESS;
550 	uint32_t ret_fid = FFA_ERROR;
551 	uint32_t fpi_size = 0;
552 	uint32_t rc = 0;
553 	bool count_only = args->a5 & FFA_PARTITION_INFO_GET_COUNT_FLAG;
554 
555 	if (!count_only) {
556 		cpu_spin_lock(&rxtx->spinlock);
557 
558 		if (!rxtx->size || !rxtx->tx_is_mine) {
559 			rc = FFA_BUSY;
560 			goto out;
561 		}
562 	}
563 
564 	if (is_nil_uuid(args->a1, args->a2, args->a3, args->a4)) {
565 		size_t elem_count = 0;
566 
567 		ret_fid = handle_partition_info_get_all(&elem_count, rxtx,
568 							count_only);
569 
570 		if (ret_fid) {
571 			rc = ret_fid;
572 			ret_fid = FFA_ERROR;
573 		} else {
574 			ret_fid = FFA_SUCCESS_32;
575 			rc = elem_count;
576 		}
577 
578 		goto out;
579 	}
580 
581 	if (is_my_uuid(args->a1, args->a2, args->a3, args->a4)) {
582 		if (!count_only) {
583 			res = spmc_fill_partition_entry(rxtx->ffa_vers,
584 							rxtx->tx, rxtx->size, 0,
585 							my_endpoint_id,
586 							CFG_TEE_CORE_NB_CORE,
587 							my_part_props,
588 							my_uuid_words);
589 			if (res) {
590 				ret_fid = FFA_ERROR;
591 				rc = FFA_INVALID_PARAMETERS;
592 				goto out;
593 			}
594 		}
595 		rc = 1;
596 	} else if (IS_ENABLED(CFG_SECURE_PARTITION)) {
597 		uint32_t uuid_array[4] = { 0 };
598 		TEE_UUID uuid = { };
599 		size_t count = 0;
600 
601 		uuid_array[0] = args->a1;
602 		uuid_array[1] = args->a2;
603 		uuid_array[2] = args->a3;
604 		uuid_array[3] = args->a4;
605 		tee_uuid_from_octets(&uuid, (uint8_t *)uuid_array);
606 
607 		res = sp_partition_info_get(rxtx->ffa_vers, rxtx->tx,
608 					    rxtx->size, &uuid, &count,
609 					    count_only);
610 		if (res != TEE_SUCCESS) {
611 			ret_fid = FFA_ERROR;
612 			rc = FFA_INVALID_PARAMETERS;
613 			goto out;
614 		}
615 		rc = count;
616 	} else {
617 		ret_fid = FFA_ERROR;
618 		rc = FFA_INVALID_PARAMETERS;
619 		goto out;
620 	}
621 
622 	ret_fid = FFA_SUCCESS_32;
623 
624 out:
625 	if (ret_fid == FFA_SUCCESS_32 && !count_only &&
626 	    rxtx->ffa_vers >= FFA_VERSION_1_1)
627 		fpi_size = sizeof(struct ffa_partition_info_x) + FFA_UUID_SIZE;
628 
629 	spmc_set_args(args, ret_fid, FFA_PARAM_MBZ, rc, fpi_size,
630 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
631 	if (!count_only) {
632 		rxtx->tx_is_mine = false;
633 		cpu_spin_unlock(&rxtx->spinlock);
634 	}
635 }
636 
637 static void spmc_handle_run(struct thread_smc_args *args)
638 {
639 	uint16_t endpoint = FFA_TARGET_INFO_GET_SP_ID(args->a1);
640 	uint16_t thread_id = FFA_TARGET_INFO_GET_VCPU_ID(args->a1);
641 	uint32_t rc = FFA_OK;
642 
643 	if (endpoint != my_endpoint_id) {
644 		/*
645 		 * The endpoint should be an SP, try to resume the SP from
646 		 * preempted into busy state.
647 		 */
648 		rc = spmc_sp_resume_from_preempted(endpoint);
649 		if (rc)
650 			goto out;
651 	}
652 
653 	thread_resume_from_rpc(thread_id, 0, 0, 0, 0);
654 
655 	/* thread_resume_from_rpc() returns only if the thread_id is invalid */
656 	rc = FFA_INVALID_PARAMETERS;
657 
658 out:
659 	spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, rc, FFA_PARAM_MBZ,
660 		      FFA_PARAM_MBZ, FFA_PARAM_MBZ);
661 }
662 #endif /*CFG_CORE_SEL1_SPMC*/
663 
664 static uint32_t spmc_enable_async_notif(uint32_t bottom_half_value,
665 					uint16_t vm_id)
666 {
667 	uint32_t old_itr_status = 0;
668 
669 	if (!spmc_notif_is_ready) {
670 		/*
671 		 * This should never happen if normal world respects the
672 		 * exchanged capabilities.
673 		 */
674 		EMSG("Asynchronous notifications are not ready");
675 		return TEE_ERROR_NOT_IMPLEMENTED;
676 	}
677 
678 	if (bottom_half_value >= OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE) {
679 		EMSG("Invalid bottom half value %"PRIu32, bottom_half_value);
680 		return TEE_ERROR_BAD_PARAMETERS;
681 	}
682 
683 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
684 	do_bottom_half_value = bottom_half_value;
685 	if (!IS_ENABLED(CFG_CORE_SEL1_SPMC))
686 		notif_vm_id = vm_id;
687 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
688 
689 	notif_deliver_atomic_event(NOTIF_EVENT_STARTED);
690 	return TEE_SUCCESS;
691 }
692 
693 static void handle_yielding_call(struct thread_smc_args *args,
694 				 uint32_t direct_resp_fid)
695 {
696 	TEE_Result res = 0;
697 
698 	thread_check_canaries();
699 
700 #ifdef ARM64
701 	/* Saving this for an eventual RPC */
702 	thread_get_core_local()->direct_resp_fid = direct_resp_fid;
703 #endif
704 
705 	if (args->a3 == OPTEE_FFA_YIELDING_CALL_RESUME) {
706 		/* Note connection to struct thread_rpc_arg::ret */
707 		thread_resume_from_rpc(args->a7, args->a4, args->a5, args->a6,
708 				       0);
709 		res = TEE_ERROR_BAD_PARAMETERS;
710 	} else {
711 		thread_alloc_and_run(args->a1, args->a3, args->a4, args->a5,
712 				     args->a6, args->a7);
713 		res = TEE_ERROR_BUSY;
714 	}
715 	spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1),
716 		      0, res, 0, 0);
717 }
718 
719 static uint32_t handle_unregister_shm(uint32_t a4, uint32_t a5)
720 {
721 	uint64_t cookie = reg_pair_to_64(a5, a4);
722 	uint32_t res = 0;
723 
724 	res = mobj_ffa_unregister_by_cookie(cookie);
725 	switch (res) {
726 	case TEE_SUCCESS:
727 	case TEE_ERROR_ITEM_NOT_FOUND:
728 		return 0;
729 	case TEE_ERROR_BUSY:
730 		EMSG("res %#"PRIx32, res);
731 		return FFA_BUSY;
732 	default:
733 		EMSG("res %#"PRIx32, res);
734 		return FFA_INVALID_PARAMETERS;
735 	}
736 }
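
/*
 * The cookie reconstruction above mirrors how the caller split it:
 * reg_pair_to_64(a5, a4) places a5 in the top 32 bits and a4 in the
 * bottom, so for example a cookie of 0x123456789a arriving as
 * a4 = 0x3456789a and a5 = 0x12 is reassembled intact.
 */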
737 
738 static void handle_blocking_call(struct thread_smc_args *args,
739 				 uint32_t direct_resp_fid)
740 {
741 	uint32_t sec_caps = 0;
742 
743 	switch (args->a3) {
744 	case OPTEE_FFA_GET_API_VERSION:
745 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
746 			      OPTEE_FFA_VERSION_MAJOR, OPTEE_FFA_VERSION_MINOR,
747 			      0);
748 		break;
749 	case OPTEE_FFA_GET_OS_VERSION:
750 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
751 			      CFG_OPTEE_REVISION_MAJOR,
752 			      CFG_OPTEE_REVISION_MINOR, TEE_IMPL_GIT_SHA1);
753 		break;
754 	case OPTEE_FFA_EXCHANGE_CAPABILITIES:
755 		sec_caps = OPTEE_FFA_SEC_CAP_ARG_OFFSET;
756 		if (spmc_notif_is_ready)
757 			sec_caps |= OPTEE_FFA_SEC_CAP_ASYNC_NOTIF;
758 		spmc_set_args(args, direct_resp_fid,
759 			      swap_src_dst(args->a1), 0, 0,
760 			      THREAD_RPC_MAX_NUM_PARAMS, sec_caps);
761 		break;
762 	case OPTEE_FFA_UNREGISTER_SHM:
763 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
764 			      handle_unregister_shm(args->a4, args->a5), 0, 0);
765 		break;
766 	case OPTEE_FFA_ENABLE_ASYNC_NOTIF:
767 		spmc_set_args(args, direct_resp_fid,
768 			      swap_src_dst(args->a1), 0,
769 			      spmc_enable_async_notif(args->a4,
770 						      FFA_SRC(args->a1)),
771 			      0, 0);
772 		break;
773 	default:
774 		EMSG("Unhandled blocking service ID %#"PRIx32,
775 		     (uint32_t)args->a3);
776 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
777 			      TEE_ERROR_BAD_PARAMETERS, 0, 0);
778 	}
779 }
780 
781 static void handle_framework_direct_request(struct thread_smc_args *args,
782 					    struct ffa_rxtx *rxtx,
783 					    uint32_t direct_resp_fid)
784 {
785 	uint32_t w0 = FFA_ERROR;
786 	uint32_t w1 = FFA_PARAM_MBZ;
787 	uint32_t w2 = FFA_NOT_SUPPORTED;
788 	uint32_t w3 = FFA_PARAM_MBZ;
789 
790 	switch (args->a2 & FFA_MSG_TYPE_MASK) {
791 	case FFA_MSG_SEND_VM_CREATED:
792 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
793 			uint16_t guest_id = args->a5;
794 			TEE_Result res = virt_guest_created(guest_id);
795 
796 			w0 = direct_resp_fid;
797 			w1 = swap_src_dst(args->a1);
798 			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_CREATED;
799 			if (res == TEE_SUCCESS)
800 				w3 = FFA_OK;
801 			else if (res == TEE_ERROR_OUT_OF_MEMORY)
802 				w3 = FFA_DENIED;
803 			else
804 				w3 = FFA_INVALID_PARAMETERS;
805 		}
806 		break;
807 	case FFA_MSG_SEND_VM_DESTROYED:
808 		if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
809 			uint16_t guest_id = args->a5;
810 			TEE_Result res = virt_guest_destroyed(guest_id);
811 
812 			w0 = direct_resp_fid;
813 			w1 = swap_src_dst(args->a1);
814 			w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_RESP_VM_DESTROYED;
815 			if (res == TEE_SUCCESS)
816 				w3 = FFA_OK;
817 			else
818 				w3 = FFA_INVALID_PARAMETERS;
819 		}
820 		break;
821 	case FFA_MSG_VERSION_REQ:
822 		w0 = direct_resp_fid;
823 		w1 = swap_src_dst(args->a1);
824 		w2 = FFA_MSG_FLAG_FRAMEWORK | FFA_MSG_VERSION_RESP;
825 		w3 = spmc_exchange_version(args->a3, rxtx);
826 		break;
827 	default:
828 		break;
829 	}
830 	spmc_set_args(args, w0, w1, w2, w3, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
831 }
832 
833 static void handle_direct_request(struct thread_smc_args *args,
834 				  struct ffa_rxtx *rxtx)
835 {
836 	uint32_t direct_resp_fid = 0;
837 
838 	if (IS_ENABLED(CFG_SECURE_PARTITION) &&
839 	    FFA_DST(args->a1) != my_endpoint_id) {
840 		spmc_sp_start_thread(args);
841 		return;
842 	}
843 
844 	if (OPTEE_SMC_IS_64(args->a0))
845 		direct_resp_fid = FFA_MSG_SEND_DIRECT_RESP_64;
846 	else
847 		direct_resp_fid = FFA_MSG_SEND_DIRECT_RESP_32;
848 
849 	if (args->a2 & FFA_MSG_FLAG_FRAMEWORK) {
850 		handle_framework_direct_request(args, rxtx, direct_resp_fid);
851 		return;
852 	}
853 
854 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
855 	    virt_set_guest(get_sender_id(args->a1))) {
856 		spmc_set_args(args, direct_resp_fid, swap_src_dst(args->a1), 0,
857 			      TEE_ERROR_ITEM_NOT_FOUND, 0, 0);
858 		return;
859 	}
860 
861 	if (args->a3 & BIT32(OPTEE_FFA_YIELDING_CALL_BIT))
862 		handle_yielding_call(args, direct_resp_fid);
863 	else
864 		handle_blocking_call(args, direct_resp_fid);
865 
866 	/*
867 	 * Note that handle_yielding_call() typically only returns if a
868 	 * thread cannot be allocated or found. virt_unset_guest() is also
869 	 * called from thread_state_suspend() and thread_state_free().
870 	 */
871 	virt_unset_guest();
872 }
873 
874 int spmc_read_mem_transaction(uint32_t ffa_vers, void *buf, size_t blen,
875 			      struct ffa_mem_transaction_x *trans)
876 {
877 	uint16_t mem_reg_attr = 0;
878 	uint32_t flags = 0;
879 	uint32_t count = 0;
880 	uint32_t offs = 0;
881 	uint32_t size = 0;
882 	size_t n = 0;
883 
884 	if (!IS_ALIGNED_WITH_TYPE(buf, uint64_t))
885 		return FFA_INVALID_PARAMETERS;
886 
887 	if (ffa_vers >= FFA_VERSION_1_1) {
888 		struct ffa_mem_transaction_1_1 *descr = NULL;
889 
890 		if (blen < sizeof(*descr))
891 			return FFA_INVALID_PARAMETERS;
892 
893 		descr = buf;
894 		trans->sender_id = READ_ONCE(descr->sender_id);
895 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
896 		flags = READ_ONCE(descr->flags);
897 		trans->global_handle = READ_ONCE(descr->global_handle);
898 		trans->tag = READ_ONCE(descr->tag);
899 
900 		count = READ_ONCE(descr->mem_access_count);
901 		size = READ_ONCE(descr->mem_access_size);
902 		offs = READ_ONCE(descr->mem_access_offs);
903 	} else {
904 		struct ffa_mem_transaction_1_0 *descr = NULL;
905 
906 		if (blen < sizeof(*descr))
907 			return FFA_INVALID_PARAMETERS;
908 
909 		descr = buf;
910 		trans->sender_id = READ_ONCE(descr->sender_id);
911 		mem_reg_attr = READ_ONCE(descr->mem_reg_attr);
912 		flags = READ_ONCE(descr->flags);
913 		trans->global_handle = READ_ONCE(descr->global_handle);
914 		trans->tag = READ_ONCE(descr->tag);
915 
916 		count = READ_ONCE(descr->mem_access_count);
917 		size = sizeof(struct ffa_mem_access);
918 		offs = offsetof(struct ffa_mem_transaction_1_0,
919 				mem_access_array);
920 	}
921 
922 	if (mem_reg_attr > UINT8_MAX || flags > UINT8_MAX ||
923 	    size > UINT8_MAX || count > UINT8_MAX || offs > UINT16_MAX)
924 		return FFA_INVALID_PARAMETERS;
925 
926 	/* Check that the endpoint memory access descriptor array fits */
927 	if (MUL_OVERFLOW(size, count, &n) || ADD_OVERFLOW(offs, n, &n) ||
928 	    n > blen)
929 		return FFA_INVALID_PARAMETERS;
930 
931 	trans->mem_reg_attr = mem_reg_attr;
932 	trans->flags = flags;
933 	trans->mem_access_size = size;
934 	trans->mem_access_count = count;
935 	trans->mem_access_offs = offs;
936 	return 0;
937 }
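
/*
 * Worked example of the bounds check above, with made-up numbers: a
 * v1.1 descriptor advertising mem_access_size = 16, mem_access_count = 2
 * and mem_access_offs = 48 needs 16 * 2 + 48 = 80 bytes, so it passes
 * for blen = 4096 but is rejected if the fragment is shorter than 80
 * bytes or if either the multiply or the add overflows.
 */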
938 
939 #if defined(CFG_CORE_SEL1_SPMC)
940 static int get_acc_perms(vaddr_t mem_acc_base, unsigned int mem_access_size,
941 			 unsigned int mem_access_count, uint8_t *acc_perms,
942 			 unsigned int *region_offs)
943 {
944 	struct ffa_mem_access_perm *descr = NULL;
945 	struct ffa_mem_access *mem_acc = NULL;
946 	unsigned int n = 0;
947 
948 	for (n = 0; n < mem_access_count; n++) {
949 		mem_acc = (void *)(mem_acc_base + mem_access_size * n);
950 		descr = &mem_acc->access_perm;
951 		if (READ_ONCE(descr->endpoint_id) == my_endpoint_id) {
952 			*acc_perms = READ_ONCE(descr->perm);
953 			*region_offs = READ_ONCE(mem_acc->region_offs);
954 			return 0;
955 		}
956 	}
957 
958 	return FFA_INVALID_PARAMETERS;
959 }
960 
961 static int mem_share_init(struct ffa_mem_transaction_x *mem_trans, void *buf,
962 			  size_t blen, unsigned int *page_count,
963 			  unsigned int *region_count, size_t *addr_range_offs)
964 {
965 	const uint16_t exp_mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
966 	const uint8_t exp_mem_acc_perm = FFA_MEM_ACC_RW;
967 	struct ffa_mem_region *region_descr = NULL;
968 	unsigned int region_descr_offs = 0;
969 	uint8_t mem_acc_perm = 0;
970 	size_t n = 0;
971 
972 	if (mem_trans->mem_reg_attr != exp_mem_reg_attr)
973 		return FFA_INVALID_PARAMETERS;
974 
975 	/* Check that the access permissions match what's expected */
976 	if (get_acc_perms((vaddr_t)buf + mem_trans->mem_access_offs,
977 			  mem_trans->mem_access_size,
978 			  mem_trans->mem_access_count,
979 			  &mem_acc_perm, &region_descr_offs) ||
980 	    mem_acc_perm != exp_mem_acc_perm)
981 		return FFA_INVALID_PARAMETERS;
982 
983 	/* Check that the Composite memory region descriptor fits */
984 	if (ADD_OVERFLOW(region_descr_offs, sizeof(*region_descr), &n) ||
985 	    n > blen)
986 		return FFA_INVALID_PARAMETERS;
987 
988 	if (!IS_ALIGNED_WITH_TYPE((vaddr_t)buf + region_descr_offs,
989 				  struct ffa_mem_region))
990 		return FFA_INVALID_PARAMETERS;
991 
992 	region_descr = (struct ffa_mem_region *)((vaddr_t)buf +
993 						 region_descr_offs);
994 	*page_count = READ_ONCE(region_descr->total_page_count);
995 	*region_count = READ_ONCE(region_descr->address_range_count);
996 	*addr_range_offs = n;
997 	return 0;
998 }
999 
1000 static int add_mem_share_helper(struct mem_share_state *s, void *buf,
1001 				size_t flen)
1002 {
1003 	unsigned int region_count = flen / sizeof(struct ffa_address_range);
1004 	struct ffa_address_range *arange = NULL;
1005 	unsigned int n = 0;
1006 
1007 	if (region_count > s->region_count)
1008 		region_count = s->region_count;
1009 
1010 	if (!IS_ALIGNED_WITH_TYPE(buf, struct ffa_address_range))
1011 		return FFA_INVALID_PARAMETERS;
1012 	arange = buf;
1013 
1014 	for (n = 0; n < region_count; n++) {
1015 		unsigned int page_count = READ_ONCE(arange[n].page_count);
1016 		uint64_t addr = READ_ONCE(arange[n].address);
1017 
1018 		if (mobj_ffa_add_pages_at(s->mf, &s->current_page_idx,
1019 					  addr, page_count))
1020 			return FFA_INVALID_PARAMETERS;
1021 	}
1022 
1023 	s->region_count -= region_count;
1024 	if (s->region_count)
1025 		return region_count * sizeof(*arange);
1026 
1027 	if (s->current_page_idx != s->page_count)
1028 		return FFA_INVALID_PARAMETERS;
1029 
1030 	return 0;
1031 }
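
/*
 * Return contract of the helper above: a negative FFA error code on
 * malformed input, 0 when every region and page has been consumed, or
 * the number of bytes consumed from buf when further address ranges are
 * expected in a later fragment.
 */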
1032 
1033 static int add_mem_share_frag(struct mem_frag_state *s, void *buf, size_t flen)
1034 {
1035 	int rc = 0;
1036 
1037 	rc = add_mem_share_helper(&s->share, buf, flen);
1038 	if (rc >= 0) {
1039 		if (!ADD_OVERFLOW(s->frag_offset, rc, &s->frag_offset)) {
1040 			/* We're not at the end of the descriptor yet */
1041 			if (s->share.region_count)
1042 				return s->frag_offset;
1043 
1044 			/* We're done */
1045 			rc = 0;
1046 		} else {
1047 			rc = FFA_INVALID_PARAMETERS;
1048 		}
1049 	}
1050 
1051 	SLIST_REMOVE(&frag_state_head, s, mem_frag_state, link);
1052 	if (rc < 0)
1053 		mobj_ffa_sel1_spmc_delete(s->share.mf);
1054 	else
1055 		mobj_ffa_push_to_inactive(s->share.mf);
1056 	free(s);
1057 
1058 	return rc;
1059 }
1060 
1061 static bool is_sp_share(struct ffa_mem_transaction_x *mem_trans,
1062 			void *buf)
1063 {
1064 	struct ffa_mem_access_perm *perm = NULL;
1065 	struct ffa_mem_access *mem_acc = NULL;
1066 
1067 	if (!IS_ENABLED(CFG_SECURE_PARTITION))
1068 		return false;
1069 
1070 	if (mem_trans->mem_access_count < 1)
1071 		return false;
1072 
1073 	mem_acc = (void *)((vaddr_t)buf + mem_trans->mem_access_offs);
1074 	perm = &mem_acc->access_perm;
1075 
1076 	/*
1077 	 * perm->endpoint_id is read here only to check if the endpoint is
1078 	 * OP-TEE. We read it again later on, but there are some additional
1079 	 * checks there to make sure that the data is correct.
1080 	 */
1081 	return READ_ONCE(perm->endpoint_id) != my_endpoint_id;
1082 }
1083 
1084 static int add_mem_share(struct ffa_mem_transaction_x *mem_trans,
1085 			 tee_mm_entry_t *mm, void *buf, size_t blen,
1086 			 size_t flen, uint64_t *global_handle)
1087 {
1088 	int rc = 0;
1089 	struct mem_share_state share = { };
1090 	size_t addr_range_offs = 0;
1091 	uint64_t cookie = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
1092 	size_t n = 0;
1093 
1094 	rc = mem_share_init(mem_trans, buf, flen, &share.page_count,
1095 			    &share.region_count, &addr_range_offs);
1096 	if (rc)
1097 		return rc;
1098 
1099 	if (MUL_OVERFLOW(share.region_count,
1100 			 sizeof(struct ffa_address_range), &n) ||
1101 	    ADD_OVERFLOW(n, addr_range_offs, &n) || n > blen)
1102 		return FFA_INVALID_PARAMETERS;
1103 
1104 	if (mem_trans->global_handle)
1105 		cookie = mem_trans->global_handle;
1106 	share.mf = mobj_ffa_sel1_spmc_new(cookie, share.page_count);
1107 	if (!share.mf)
1108 		return FFA_NO_MEMORY;
1109 
1110 	if (flen != blen) {
1111 		struct mem_frag_state *s = calloc(1, sizeof(*s));
1112 
1113 		if (!s) {
1114 			rc = FFA_NO_MEMORY;
1115 			goto err;
1116 		}
1117 		s->share = share;
1118 		s->mm = mm;
1119 		s->frag_offset = addr_range_offs;
1120 
1121 		SLIST_INSERT_HEAD(&frag_state_head, s, link);
1122 		rc = add_mem_share_frag(s, (char *)buf + addr_range_offs,
1123 					flen - addr_range_offs);
1124 
1125 		if (rc >= 0)
1126 			*global_handle = mobj_ffa_get_cookie(share.mf);
1127 
1128 		return rc;
1129 	}
1130 
1131 	rc = add_mem_share_helper(&share, (char *)buf + addr_range_offs,
1132 				  flen - addr_range_offs);
1133 	if (rc) {
1134 		/*
1135 		 * A positive count of consumed bytes may be returned instead of
1136 		 * 0 for done; that's an error here since the whole descriptor is present.
1137 		 */
1138 		rc = FFA_INVALID_PARAMETERS;
1139 		goto err;
1140 	}
1141 
1142 	*global_handle = mobj_ffa_push_to_inactive(share.mf);
1143 
1144 	return 0;
1145 err:
1146 	mobj_ffa_sel1_spmc_delete(share.mf);
1147 	return rc;
1148 }
1149 
1150 static int handle_mem_share_tmem(paddr_t pbuf, size_t blen, size_t flen,
1151 				 unsigned int page_count,
1152 				 uint64_t *global_handle, struct ffa_rxtx *rxtx)
1153 {
1154 	struct ffa_mem_transaction_x mem_trans = { };
1155 	int rc = 0;
1156 	size_t len = 0;
1157 	void *buf = NULL;
1158 	tee_mm_entry_t *mm = NULL;
1159 	vaddr_t offs = pbuf & SMALL_PAGE_MASK;
1160 
1161 	if (MUL_OVERFLOW(page_count, SMALL_PAGE_SIZE, &len))
1162 		return FFA_INVALID_PARAMETERS;
1163 	if (!core_pbuf_is(CORE_MEM_NON_SEC, pbuf, len))
1164 		return FFA_INVALID_PARAMETERS;
1165 
1166 	/*
1167 	 * Check that the length reported in flen is covered by len even
1168 	 * when the offset is taken into account.
1169 	 */
1170 	if (len < flen || len - offs < flen)
1171 		return FFA_INVALID_PARAMETERS;
1172 
1173 	mm = tee_mm_alloc(&tee_mm_shm, len);
1174 	if (!mm)
1175 		return FFA_NO_MEMORY;
1176 
1177 	if (core_mmu_map_contiguous_pages(tee_mm_get_smem(mm), pbuf,
1178 					  page_count, MEM_AREA_NSEC_SHM)) {
1179 		rc = FFA_INVALID_PARAMETERS;
1180 		goto out;
1181 	}
1182 	buf = (void *)(tee_mm_get_smem(mm) + offs);
1183 
1184 	cpu_spin_lock(&rxtx->spinlock);
1185 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, buf, flen, &mem_trans);
1186 	if (!rc && IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1187 	    virt_set_guest(mem_trans.sender_id))
1188 		rc = FFA_DENIED;
1189 	if (!rc)
1190 		rc = add_mem_share(&mem_trans, mm, buf, blen, flen,
1191 				   global_handle);
1192 	virt_unset_guest();
1193 	cpu_spin_unlock(&rxtx->spinlock);
1194 	if (rc > 0)
1195 		return rc;
1196 
1197 	core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1198 out:
1199 	tee_mm_free(mm);
1200 	return rc;
1201 }
1202 
1203 static int handle_mem_share_rxbuf(size_t blen, size_t flen,
1204 				  uint64_t *global_handle,
1205 				  struct ffa_rxtx *rxtx)
1206 {
1207 	struct ffa_mem_transaction_x mem_trans = { };
1208 	int rc = FFA_DENIED;
1209 
1210 	cpu_spin_lock(&rxtx->spinlock);
1211 
1212 	if (!rxtx->rx || flen > rxtx->size)
1213 		goto out;
1214 
1215 	rc = spmc_read_mem_transaction(rxtx->ffa_vers, rxtx->rx, flen,
1216 				       &mem_trans);
1217 	if (rc)
1218 		goto out;
1219 	if (is_sp_share(&mem_trans, rxtx->rx)) {
1220 		rc = spmc_sp_add_share(&mem_trans, rxtx, blen,
1221 				       global_handle, NULL);
1222 		goto out;
1223 	}
1224 
1225 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
1226 	    virt_set_guest(mem_trans.sender_id))
1227 		goto out;
1228 
1229 	rc = add_mem_share(&mem_trans, NULL, rxtx->rx, blen, flen,
1230 			   global_handle);
1231 
1232 	virt_unset_guest();
1233 
1234 out:
1235 	cpu_spin_unlock(&rxtx->spinlock);
1236 
1237 	return rc;
1238 }
1239 
1240 static void handle_mem_share(struct thread_smc_args *args,
1241 			     struct ffa_rxtx *rxtx)
1242 {
1243 	uint32_t tot_len = args->a1;
1244 	uint32_t frag_len = args->a2;
1245 	uint64_t addr = args->a3;
1246 	uint32_t page_count = args->a4;
1247 	uint32_t ret_w1 = 0;
1248 	uint32_t ret_w2 = FFA_INVALID_PARAMETERS;
1249 	uint32_t ret_w3 = 0;
1250 	uint32_t ret_fid = FFA_ERROR;
1251 	uint64_t global_handle = 0;
1252 	int rc = 0;
1253 
1254 	/* Check that the MBZs are indeed 0 */
1255 	if (args->a5 || args->a6 || args->a7)
1256 		goto out;
1257 
1258 	/* Check that fragment length doesn't exceed total length */
1259 	if (frag_len > tot_len)
1260 		goto out;
1261 
1262 	/* Check for 32-bit calling convention */
1263 	if (args->a0 == FFA_MEM_SHARE_32)
1264 		addr &= UINT32_MAX;
1265 
1266 	if (!addr) {
1267 		/*
1268 		 * The memory transaction descriptor is passed via our rx
1269 		 * buffer.
1270 		 */
1271 		if (page_count)
1272 			goto out;
1273 		rc = handle_mem_share_rxbuf(tot_len, frag_len, &global_handle,
1274 					    rxtx);
1275 	} else {
1276 		rc = handle_mem_share_tmem(addr, tot_len, frag_len, page_count,
1277 					   &global_handle, rxtx);
1278 	}
1279 	if (rc < 0) {
1280 		ret_w2 = rc;
1281 	} else if (rc > 0) {
1282 		ret_fid = FFA_MEM_FRAG_RX;
1283 		ret_w3 = rc;
1284 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1285 	} else {
1286 		ret_fid = FFA_SUCCESS_32;
1287 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1288 	}
1289 out:
1290 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1291 }
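
/*
 * Result register sketch for the handler above: on completion w0 is
 * FFA_SUCCESS_32 with the 64-bit handle split across w2 (low) and w3
 * (high); when more fragments are expected w0 is FFA_MEM_FRAG_RX with
 * the handle in w1 (low) and w2 (high) and the byte offset consumed so
 * far in w3.
 */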
1292 
1293 static struct mem_frag_state *get_frag_state(uint64_t global_handle)
1294 {
1295 	struct mem_frag_state *s = NULL;
1296 
1297 	SLIST_FOREACH(s, &frag_state_head, link)
1298 		if (mobj_ffa_get_cookie(s->share.mf) == global_handle)
1299 			return s;
1300 
1301 	return NULL;
1302 }
1303 
1304 static void handle_mem_frag_tx(struct thread_smc_args *args,
1305 			       struct ffa_rxtx *rxtx)
1306 {
1307 	uint64_t global_handle = reg_pair_to_64(args->a2, args->a1);
1308 	size_t flen = args->a3;
1309 	uint32_t endpoint_id = args->a4;
1310 	struct mem_frag_state *s = NULL;
1311 	tee_mm_entry_t *mm = NULL;
1312 	unsigned int page_count = 0;
1313 	void *buf = NULL;
1314 	uint32_t ret_w1 = 0;
1315 	uint32_t ret_w2 = 0;
1316 	uint32_t ret_w3 = 0;
1317 	uint32_t ret_fid = 0;
1318 	int rc = 0;
1319 
1320 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1321 		uint16_t guest_id = endpoint_id >> 16;
1322 
1323 		if (!guest_id || virt_set_guest(guest_id)) {
1324 			rc = FFA_INVALID_PARAMETERS;
1325 			goto out_set_rc;
1326 		}
1327 	}
1328 
1329 	/*
1330 	 * Currently we're only doing this for fragmented FFA_MEM_SHARE_*
1331 	 * requests.
1332 	 */
1333 
1334 	cpu_spin_lock(&rxtx->spinlock);
1335 
1336 	s = get_frag_state(global_handle);
1337 	if (!s) {
1338 		rc = FFA_INVALID_PARAMETERS;
1339 		goto out;
1340 	}
1341 
1342 	mm = s->mm;
1343 	if (mm) {
1344 		if (flen > tee_mm_get_bytes(mm)) {
1345 			rc = FFA_INVALID_PARAMETERS;
1346 			goto out;
1347 		}
1348 		page_count = s->share.page_count;
1349 		buf = (void *)tee_mm_get_smem(mm);
1350 	} else {
1351 		if (flen > rxtx->size) {
1352 			rc = FFA_INVALID_PARAMETERS;
1353 			goto out;
1354 		}
1355 		buf = rxtx->rx;
1356 	}
1357 
1358 	rc = add_mem_share_frag(s, buf, flen);
1359 out:
1360 	virt_unset_guest();
1361 	cpu_spin_unlock(&rxtx->spinlock);
1362 
1363 	if (rc <= 0 && mm) {
1364 		core_mmu_unmap_pages(tee_mm_get_smem(mm), page_count);
1365 		tee_mm_free(mm);
1366 	}
1367 
1368 out_set_rc:
1369 	if (rc < 0) {
1370 		ret_fid = FFA_ERROR;
1371 		ret_w2 = rc;
1372 	} else if (rc > 0) {
1373 		ret_fid = FFA_MEM_FRAG_RX;
1374 		ret_w3 = rc;
1375 		reg_pair_from_64(global_handle, &ret_w2, &ret_w1);
1376 	} else {
1377 		ret_fid = FFA_SUCCESS_32;
1378 		reg_pair_from_64(global_handle, &ret_w3, &ret_w2);
1379 	}
1380 
1381 	spmc_set_args(args, ret_fid, ret_w1, ret_w2, ret_w3, 0, 0);
1382 }
1383 
1384 static void handle_mem_reclaim(struct thread_smc_args *args)
1385 {
1386 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1387 	uint32_t ret_fid = FFA_ERROR;
1388 	uint64_t cookie = 0;
1389 
1390 	if (args->a3 || args->a4 || args->a5 || args->a6 || args->a7)
1391 		goto out;
1392 
1393 	cookie = reg_pair_to_64(args->a2, args->a1);
1394 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
1395 		uint16_t guest_id = 0;
1396 
1397 		if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
1398 			guest_id = virt_find_guest_by_cookie(cookie);
1399 		} else {
1400 			guest_id = (cookie >> FFA_MEMORY_HANDLE_PRTN_SHIFT) &
1401 				   FFA_MEMORY_HANDLE_PRTN_MASK;
1402 		}
1403 		if (!guest_id || virt_set_guest(guest_id))
1404 			goto out;
1405 	}
1406 
1407 	switch (mobj_ffa_sel1_spmc_reclaim(cookie)) {
1408 	case TEE_SUCCESS:
1409 		ret_fid = FFA_SUCCESS_32;
1410 		ret_val = 0;
1411 		break;
1412 	case TEE_ERROR_ITEM_NOT_FOUND:
1413 		DMSG("cookie %#"PRIx64" not found", cookie);
1414 		ret_val = FFA_INVALID_PARAMETERS;
1415 		break;
1416 	default:
1417 		DMSG("cookie %#"PRIx64" busy", cookie);
1418 		ret_val = FFA_DENIED;
1419 		break;
1420 	}
1421 
1422 	virt_unset_guest();
1423 
1424 out:
1425 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1426 }
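
/*
 * Cookie routing sketch for the virtualization case above: unless the
 * hypervisor bit is set, the owning guest ID travels inside the cookie
 * itself in the FFA_MEMORY_HANDLE_PRTN_SHIFT bit-field, so the reclaim
 * can be directed to the right partition without any lookup table.
 */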
1427 
1428 static void handle_notification_bitmap_create(struct thread_smc_args *args)
1429 {
1430 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1431 	uint32_t ret_fid = FFA_ERROR;
1432 	uint32_t old_itr_status = 0;
1433 
1434 	if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
1435 	    !args->a5 && !args->a6 && !args->a7) {
1436 		uint16_t vm_id = args->a1;
1437 
1438 		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1439 
1440 		if (notif_vm_id_valid) {
1441 			if (vm_id == notif_vm_id)
1442 				ret_val = FFA_DENIED;
1443 			else
1444 				ret_val = FFA_NO_MEMORY;
1445 		} else {
1446 			notif_vm_id = vm_id;
1447 			notif_vm_id_valid = true;
1448 			ret_val = FFA_OK;
1449 			ret_fid = FFA_SUCCESS_32;
1450 		}
1451 
1452 		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1453 	}
1454 
1455 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1456 }
1457 
1458 static void handle_notification_bitmap_destroy(struct thread_smc_args *args)
1459 {
1460 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1461 	uint32_t ret_fid = FFA_ERROR;
1462 	uint32_t old_itr_status = 0;
1463 
1464 	if (!FFA_TARGET_INFO_GET_SP_ID(args->a1) && !args->a3 && !args->a4 &&
1465 	    !args->a5 && !args->a6 && !args->a7) {
1466 		uint16_t vm_id = args->a1;
1467 
1468 		old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1469 
1470 		if (notif_vm_id_valid && vm_id == notif_vm_id) {
1471 			if (notif_pending_bitmap || notif_bound_bitmap) {
1472 				ret_val = FFA_DENIED;
1473 			} else {
1474 				notif_vm_id_valid = false;
1475 				ret_val = FFA_OK;
1476 				ret_fid = FFA_SUCCESS_32;
1477 			}
1478 		}
1479 
1480 		cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1481 	}
1482 
1483 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1484 }
1485 
1486 static void handle_notification_bind(struct thread_smc_args *args)
1487 {
1488 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1489 	uint32_t ret_fid = FFA_ERROR;
1490 	uint32_t old_itr_status = 0;
1491 	uint64_t bitmap = 0;
1492 	uint16_t vm_id = 0;
1493 
1494 	if (args->a5 || args->a6 || args->a7)
1495 		goto out;
1496 	if (args->a2) {
1497 		/* We only deal with global notifications for now */
1498 		ret_val = FFA_NOT_SUPPORTED;
1499 		goto out;
1500 	}
1501 
1502 	/* The destination of the eventual notification */
1503 	vm_id = FFA_DST(args->a1);
1504 	bitmap = reg_pair_to_64(args->a4, args->a3);
1505 
1506 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1507 
1508 	if (notif_vm_id_valid && vm_id == notif_vm_id) {
1509 		if (bitmap & notif_bound_bitmap) {
1510 			ret_val = FFA_DENIED;
1511 		} else {
1512 			notif_bound_bitmap |= bitmap;
1513 			ret_val = FFA_OK;
1514 			ret_fid = FFA_SUCCESS_32;
1515 		}
1516 	}
1517 
1518 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1519 out:
1520 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1521 }
1522 
1523 static void handle_notification_unbind(struct thread_smc_args *args)
1524 {
1525 	uint32_t ret_val = FFA_INVALID_PARAMETERS;
1526 	uint32_t ret_fid = FFA_ERROR;
1527 	uint32_t old_itr_status = 0;
1528 	uint64_t bitmap = 0;
1529 	uint16_t vm_id = 0;
1530 
1531 	if (args->a2 || args->a5 || args->a6 || args->a7)
1532 		goto out;
1533 
1534 	/* The destination of the eventual notification */
1535 	vm_id = FFA_DST(args->a1);
1536 	bitmap = reg_pair_to_64(args->a4, args->a3);
1537 
1538 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1539 
1540 	if (notif_vm_id_valid && vm_id == notif_vm_id) {
1541 		/*
1542 		 * Spec says:
1543 		 * At least one notification is bound to another Sender or
1544 		 * is currently pending.
1545 		 *
1546 		 * Not sure what the intention is.
1547 		 */
1548 		if (bitmap & notif_pending_bitmap) {
1549 			ret_val = FFA_DENIED;
1550 		} else {
1551 			notif_bound_bitmap &= ~bitmap;
1552 			ret_val = FFA_OK;
1553 			ret_fid = FFA_SUCCESS_32;
1554 		}
1555 	}
1556 
1557 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1558 out:
1559 	spmc_set_args(args, ret_fid, 0, ret_val, 0, 0, 0);
1560 }
1561 
1562 static void handle_notification_get(struct thread_smc_args *args)
1563 {
1564 	uint32_t w2 = FFA_INVALID_PARAMETERS;
1565 	uint32_t ret_fid = FFA_ERROR;
1566 	uint32_t old_itr_status = 0;
1567 	uint16_t vm_id = 0;
1568 	uint32_t w3 = 0;
1569 
1570 	if (args->a5 || args->a6 || args->a7)
1571 		goto out;
1572 	if (!(args->a2 & 0x1)) {
1573 		ret_fid = FFA_SUCCESS_32;
1574 		w2 = 0;
1575 		goto out;
1576 	}
1577 	vm_id = FFA_DST(args->a1);
1578 
1579 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1580 
1581 	if (notif_vm_id_valid && vm_id == notif_vm_id) {
1582 		reg_pair_from_64(notif_pending_bitmap, &w3, &w2);
1583 		notif_pending_bitmap = 0;
1584 		ret_fid = FFA_SUCCESS_32;
1585 	}
1586 
1587 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1588 out:
1589 	spmc_set_args(args, ret_fid, 0, w2, w3, 0, 0);
1590 }
1591 
1592 static void handle_notification_info_get(struct thread_smc_args *args)
1593 {
1594 	uint32_t w2 = FFA_INVALID_PARAMETERS;
1595 	uint32_t ret_fid = FFA_ERROR;
1596 
1597 	if (args->a1 || args->a2 || args->a3 || args->a4 || args->a5 ||
1598 	    args->a6 || args->a7)
1599 		goto out;
1600 
1601 	if (OPTEE_SMC_IS_64(args->a0))
1602 		ret_fid = FFA_SUCCESS_64;
1603 	else
1604 		ret_fid = FFA_SUCCESS_32;
1605 
1606 	/*
1607 	 * Note that we only support a physical OS kernel in normal world
1608 	 * with global notifications.
1609 	 * So there is one ID list (BIT[11:7] = 1),
1610 	 * holding one ID (count encoded as BIT[13:12] + 1),
1611 	 * and the VM ID is always 0.
1612 	 */
1613 	w2 = SHIFT_U32(1, 7);
1614 out:
1615 	spmc_set_args(args, ret_fid, 0, w2, 0, 0, 0);
1616 }
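
/*
 * Decoding the w2 value above: SHIFT_U32(1, 7) sets the list count
 * field (BIT[11:7]) to 1 and leaves the per-list ID count field
 * (BIT[13:12]) at 0, which with the "+ 1" encoding means one list
 * holding one ID, for VM 0.
 */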
1617 
1618 void thread_spmc_set_async_notif_intid(int intid)
1619 {
1620 	assert(interrupt_can_raise_sgi(interrupt_get_main_chip()));
1621 	notif_intid = intid;
1622 	spmc_notif_is_ready = true;
1623 	DMSG("Asynchronous notifications are ready");
1624 }
1625 
1626 void notif_send_async(uint32_t value)
1627 {
1628 	uint32_t old_itr_status = 0;
1629 
1630 	old_itr_status = cpu_spin_lock_xsave(&spmc_notif_lock);
1631 	assert(value == NOTIF_VALUE_DO_BOTTOM_HALF && spmc_notif_is_ready &&
1632 	       do_bottom_half_value >= 0 && notif_intid >= 0);
1633 	notif_pending_bitmap |= BIT64(do_bottom_half_value);
1634 	interrupt_raise_sgi(interrupt_get_main_chip(), notif_intid,
1635 			    ITR_CPU_MASK_TO_THIS_CPU);
1636 	cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
1637 }
1638 #else
1639 void notif_send_async(uint32_t value)
1640 {
1641 	/* global notification, delay notification interrupt */
1642 	uint32_t flags = BIT32(1);
1643 	int res = 0;
1644 
1645 	assert(value == NOTIF_VALUE_DO_BOTTOM_HALF && spmc_notif_is_ready &&
1646 	       do_bottom_half_value >= 0);
1647 	res = ffa_set_notification(notif_vm_id, my_endpoint_id, flags,
1648 				   BIT64(do_bottom_half_value));
1649 	if (res) {
1650 		EMSG("notification set failed with error %d", res);
1651 		panic();
1652 	}
1653 }
1654 #endif
1655 
1656 /* Only called from assembly */
1657 void thread_spmc_msg_recv(struct thread_smc_args *args);
1658 void thread_spmc_msg_recv(struct thread_smc_args *args)
1659 {
1660 	assert((thread_get_exceptions() & THREAD_EXCP_ALL) == THREAD_EXCP_ALL);
1661 	switch (args->a0) {
1662 #if defined(CFG_CORE_SEL1_SPMC)
1663 	case FFA_FEATURES:
1664 		handle_features(args);
1665 		break;
1666 	case FFA_SPM_ID_GET:
1667 		handle_spm_id_get(args);
1668 		break;
1669 #ifdef ARM64
1670 	case FFA_RXTX_MAP_64:
1671 #endif
1672 	case FFA_RXTX_MAP_32:
1673 		spmc_handle_rxtx_map(args, &my_rxtx);
1674 		break;
1675 	case FFA_RXTX_UNMAP:
1676 		spmc_handle_rxtx_unmap(args, &my_rxtx);
1677 		break;
1678 	case FFA_RX_RELEASE:
1679 		spmc_handle_rx_release(args, &my_rxtx);
1680 		break;
1681 	case FFA_PARTITION_INFO_GET:
1682 		spmc_handle_partition_info_get(args, &my_rxtx);
1683 		break;
1684 	case FFA_RUN:
1685 		spmc_handle_run(args);
1686 		break;
1687 #endif /*CFG_CORE_SEL1_SPMC*/
1688 	case FFA_INTERRUPT:
1689 		if (IS_ENABLED(CFG_CORE_SEL1_SPMC))
1690 			spmc_set_args(args, FFA_NORMAL_WORLD_RESUME, 0, 0, 0,
1691 				      0, 0);
1692 		else
1693 			spmc_set_args(args, FFA_MSG_WAIT, 0, 0, 0, 0, 0);
1694 		break;
1695 #ifdef ARM64
1696 	case FFA_MSG_SEND_DIRECT_REQ_64:
1697 #endif
1698 	case FFA_MSG_SEND_DIRECT_REQ_32:
1699 		handle_direct_request(args, &my_rxtx);
1700 		break;
1701 #if defined(CFG_CORE_SEL1_SPMC)
1702 #ifdef ARM64
1703 	case FFA_MEM_SHARE_64:
1704 #endif
1705 	case FFA_MEM_SHARE_32:
1706 		handle_mem_share(args, &my_rxtx);
1707 		break;
1708 	case FFA_MEM_RECLAIM:
1709 		if (!IS_ENABLED(CFG_SECURE_PARTITION) ||
1710 		    !ffa_mem_reclaim(args, NULL))
1711 			handle_mem_reclaim(args);
1712 		break;
1713 	case FFA_MEM_FRAG_TX:
1714 		handle_mem_frag_tx(args, &my_rxtx);
1715 		break;
1716 	case FFA_NOTIFICATION_BITMAP_CREATE:
1717 		handle_notification_bitmap_create(args);
1718 		break;
1719 	case FFA_NOTIFICATION_BITMAP_DESTROY:
1720 		handle_notification_bitmap_destroy(args);
1721 		break;
1722 	case FFA_NOTIFICATION_BIND:
1723 		handle_notification_bind(args);
1724 		break;
1725 	case FFA_NOTIFICATION_UNBIND:
1726 		handle_notification_unbind(args);
1727 		break;
1728 	case FFA_NOTIFICATION_GET:
1729 		handle_notification_get(args);
1730 		break;
1731 #ifdef ARM64
1732 	case FFA_NOTIFICATION_INFO_GET_64:
1733 #endif
1734 	case FFA_NOTIFICATION_INFO_GET_32:
1735 		handle_notification_info_get(args);
1736 		break;
1737 #endif /*CFG_CORE_SEL1_SPMC*/
1738 	case FFA_ERROR:
1739 		EMSG("Cannot handle FFA_ERROR(%d)", (int)args->a2);
1740 		if (!IS_ENABLED(CFG_CORE_SEL1_SPMC)) {
1741 			/*
1742 			 * The SPMC will return an FFA_ERROR back, so it's better
1743 			 * to panic() now than to flood the log.
1744 			 */
1745 			panic("FFA_ERROR from SPMC is fatal");
1746 		}
1747 		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
1748 			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1749 		break;
1750 	default:
1751 		EMSG("Unhandled FFA function ID %#"PRIx32, (uint32_t)args->a0);
1752 		spmc_set_args(args, FFA_ERROR, FFA_PARAM_MBZ, FFA_NOT_SUPPORTED,
1753 			      FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
1754 	}
1755 }
1756 
1757 static TEE_Result yielding_call_with_arg(uint64_t cookie, uint32_t offset)
1758 {
1759 	size_t sz_rpc = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1760 	struct thread_ctx *thr = threads + thread_get_id();
1761 	TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
1762 	struct optee_msg_arg *arg = NULL;
1763 	struct mobj *mobj = NULL;
1764 	uint32_t num_params = 0;
1765 	size_t sz = 0;
1766 
1767 	mobj = mobj_ffa_get_by_cookie(cookie, 0);
1768 	if (!mobj) {
1769 		EMSG("Can't find cookie %#"PRIx64, cookie);
1770 		return TEE_ERROR_BAD_PARAMETERS;
1771 	}
1772 
1773 	res = mobj_inc_map(mobj);
1774 	if (res)
1775 		goto out_put_mobj;
1776 
1777 	res = TEE_ERROR_BAD_PARAMETERS;
1778 	arg = mobj_get_va(mobj, offset, sizeof(*arg));
1779 	if (!arg)
1780 		goto out_dec_map;
1781 
1782 	num_params = READ_ONCE(arg->num_params);
1783 	if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
1784 		goto out_dec_map;
1785 
1786 	sz = OPTEE_MSG_GET_ARG_SIZE(num_params);
1787 
1788 	thr->rpc_arg = mobj_get_va(mobj, offset + sz, sz_rpc);
1789 	if (!thr->rpc_arg)
1790 		goto out_dec_map;
1791 
1792 	virt_on_stdcall();
1793 	res = tee_entry_std(arg, num_params);
1794 
1795 	thread_rpc_shm_cache_clear(&thr->shm_cache);
1796 	thr->rpc_arg = NULL;
1797 
1798 out_dec_map:
1799 	mobj_dec_map(mobj);
1800 out_put_mobj:
1801 	mobj_put(mobj);
1802 	return res;
1803 }
1804 
1805 /*
1806  * Helper routine for the assembly function thread_std_smc_entry()
1807  *
1808  * Note: this function is weak just to make link_dummies_paged.c happy.
1809  */
1810 uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1,
1811 				       uint32_t a2, uint32_t a3,
1812 				       uint32_t a4, uint32_t a5 __unused)
1813 {
1814 	/*
1815 	 * Arguments are supplied from handle_yielding_call() as:
1816 	 * a0 <- w1
1817 	 * a1 <- w3
1818 	 * a2 <- w4
1819 	 * a3 <- w5
1820 	 * a4 <- w6
1821 	 * a5 <- w7
1822 	 */
1823 	thread_get_tsd()->rpc_target_info = swap_src_dst(a0);
1824 	if (a1 == OPTEE_FFA_YIELDING_CALL_WITH_ARG)
1825 		return yielding_call_with_arg(reg_pair_to_64(a3, a2), a4);
1826 	return FFA_DENIED;
1827 }
1828 
1829 static bool set_fmem(struct optee_msg_param *param, struct thread_param *tpm)
1830 {
1831 	uint64_t offs = tpm->u.memref.offs;
1832 
1833 	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
1834 		      OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
1835 
1836 	param->u.fmem.offs_low = offs;
1837 	param->u.fmem.offs_high = offs >> 32;
1838 	if (param->u.fmem.offs_high != offs >> 32)
1839 		return false;
1840 
1841 	param->u.fmem.size = tpm->u.memref.size;
1842 	if (tpm->u.memref.mobj) {
1843 		uint64_t cookie = mobj_get_cookie(tpm->u.memref.mobj);
1844 
1845 		/* If a mobj is passed it must be one with a valid cookie. */
1846 		if (cookie == OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
1847 			return false;
1848 		param->u.fmem.global_id = cookie;
1849 	} else {
1850 		param->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
1851 	}
1852 
1853 	return true;
1854 }
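
/*
 * The offs_high comparison above catches truncation: offs_high is
 * narrower than the full upper 32 bits of the offset, so writing
 * offs >> 32 and reading it back only matches when the offset actually
 * fits in the fmem fields.
 */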
1855 
1856 static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
1857 			    struct thread_param *params,
1858 			    struct optee_msg_arg **arg_ret)
1859 {
1860 	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
1861 	struct thread_ctx *thr = threads + thread_get_id();
1862 	struct optee_msg_arg *arg = thr->rpc_arg;
1863 
1864 	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
1865 		return TEE_ERROR_BAD_PARAMETERS;
1866 
1867 	if (!arg) {
1868 		EMSG("rpc_arg not set");
1869 		return TEE_ERROR_GENERIC;
1870 	}
1871 
1872 	memset(arg, 0, sz);
1873 	arg->cmd = cmd;
1874 	arg->num_params = num_params;
1875 	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
1876 
1877 	for (size_t n = 0; n < num_params; n++) {
1878 		switch (params[n].attr) {
1879 		case THREAD_PARAM_ATTR_NONE:
1880 			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
1881 			break;
1882 		case THREAD_PARAM_ATTR_VALUE_IN:
1883 		case THREAD_PARAM_ATTR_VALUE_OUT:
1884 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1885 			arg->params[n].attr = params[n].attr -
1886 					      THREAD_PARAM_ATTR_VALUE_IN +
1887 					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
1888 			arg->params[n].u.value.a = params[n].u.value.a;
1889 			arg->params[n].u.value.b = params[n].u.value.b;
1890 			arg->params[n].u.value.c = params[n].u.value.c;
1891 			break;
1892 		case THREAD_PARAM_ATTR_MEMREF_IN:
1893 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1894 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1895 			if (!set_fmem(arg->params + n, params + n))
1896 				return TEE_ERROR_BAD_PARAMETERS;
1897 			break;
1898 		default:
1899 			return TEE_ERROR_BAD_PARAMETERS;
1900 		}
1901 	}
1902 
1903 	if (arg_ret)
1904 		*arg_ret = arg;
1905 
1906 	return TEE_SUCCESS;
1907 }
1908 
1909 static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
1910 				struct thread_param *params)
1911 {
1912 	for (size_t n = 0; n < num_params; n++) {
1913 		switch (params[n].attr) {
1914 		case THREAD_PARAM_ATTR_VALUE_OUT:
1915 		case THREAD_PARAM_ATTR_VALUE_INOUT:
1916 			params[n].u.value.a = arg->params[n].u.value.a;
1917 			params[n].u.value.b = arg->params[n].u.value.b;
1918 			params[n].u.value.c = arg->params[n].u.value.c;
1919 			break;
1920 		case THREAD_PARAM_ATTR_MEMREF_OUT:
1921 		case THREAD_PARAM_ATTR_MEMREF_INOUT:
1922 			params[n].u.memref.size = arg->params[n].u.fmem.size;
1923 			break;
1924 		default:
1925 			break;
1926 		}
1927 	}
1928 
1929 	return arg->ret;
1930 }
1931 
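/*
 * Full RPC round trip: build the argument buffer, suspend this thread
 * with thread_rpc() while normal world services the request, then
 * extract the results once the thread is resumed. As a sketch, modelled
 * on existing callers of this API elsewhere in the kernel, fetching REE
 * time could look like:
 *
 *	struct thread_param p = THREAD_PARAM_VALUE(OUT, 0, 0, 0);
 *
 *	if (!thread_rpc_cmd(OPTEE_RPC_CMD_GET_TIME, 1, &p))
 *		seconds = p.u.value.a;
 */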
1932 uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
1933 			struct thread_param *params)
1934 {
1935 	struct thread_rpc_arg rpc_arg = { .call = {
1936 			.w1 = thread_get_tsd()->rpc_target_info,
1937 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1938 		},
1939 	};
1940 	struct optee_msg_arg *arg = NULL;
1941 	uint32_t ret = 0;
1942 
1943 	ret = get_rpc_arg(cmd, num_params, params, &arg);
1944 	if (ret)
1945 		return ret;
1946 
1947 	thread_rpc(&rpc_arg);
1948 
1949 	return get_rpc_arg_res(arg, num_params, params);
1950 }
1951 
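/*
 * Ask normal world to free previously RPC-allocated shared memory,
 * identified by its cookie. The local reference and the cookie
 * registration are dropped even if building the request fails; only
 * the notification of normal world is skipped in that case.
 */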
1952 static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
1953 {
1954 	struct thread_rpc_arg rpc_arg = { .call = {
1955 			.w1 = thread_get_tsd()->rpc_target_info,
1956 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1957 		},
1958 	};
1959 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
1960 	uint32_t res2 = 0;
1961 	uint32_t res = 0;
1962 
1963 	DMSG("freeing cookie %#"PRIx64, cookie);
1964 
1965 	res = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param, NULL);
1966 
1967 	mobj_put(mobj);
1968 	res2 = mobj_ffa_unregister_by_cookie(cookie);
1969 	if (res2)
1970 		DMSG("mobj_ffa_unregister_by_cookie(%#"PRIx64"): %#"PRIx32,
1971 		     cookie, res2);
1972 	if (!res)
1973 		thread_rpc(&rpc_arg);
1974 }
1975 
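/*
 * Ask normal world to allocate shared memory of the given size,
 * alignment and buffer type (bt). Normal world answers with the FF-A
 * cookie (global handle) of the shared memory; the matching mobj,
 * registered when the memory was shared with us, is then looked up,
 * sanity checked and mapped.
 */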
1976 static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
1977 {
1978 	struct thread_rpc_arg rpc_arg = { .call = {
1979 			.w1 = thread_get_tsd()->rpc_target_info,
1980 			.w4 = OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD,
1981 		},
1982 	};
1983 	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
1984 	struct optee_msg_arg *arg = NULL;
1985 	unsigned int internal_offset = 0;
1986 	struct mobj *mobj = NULL;
1987 	uint64_t cookie = 0;
1988 
1989 	if (get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param, &arg))
1990 		return NULL;
1991 
1992 	thread_rpc(&rpc_arg);
1993 
1994 	if (arg->num_params != 1 ||
1995 	    arg->params->attr != OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT)
1996 		return NULL;
1997 
1998 	internal_offset = READ_ONCE(arg->params->u.fmem.internal_offs);
1999 	cookie = READ_ONCE(arg->params->u.fmem.global_id);
2000 	mobj = mobj_ffa_get_by_cookie(cookie, internal_offset);
2001 	if (!mobj) {
2002 		DMSG("mobj_ffa_get_by_cookie(%#"PRIx64", %#x): failed",
2003 		     cookie, internal_offset);
2004 		return NULL;
2005 	}
2006 
2007 	assert(mobj_is_nonsec(mobj));
2008 
2009 	if (mobj->size < size) {
2010 		DMSG("Mobj %#"PRIx64": wrong size", cookie);
2011 		mobj_put(mobj);
2012 		return NULL;
2013 	}
2014 
2015 	if (mobj_inc_map(mobj)) {
2016 		DMSG("mobj_inc_map(%#"PRIx64"): failed", cookie);
2017 		mobj_put(mobj);
2018 		return NULL;
2019 	}
2020 
2021 	return mobj;
2022 }
2023 
2024 struct mobj *thread_rpc_alloc_payload(size_t size)
2025 {
2026 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
2027 }
2028 
2029 struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
2030 {
2031 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
2032 }
2033 
2034 void thread_rpc_free_kernel_payload(struct mobj *mobj)
2035 {
2036 	if (mobj)
2037 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL,
2038 				mobj_get_cookie(mobj), mobj);
2039 }
2040 
2041 void thread_rpc_free_payload(struct mobj *mobj)
2042 {
2043 	if (mobj)
2044 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
2045 				mobj);
2046 }
2047 
2048 struct mobj *thread_rpc_alloc_global_payload(size_t size)
2049 {
2050 	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
2051 }
2052 
2053 void thread_rpc_free_global_payload(struct mobj *mobj)
2054 {
2055 	if (mobj)
2056 		thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL,
2057 				mobj_get_cookie(mobj), mobj);
2058 }
2059 
2060 void thread_spmc_register_secondary_ep(vaddr_t ep)
2061 {
2062 	unsigned long ret = 0;
2063 
2064 	/* Let the SPM know the entry point for secondary CPUs */
2065 	ret = thread_smc(FFA_SECONDARY_EP_REGISTER_64, ep, 0, 0);
2066 
2067 	if (ret != FFA_SUCCESS_32 && ret != FFA_SUCCESS_64)
2068 		EMSG("FFA_SECONDARY_EP_REGISTER_64 ret %#lx", ret);
2069 }
2070 
2071 #if defined(CFG_CORE_SEL1_SPMC)
2072 static TEE_Result spmc_init(void)
2073 {
2074 	my_endpoint_id = SPMC_ENDPOINT_ID;
2075 	DMSG("My endpoint ID %#x", my_endpoint_id);
2076 
2077 	/*
2078 	 * If the SPMD thinks we are version 1.0 it will report version 1.0
2079 	 * to normal world regardless of which version we query the SPM
2080 	 * with. However, if the SPMD thinks we are version 1.1 it will
2081 	 * forward queries from normal world to let us negotiate the
2082 	 * version. So by setting version 1.0 here we should be compatible.
2083 	 *
2084 	 * Note that disagreement on the negotiated version means that we'll
2085 	 * have communication problems with normal world.
2086 	 */
2087 	my_rxtx.ffa_vers = FFA_VERSION_1_0;
2088 
2089 	return TEE_SUCCESS;
2090 }
2091 #else /* !defined(CFG_CORE_SEL1_SPMC) */
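/*
 * With the SPMC at a higher exception level OP-TEE is a regular FF-A
 * endpoint and must register its RX/TX buffer pair with FFA_RXTX_MAP
 * before any call that uses them. a1/a2 carry the physical addresses
 * of the TX and RX buffers and a3 their size as a count of 4k pages.
 */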
2092 static void spmc_rxtx_map(struct ffa_rxtx *rxtx)
2093 {
2094 	struct thread_smc_args args = {
2095 #ifdef ARM64
2096 		.a0 = FFA_RXTX_MAP_64,
2097 #else
2098 		.a0 = FFA_RXTX_MAP_32,
2099 #endif
2100 		.a1 = virt_to_phys(rxtx->tx),
2101 		.a2 = virt_to_phys(rxtx->rx),
2102 		.a3 = 1,
2103 	};
2104 
2105 	thread_smccc(&args);
2106 	if (!is_ffa_success(args.a0)) {
2107 		if (args.a0 == FFA_ERROR)
2108 			EMSG("rxtx map failed with error %ld", args.a2);
2109 		else
2110 			EMSG("rxtx map failed");
2111 		panic();
2112 	}
2113 }
2114 
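/* Ask the SPMC for our FF-A endpoint ID, returned in a2 on success. */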
2115 static uint16_t get_my_id(void)
2116 {
2117 	struct thread_smc_args args = {
2118 		.a0 = FFA_ID_GET,
2119 	};
2120 
2121 	thread_smccc(&args);
2122 	if (!is_ffa_success(args.a0)) {
2123 		if (args.a0 == FFA_ERROR)
2124 			EMSG("Get id failed with error %ld", args.a2);
2125 		else
2126 			EMSG("Get id failed");
2127 		panic();
2128 	}
2129 
2130 	return args.a2;
2131 }
2132 
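/*
 * FFA_VERSION returns the version in the lower 31 bits on success.
 * Bit 31 set means the call failed, for instance with the negative
 * error code FFA_NOT_SUPPORTED.
 */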
2133 static uint32_t get_ffa_version(uint32_t my_version)
2134 {
2135 	struct thread_smc_args args = {
2136 		.a0 = FFA_VERSION,
2137 		.a1 = my_version,
2138 	};
2139 
2140 	thread_smccc(&args);
2141 	if (args.a0 & BIT(31)) {
2142 		EMSG("FF-A version failed with error %ld", args.a0);
2143 		panic();
2144 	}
2145 
2146 	return args.a0;
2147 }
2148 
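/*
 * Issue FFA_MEM_RETRIEVE_REQ for @cookie via the TX buffer. FF-A 1.0
 * and 1.1 use different transaction descriptor layouts, so the request
 * is built according to the negotiated version, but in both cases it
 * asks for a single RW access permission for our own endpoint. On
 * success the response descriptor is found in the RX buffer.
 */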
2149 static void *spmc_retrieve_req(uint64_t cookie,
2150 			       struct ffa_mem_transaction_x *trans)
2151 {
2152 	struct ffa_mem_access *acc_descr_array = NULL;
2153 	struct ffa_mem_access_perm *perm_descr = NULL;
2154 	struct thread_smc_args args = {
2155 		.a0 = FFA_MEM_RETRIEVE_REQ_32,
2156 		.a3 = 0,	/* Address, using TX -> MBZ */
2157 		.a4 = 0,	/* Using TX -> MBZ */
2158 	};
2159 	size_t size = 0;
2160 	int rc = 0;
2161 
2162 	if (my_rxtx.ffa_vers == FFA_VERSION_1_0) {
2163 		struct ffa_mem_transaction_1_0 *trans_descr = my_rxtx.tx;
2164 
2165 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
2166 		memset(trans_descr, 0, size);
2167 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
2168 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
2169 		trans_descr->global_handle = cookie;
2170 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
2171 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
2172 		trans_descr->mem_access_count = 1;
2173 		acc_descr_array = trans_descr->mem_access_array;
2174 	} else {
2175 		struct ffa_mem_transaction_1_1 *trans_descr = my_rxtx.tx;
2176 
2177 		size = sizeof(*trans_descr) + 1 * sizeof(struct ffa_mem_access);
2178 		memset(trans_descr, 0, size);
2179 		trans_descr->sender_id = thread_get_tsd()->rpc_target_info;
2180 		trans_descr->mem_reg_attr = FFA_NORMAL_MEM_REG_ATTR;
2181 		trans_descr->global_handle = cookie;
2182 		trans_descr->flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE |
2183 				     FFA_MEMORY_REGION_FLAG_ANY_ALIGNMENT;
2184 		trans_descr->mem_access_count = 1;
2185 		trans_descr->mem_access_offs = sizeof(*trans_descr);
2186 		trans_descr->mem_access_size = sizeof(struct ffa_mem_access);
2187 		acc_descr_array = (void *)((vaddr_t)my_rxtx.tx +
2188 					   sizeof(*trans_descr));
2189 	}
2190 	acc_descr_array->region_offs = 0;
2191 	acc_descr_array->reserved = 0;
2192 	perm_descr = &acc_descr_array->access_perm;
2193 	perm_descr->endpoint_id = my_endpoint_id;
2194 	perm_descr->perm = FFA_MEM_ACC_RW;
2195 	perm_descr->flags = 0;
2196 
2197 	args.a1 = size; /* Total Length */
2198 	args.a2 = size; /* Frag Length == Total length */
2199 	thread_smccc(&args);
2200 	if (args.a0 != FFA_MEM_RETRIEVE_RESP) {
2201 		if (args.a0 == FFA_ERROR)
2202 			EMSG("Failed to fetch cookie %#"PRIx64" error code %d",
2203 			     cookie, (int)args.a2);
2204 		else
2205 			EMSG("Failed to fetch cookie %#"PRIx64" a0 %#"PRIx64,
2206 			     cookie, args.a0);
2207 		return NULL;
2208 	}
2209 	rc = spmc_read_mem_transaction(my_rxtx.ffa_vers, my_rxtx.rx,
2210 				       my_rxtx.size, trans);
2211 	if (rc) {
2212 		EMSG("Memory transaction failure for cookie %#"PRIx64" rc %d",
2213 		     cookie, rc);
2214 		return NULL;
2215 	}
2216 
2217 	return my_rxtx.rx;
2218 }
2219 
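/*
 * Give up our access to the shared memory identified by @cookie so
 * that the owner can reclaim it with FFA_MEM_RECLAIM.
 */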
2220 void thread_spmc_relinquish(uint64_t cookie)
2221 {
2222 	struct ffa_mem_relinquish *relinquish_desc = my_rxtx.tx;
2223 	struct thread_smc_args args = {
2224 		.a0 = FFA_MEM_RELINQUISH,
2225 	};
2226 
2227 	memset(relinquish_desc, 0, sizeof(*relinquish_desc));
2228 	relinquish_desc->handle = cookie;
2229 	relinquish_desc->flags = 0;
2230 	relinquish_desc->endpoint_count = 1;
2231 	relinquish_desc->endpoint_id_array[0] = my_endpoint_id;
2232 	thread_smccc(&args);
2233 	if (!is_ffa_success(args.a0))
2234 		EMSG("Failed to relinquish cookie %#"PRIx64, cookie);
2235 }
2236 
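/*
 * Add the address ranges of a retrieved memory region to the mobj,
 * one physically contiguous range at a time. The page counts of all
 * ranges must add up to exactly @num_pages, otherwise the share is
 * rejected as inconsistent.
 */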
2237 static int set_pages(struct ffa_address_range *regions,
2238 		     unsigned int num_regions, unsigned int num_pages,
2239 		     struct mobj_ffa *mf)
2240 {
2241 	unsigned int n = 0;
2242 	unsigned int idx = 0;
2243 
2244 	for (n = 0; n < num_regions; n++) {
2245 		unsigned int page_count = READ_ONCE(regions[n].page_count);
2246 		uint64_t addr = READ_ONCE(regions[n].address);
2247 
2248 		if (mobj_ffa_add_pages_at(mf, &idx, addr, page_count))
2249 			return FFA_INVALID_PARAMETERS;
2250 	}
2251 
2252 	if (idx != num_pages)
2253 		return FFA_INVALID_PARAMETERS;
2254 
2255 	return 0;
2256 }
2257 
2258 struct mobj_ffa *thread_spmc_populate_mobj_from_rx(uint64_t cookie)
2259 {
2260 	struct mobj_ffa *ret = NULL;
2261 	struct ffa_mem_transaction_x retrieve_desc = { };
2262 	struct ffa_mem_access *descr_array = NULL;
2263 	struct ffa_mem_region *descr = NULL;
2264 	struct mobj_ffa *mf = NULL;
2265 	unsigned int num_pages = 0;
2266 	unsigned int offs = 0;
2267 	void *buf = NULL;
2268 	struct thread_smc_args ffa_rx_release_args = {
2269 		.a0 = FFA_RX_RELEASE
2270 	};
2271 
2272 	/*
2273 	 * OP-TEE only supports a single mem_region, while the
2274 	 * specification allows for more than one.
2275 	 */
2276 	buf = spmc_retrieve_req(cookie, &retrieve_desc);
2277 	if (!buf) {
2278 		EMSG("Failed to retrieve memory region for cookie %#"PRIx64,
2279 		     cookie);
2280 		return NULL;
2281 	}
2282 
2283 	descr_array = (void *)((vaddr_t)buf + retrieve_desc.mem_access_offs);
2284 	offs = READ_ONCE(descr_array->region_offs);
2285 	descr = (struct ffa_mem_region *)((vaddr_t)buf + offs);
2286 
2287 	num_pages = READ_ONCE(descr->total_page_count);
2288 	mf = mobj_ffa_spmc_new(cookie, num_pages);
2289 	if (!mf)
2290 		goto out;
2291 
2292 	if (set_pages(descr->address_range_array,
2293 		      READ_ONCE(descr->address_range_count), num_pages, mf)) {
2294 		mobj_ffa_spmc_delete(mf);
2295 		goto out;
2296 	}
2297 
2298 	ret = mf;
2299 
2300 out:
2301 	/* Release RX buffer after the mem retrieve request. */
2302 	thread_smccc(&ffa_rx_release_args);
2303 
2304 	return ret;
2305 }
2306 
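/*
 * Negotiate the FF-A version to use with the SPMC (the lower of the
 * two advertised versions), map the RX/TX buffers, retrieve our
 * endpoint ID and probe whether the SPMC supports notifications.
 */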
2307 static TEE_Result spmc_init(void)
2308 {
2309 	unsigned int major = 0;
2310 	unsigned int minor __maybe_unused = 0;
2311 	uint32_t my_vers = 0;
2312 	uint32_t vers = 0;
2313 
2314 	my_vers = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR);
2315 	vers = get_ffa_version(my_vers);
2316 	major = (vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK;
2317 	minor = (vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK;
2318 	DMSG("SPMC reported version %u.%u", major, minor);
2319 	if (major != FFA_VERSION_MAJOR) {
2320 		EMSG("Incompatible major version %u, expected %u",
2321 		     major, FFA_VERSION_MAJOR);
2322 		panic();
2323 	}
2324 	if (vers < my_vers)
2325 		my_vers = vers;
2326 	DMSG("Using version %u.%u",
2327 	     (my_vers >> FFA_VERSION_MAJOR_SHIFT) & FFA_VERSION_MAJOR_MASK,
2328 	     (my_vers >> FFA_VERSION_MINOR_SHIFT) & FFA_VERSION_MINOR_MASK);
2329 	my_rxtx.ffa_vers = my_vers;
2330 
2331 	spmc_rxtx_map(&my_rxtx);
2332 	my_endpoint_id = get_my_id();
2333 	DMSG("My endpoint ID %#x", my_endpoint_id);
2334 
2335 	if (!ffa_features(FFA_NOTIFICATION_SET)) {
2336 		spmc_notif_is_ready = true;
2337 		DMSG("Asynchronous notifications are ready");
2338 	}
2339 
2340 	return TEE_SUCCESS;
2341 }
2342 #endif /* !defined(CFG_CORE_SEL1_SPMC) */
2343 
2344 /*
2345  * boot_final() is always run before exiting at the end of boot
2346  * initialization. With virtualization the init-calls are only run once
2347  * an OP-TEE partition has been created, so in that case we have to
2348  * initialize via boot_final() to make sure a value is assigned before
2349  * it's used the first time.
2350  */
2351 #ifdef CFG_NS_VIRTUALIZATION
2352 boot_final(spmc_init);
2353 #else
2354 service_init(spmc_init);
2355 #endif
2356