// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018, EPAM Systems. All rights reserved.
 * Copyright (c) 2023-2024, Linaro Limited
 */

#include <bitstring.h>
#include <compiler.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/mutex.h>
#include <kernel/notif.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/thread_spmc.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/page_alloc.h>
#include <mm/phys_mem.h>
#include <mm/tee_mm.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <string.h>
#include <string_ext.h>
#include <util.h>

LIST_HEAD(prtn_list_head, guest_partition);

static unsigned int prtn_list_lock __nex_data = SPINLOCK_UNLOCK;

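/*
 * prtn_list holds the partitions of live guests. When a guest is
 * destroyed while it still owns shared-memory cookies, its partition is
 * moved to prtn_destroy_list until
 * virt_reclaim_cookie_from_destroyed_guest() has reclaimed them all.
 */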
static struct prtn_list_head prtn_list __nex_data =
	LIST_HEAD_INITIALIZER(prtn_list);
static struct prtn_list_head prtn_destroy_list __nex_data =
	LIST_HEAD_INITIALIZER(prtn_destroy_list);

/* Memory used by OP-TEE core */
struct memory_map *kmem_map __nex_bss;

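/*
 * Describes one slot of guest specific data registered with
 * virt_add_guest_spec_data(). A zero-initialized buffer of @size bytes
 * is allocated per guest and, unless @destroy is NULL, @destroy is
 * called on it when the guest partition is torn down.
 */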
struct guest_spec_data {
	size_t size;
	void (*destroy)(void *data);
};

static bool add_disabled __nex_bss;
static unsigned int gsd_count __nex_bss;
static struct guest_spec_data *gsd_array __nex_bss;

struct guest_partition {
	LIST_ENTRY(guest_partition) link;
	struct mmu_partition *mmu_prtn;
	struct memory_map mem_map;
	struct mutex mutex;
	void *tables_va;
	tee_mm_entry_t *tee_ram;
	tee_mm_entry_t *ta_ram;
	tee_mm_entry_t *tables;
	bool runtime_initialized;
	bool got_guest_destroyed;
	bool shutting_down;
	uint16_t id;
	struct refcount refc;
#ifdef CFG_CORE_SEL1_SPMC
	uint64_t cookies[SPMC_CORE_SEL1_MAX_SHM_COUNT];
	uint8_t cookie_count;
	bitstr_t bit_decl(shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT);
#endif
	void **data_array;
};

static struct guest_partition *current_partition[CFG_TEE_CORE_NB_CORE]
	__nex_bss;

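/*
 * Foreign interrupts are masked while accessing current_partition[] so
 * the thread cannot be preempted and migrated to another core between
 * reading the core position and using the per-core entry.
 */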
static struct guest_partition *get_current_prtn(void)
{
	struct guest_partition *ret;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	ret = current_partition[get_core_pos()];

	thread_unmask_exceptions(exceptions);

	return ret;
}

uint16_t virt_get_current_guest_id(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return 0;
	return prtn->id;
}

static void set_current_prtn(struct guest_partition *prtn)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	current_partition[get_core_pos()] = prtn;

	thread_unmask_exceptions(exceptions);
}

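/*
 * Each guest gets an equal share of the TA RAM pool, less the size of
 * its private copy of the writable core image (.data/.bss) and its
 * translation tables, rounded down to a page boundary.
 */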
static size_t get_ta_ram_size(void)
{
	size_t ta_size = nex_phys_mem_get_ta_size();

	return ROUNDDOWN(ta_size / CFG_VIRT_GUEST_COUNT - VCORE_UNPG_RW_SZ -
			 core_mmu_get_total_pages_size(), SMALL_PAGE_SIZE);
}

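/*
 * Builds the guest memory map as a copy of the core map, with the
 * writable core region (.data/.bss) retargeted to the guest's private
 * copy at @tee_data.
 */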
static TEE_Result prepare_memory_map(struct memory_map *mem_map,
				     paddr_t tee_data)
{
	struct tee_mmap_region *map = NULL;
	vaddr_t max_va = 0;
	size_t n = 0;
	/*
	 * This function assumes that kmem_map (aka static_memory_map
	 * from core_mmu.c) is not altered while it runs. This holds
	 * because all changes to static_memory_map are done during
	 * OP-TEE initialization, while this function is called when the
	 * hypervisor creates a guest.
	 */

	/* Allocate entries for the virtual guest map */
	mem_map->map = nex_calloc(kmem_map->count + 1, sizeof(*mem_map->map));
	if (!mem_map->map)
		return TEE_ERROR_OUT_OF_MEMORY;
	mem_map->count = kmem_map->count;
	mem_map->alloc_count = kmem_map->count + 1;

	memcpy(mem_map->map, kmem_map->map,
	       sizeof(*mem_map->map) * mem_map->count);

	/* Map TEE .data and .bss sections */
	for (n = 0; n < mem_map->count; n++) {
		map = mem_map->map + n;
		if (map->va == (vaddr_t)(VCORE_UNPG_RW_PA)) {
			map->type = MEM_AREA_TEE_RAM_RW;
			map->attr = core_mmu_type_to_attr(map->type);
			map->pa = tee_data;
		}
		if (map->va + map->size > max_va)
			max_va = map->va + map->size;
	}

	DMSG("New map (%08lx):", (vaddr_t)(VCORE_UNPG_RW_PA));

	for (n = 0; n < mem_map->count; n++)
		DMSG("T: %-16s rsz: %08x, pa: %08lx, va: %08lx, sz: %08lx attr: %x",
		     teecore_memtype_name(mem_map->map[n].type),
		     mem_map->map[n].region_size, mem_map->map[n].pa,
		     mem_map->map[n].va, mem_map->map[n].size,
		     mem_map->map[n].attr);
	return TEE_SUCCESS;
}

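/*
 * Called once at boot: initializes the physical memory pools covering
 * all secure RAM, carves out the areas already used by OP-TEE core so
 * per-guest allocations cannot overlap them, and saves the memory map
 * for later cloning by prepare_memory_map().
 */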
void virt_init_memory(struct memory_map *mem_map, paddr_t secmem0_base,
		      paddr_size_t secmem0_size, paddr_t secmem1_base,
		      paddr_size_t secmem1_size)
{
	size_t n = 0;

	/* Init page pool that covers all secure RAM */
	nex_phys_mem_init(secmem0_base, secmem0_size, secmem1_base,
			  secmem1_size);

	/* Carve out areas that are used by OP-TEE core */
	for (n = 0; n < mem_map->count; n++) {
		struct tee_mmap_region *map = mem_map->map + n;

		switch (map->type) {
		case MEM_AREA_TEE_RAM_RX:
		case MEM_AREA_TEE_RAM_RO:
		case MEM_AREA_NEX_RAM_RO:
		case MEM_AREA_NEX_RAM_RW:
			DMSG("Carving out area of type %d (0x%08lx-0x%08lx)",
			     map->type, map->pa, map->pa + map->size);
			if (!nex_phys_mem_alloc2(map->pa, map->size))
				panic("Can't carve out used area");
			break;
		default:
			continue;
		}
	}

	kmem_map = mem_map;
}

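/*
 * Allocates the guest's private TEE RAM, TA RAM and translation
 * tables, builds its memory map and MMU partition, then temporarily
 * switches to the guest mapping to clear .bss and seed .data from the
 * original image.
 */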
static TEE_Result configure_guest_prtn_mem(struct guest_partition *prtn)
{
	TEE_Result res = TEE_SUCCESS;
	paddr_t original_data_pa = 0;

	prtn->tee_ram = nex_phys_mem_core_alloc(VCORE_UNPG_RW_SZ);
	if (!prtn->tee_ram) {
		EMSG("Can't allocate memory for TEE runtime context");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TEE RAM: %08" PRIxPA, tee_mm_get_smem(prtn->tee_ram));

	prtn->ta_ram = nex_phys_mem_ta_alloc(get_ta_ram_size());
	if (!prtn->ta_ram) {
		EMSG("Can't allocate memory for TA data");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TA RAM: %08" PRIxPA, tee_mm_get_smem(prtn->ta_ram));

	prtn->tables = nex_phys_mem_core_alloc(core_mmu_get_total_pages_size());
	if (!prtn->tables) {
		EMSG("Can't allocate memory for page tables");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->tables_va = phys_to_virt(tee_mm_get_smem(prtn->tables),
				       MEM_AREA_SEC_RAM_OVERALL,
				       core_mmu_get_total_pages_size());
	assert(prtn->tables_va);

	prtn->mmu_prtn = core_alloc_mmu_prtn(prtn->tables_va);
	if (!prtn->mmu_prtn) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	res = prepare_memory_map(&prtn->mem_map,
				 tee_mm_get_smem(prtn->tee_ram));
	if (res)
		goto err;

	core_init_mmu_prtn(prtn->mmu_prtn, &prtn->mem_map);

	original_data_pa = virt_to_phys(__data_start);
	/* Switch to guest's mappings */
	core_mmu_set_prtn(prtn->mmu_prtn);

	/* Clear .bss */
	memset((void *)(VCORE_UNPG_RW_PA), 0, VCORE_UNPG_RW_SZ);

	/* Copy the .data section from the R/O original */
	memcpy(__data_start,
	       phys_to_virt(original_data_pa, MEM_AREA_SEC_RAM_OVERALL,
			    __data_end - __data_start),
	       __data_end - __data_start);

	return TEE_SUCCESS;

err:
	if (prtn->tee_ram)
		tee_mm_free(prtn->tee_ram);
	if (prtn->ta_ram)
		tee_mm_free(prtn->ta_ram);
	if (prtn->tables)
		tee_mm_free(prtn->tables);
	nex_free(prtn->mmu_prtn);
	nex_free(prtn->mem_map.map);

	return res;
}

static void destroy_gsd(struct guest_partition *prtn, bool free_only)
{
	size_t n = 0;

	for (n = 0; n < gsd_count; n++) {
		if (!free_only && prtn->data_array[n] && gsd_array[n].destroy)
			gsd_array[n].destroy(prtn->data_array[n]);
		nex_free(prtn->data_array[n]);
	}
	nex_free(prtn->data_array);
	prtn->data_array = NULL;
}

static TEE_Result alloc_gsd(struct guest_partition *prtn)
{
	unsigned int n = 0;

	if (!gsd_count)
		return TEE_SUCCESS;

	prtn->data_array = nex_calloc(gsd_count, sizeof(void *));
	if (!prtn->data_array)
		return TEE_ERROR_OUT_OF_MEMORY;

	for (n = 0; n < gsd_count; n++) {
		prtn->data_array[n] = nex_calloc(1, gsd_array[n].size);
		if (!prtn->data_array[n]) {
			destroy_gsd(prtn, true /*free_only*/);
			return TEE_ERROR_OUT_OF_MEMORY;
		}
	}

	return TEE_SUCCESS;
}
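
/*
 * Called when the hypervisor reports that a new guest VM has been
 * created. The guest mapping is activated only for the duration of the
 * per-guest runtime initialization and reverted before returning.
 */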
TEE_Result virt_guest_created(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;

	if (guest_id == HYP_CLNT_ID)
		return TEE_ERROR_BAD_PARAMETERS;

	prtn = nex_calloc(1, sizeof(*prtn));
	if (!prtn)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = alloc_gsd(prtn);
	if (res)
		goto err_free_prtn;

	prtn->id = guest_id;
	mutex_init(&prtn->mutex);
	refcount_set(&prtn->refc, 1);
	res = configure_guest_prtn_mem(prtn);
	if (res)
		goto err_free_gsd;

	set_current_prtn(prtn);

	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
	phys_mem_init(0, 0, tee_mm_get_smem(prtn->ta_ram),
		      tee_mm_get_bytes(prtn->ta_ram));
	page_alloc_init();
	/* Initialize threads */
	thread_init_threads();
	/* Do the preinitcalls */
	call_preinitcalls();

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_INSERT_HEAD(&prtn_list, prtn, link);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	IMSG("Added guest %d", guest_id);

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();

	return TEE_SUCCESS;

err_free_gsd:
	destroy_gsd(prtn, true /*free_only*/);
err_free_prtn:
	nex_free(prtn);
	return res;
}

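/*
 * Returns true if the partition still owns shared-memory resources
 * (cookies or SHM bits) that must be reclaimed before it can be freed.
 */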
static bool
prtn_have_remaining_resources(struct guest_partition *prtn __maybe_unused)
{
#ifdef CFG_CORE_SEL1_SPMC
	int i = 0;

	if (prtn->cookie_count)
		return true;
	bit_ffs(prtn->shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT, &i);
	return i >= 0;
#else
	return false;
#endif
}

static void get_prtn(struct guest_partition *prtn)
{
	if (!refcount_inc(&prtn->refc))
		panic();
}

uint16_t virt_get_guest_id(struct guest_partition *prtn)
{
	if (!prtn)
		return 0;
	return prtn->id;
}

static struct guest_partition *find_guest_by_id_unlocked(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;

	LIST_FOREACH(prtn, &prtn_list, link)
		if (!prtn->shutting_down && prtn->id == guest_id)
			return prtn;

	return NULL;
}

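/*
 * Cursor-style iteration over live guests: returns the next partition
 * with a reference taken, skipping guests that are shutting down, and
 * drops the caller's reference on @prtn. Usage sketch (do_something()
 * is a hypothetical callback):
 *
 *	struct guest_partition *prtn = NULL;
 *
 *	while ((prtn = virt_next_guest(prtn)))
 *		do_something(prtn);
 */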
struct guest_partition *virt_next_guest(struct guest_partition *prtn)
{
	struct guest_partition *ret = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	if (prtn)
		ret = LIST_NEXT(prtn, link);
	else
		ret = LIST_FIRST(&prtn_list);

	while (ret && ret->shutting_down)
		ret = LIST_NEXT(ret, link);
	if (ret)
		get_prtn(ret);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	virt_put_guest(prtn);

	return ret;
}

struct guest_partition *virt_get_current_guest(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (prtn)
		get_prtn(prtn);
	return prtn;
}

struct guest_partition *virt_get_guest(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_guest_by_id_unlocked(guest_id);
	if (prtn)
		get_prtn(prtn);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return prtn;
}

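/*
 * Drops a reference to @prtn. The last reference may only disappear
 * once the guest is shutting down; at that point all per-guest
 * resources are released, but the final nex_free() of the partition
 * itself is deferred while shared-memory cookies remain to be
 * reclaimed.
 */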
void virt_put_guest(struct guest_partition *prtn)
{
	if (prtn && refcount_dec(&prtn->refc)) {
		uint32_t exceptions = 0;
		bool do_free = true;

		assert(prtn->shutting_down);

		exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
		LIST_REMOVE(prtn, link);
		if (prtn_have_remaining_resources(prtn)) {
			LIST_INSERT_HEAD(&prtn_destroy_list, prtn, link);
			/*
			 * Delay the nex_free() until
			 * virt_reclaim_cookie_from_destroyed_guest()
			 * is done with this partition.
			 */
			do_free = false;
		}
		cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

		destroy_gsd(prtn, false /*!free_only*/);
		tee_mm_free(prtn->tee_ram);
		prtn->tee_ram = NULL;
		tee_mm_free(prtn->ta_ram);
		prtn->ta_ram = NULL;
		tee_mm_free(prtn->tables);
		prtn->tables = NULL;
		core_free_mmu_prtn(prtn->mmu_prtn);
		prtn->mmu_prtn = NULL;
		nex_free(prtn->mem_map.map);
		prtn->mem_map.map = NULL;
		if (do_free)
			nex_free(prtn);
	}
}

TEE_Result virt_guest_destroyed(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	IMSG("Removing guest %" PRIu16, guest_id);

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);

	prtn = find_guest_by_id_unlocked(guest_id);
	if (prtn && !prtn->got_guest_destroyed)
		prtn->got_guest_destroyed = true;
	else
		prtn = NULL;

	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	if (prtn) {
		notif_deliver_atomic_event(NOTIF_EVENT_SHUTDOWN, prtn->id);

		exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
		prtn->shutting_down = true;
		cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

		virt_put_guest(prtn);
	} else {
		EMSG("Client with id %" PRIu16 " not found", guest_id);
	}

	return TEE_SUCCESS;
}

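/*
 * Activates the guest's MMU partition on the current core, taking a
 * reference that the matching virt_unset_guest() releases.
 */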
TEE_Result virt_set_guest(uint16_t guest_id)
{
	struct guest_partition *prtn = get_current_prtn();

	/* This can be true only if we return from IRQ RPC */
	if (prtn && prtn->id == guest_id)
		return TEE_SUCCESS;

	if (prtn)
		panic("Virtual guest partition is already set");

	prtn = virt_get_guest(guest_id);
	if (!prtn)
		return TEE_ERROR_ITEM_NOT_FOUND;

	set_current_prtn(prtn);
	core_mmu_set_prtn(prtn->mmu_prtn);

	return TEE_SUCCESS;
}

void virt_unset_guest(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return;

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();
	virt_put_guest(prtn);
}

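/*
 * Lazily initializes the guest's TEE runtime on its first standard
 * call, using double-checked locking around prtn->mutex so the
 * initialization runs exactly once.
 */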
void virt_on_stdcall(void)
{
	struct guest_partition *prtn = get_current_prtn();

	/* Initialize runtime on first std call */
	if (!prtn->runtime_initialized) {
		mutex_lock(&prtn->mutex);
		if (!prtn->runtime_initialized) {
			init_tee_runtime();
			call_driver_initcalls();
			prtn->runtime_initialized = true;
		}
		mutex_unlock(&prtn->mutex);
	}
}

struct memory_map *virt_get_memory_map(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return NULL;

	return &prtn->mem_map;
}

#ifdef CFG_CORE_SEL1_SPMC
static int find_cookie(struct guest_partition *prtn, uint64_t cookie)
{
	int i = 0;

	for (i = 0; i < prtn->cookie_count; i++)
		if (prtn->cookies[i] == cookie)
			return i;
	return -1;
}

static struct guest_partition *find_prtn_cookie(uint64_t cookie, int *idx)
{
	struct guest_partition *prtn = NULL;
	int i = 0;

	LIST_FOREACH(prtn, &prtn_list, link) {
		i = find_cookie(prtn, cookie);
		if (i >= 0) {
			if (idx)
				*idx = i;
			return prtn;
		}
	}

	return NULL;
}

TEE_Result virt_add_cookie_to_current_guest(uint64_t cookie)
{
	TEE_Result res = TEE_ERROR_ACCESS_DENIED;
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	if (find_prtn_cookie(cookie, NULL))
		goto out;

	prtn = current_partition[get_core_pos()];
	if (prtn->cookie_count < ARRAY_SIZE(prtn->cookies)) {
		prtn->cookies[prtn->cookie_count] = cookie;
		prtn->cookie_count++;
		res = TEE_SUCCESS;
	}
out:
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return res;
}

void virt_remove_cookie(uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_prtn_cookie(cookie, &i);
	if (prtn) {
		memmove(prtn->cookies + i, prtn->cookies + i + 1,
			sizeof(uint64_t) * (prtn->cookie_count - i - 1));
		prtn->cookie_count--;
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
}

uint16_t virt_find_guest_by_cookie(uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;
	uint16_t ret = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_prtn_cookie(cookie, NULL);
	if (prtn)
		ret = prtn->id;

	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return ret;
}

bitstr_t *virt_get_shm_bits(void)
{
	return get_current_prtn()->shm_bits;
}

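/*
 * Cookies with FFA_MEMORY_HANDLE_HYPERVISOR_BIT set are tracked in the
 * cookies[] array; other cookies encode an index into shm_bits once the
 * non-secure bit and the partition ID field are masked off.
 */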
static TEE_Result reclaim_cookie(struct guest_partition *prtn, uint64_t cookie)
{
	if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
		size_t n = 0;

		for (n = 0; n < prtn->cookie_count; n++) {
			if (prtn->cookies[n] == cookie) {
				memmove(prtn->cookies + n,
					prtn->cookies + n + 1,
					sizeof(uint64_t) *
						(prtn->cookie_count - n - 1));
				prtn->cookie_count--;
				return TEE_SUCCESS;
			}
		}
	} else {
		uint64_t mask = FFA_MEMORY_HANDLE_NON_SECURE_BIT |
				SHIFT_U64(FFA_MEMORY_HANDLE_PRTN_MASK,
					  FFA_MEMORY_HANDLE_PRTN_SHIFT);
		int64_t i = cookie & ~mask;

		if (i >= 0 && i < SPMC_CORE_SEL1_MAX_SHM_COUNT &&
		    bit_test(prtn->shm_bits, i)) {
			bit_clear(prtn->shm_bits, i);
			return TEE_SUCCESS;
		}
	}

	return TEE_ERROR_ITEM_NOT_FOUND;
}

TEE_Result virt_reclaim_cookie_from_destroyed_guest(uint16_t guest_id,
						    uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	TEE_Result res = TEE_ERROR_ITEM_NOT_FOUND;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_FOREACH(prtn, &prtn_destroy_list, link) {
		if (prtn->id == guest_id) {
			res = reclaim_cookie(prtn, cookie);
			if (prtn_have_remaining_resources(prtn))
				prtn = NULL;
			else
				LIST_REMOVE(prtn, link);
			break;
		}
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	nex_free(prtn);

	return res;
}
#endif /*CFG_CORE_SEL1_SPMC*/

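/*
 * Registers a slot of guest specific data and returns its id in
 * @data_id. Usage sketch with a hypothetical per-guest state struct
 * (must run before the first exit to the normal world, e.g. from a
 * nexus initcall):
 *
 *	static unsigned int my_data_id __nex_bss;
 *
 *	static TEE_Result my_init(void)
 *	{
 *		return virt_add_guest_spec_data(&my_data_id,
 *						sizeof(struct my_state),
 *						NULL);
 *	}
 *
 * and later, for a given guest partition:
 *
 *	struct my_state *s = virt_get_guest_spec_data(prtn, my_data_id);
 */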
TEE_Result virt_add_guest_spec_data(unsigned int *data_id, size_t data_size,
				    void (*data_destroy)(void *data))
{
	void *p = NULL;

	/*
	 * This function only executes successfully in a single-threaded
	 * environment, before exiting to the normal world the first
	 * time. If add_disabled is true, we're not in that environment
	 * any longer.
	 */
	if (add_disabled)
		return TEE_ERROR_BAD_PARAMETERS;

	p = nex_realloc(gsd_array, sizeof(*gsd_array) * (gsd_count + 1));
	if (!p)
		return TEE_ERROR_OUT_OF_MEMORY;
	gsd_array = p;

	gsd_array[gsd_count] = (struct guest_spec_data){
		.size = data_size,
		.destroy = data_destroy,
	};
	*data_id = gsd_count + 1;
	gsd_count++;
	return TEE_SUCCESS;
}

void *virt_get_guest_spec_data(struct guest_partition *prtn,
			       unsigned int data_id)
{
	assert(data_id);
	if (!data_id || !prtn || data_id > gsd_count)
		return NULL;
	return prtn->data_array[data_id - 1];
}

static TEE_Result virt_disable_add(void)
{
	add_disabled = true;

	return TEE_SUCCESS;
}
nex_release_init_resource(virt_disable_add);
759