xref: /optee_os/core/arch/arm/kernel/virtualization.c (revision 8cf8403b7f1ddbb2c0c9e4e5ef1bc04fa402024b)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018, EPAM Systems. All rights reserved.
 * Copyright (c) 2023-2024, Linaro Limited
 */

#include <assert.h>
#include <bitstring.h>
#include <compiler.h>
#include <initcall.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/mutex.h>
#include <kernel/notif.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/thread_spmc.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/phys_mem.h>
#include <mm/tee_mm.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <string.h>
#include <string_ext.h>
#include <util.h>

LIST_HEAD(prtn_list_head, guest_partition);

static unsigned int prtn_list_lock __nex_data = SPINLOCK_UNLOCK;

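/*
 * prtn_list holds the active guest partitions. prtn_destroy_list holds
 * partitions of already destroyed guests that still own FF-A cookies;
 * such a partition is freed once
 * virt_reclaim_cookie_from_destroyed_guest() has reclaimed its last
 * cookie.
 */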
static struct prtn_list_head prtn_list __nex_data =
	LIST_HEAD_INITIALIZER(prtn_list);
static struct prtn_list_head prtn_destroy_list __nex_data =
	LIST_HEAD_INITIALIZER(prtn_destroy_list);

/* Memory used by OP-TEE core */
static struct memory_map *kmem_map __nex_bss;

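/*
 * Guest specific data: before the first guest is created, subsystems
 * may register a per-guest data blob of a given size together with an
 * optional destructor. Each guest partition then carries one instance
 * per registered entry in its data_array.
 */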
struct guest_spec_data {
	size_t size;
	void (*destroy)(void *data);
};

static bool add_disabled __nex_bss;
static unsigned int gsd_count __nex_bss;
static struct guest_spec_data *gsd_array __nex_bss;

struct guest_partition {
	LIST_ENTRY(guest_partition) link;
	struct mmu_partition *mmu_prtn;
	struct memory_map mem_map;
	struct mutex mutex;
	void *tables_va;
	tee_mm_entry_t *tee_ram;
	tee_mm_entry_t *ta_ram;
	tee_mm_entry_t *tables;
	bool runtime_initialized;
	bool got_guest_destroyed;
	bool shutting_down;
	uint16_t id;
	struct refcount refc;
#ifdef CFG_CORE_SEL1_SPMC
	uint64_t cookies[SPMC_CORE_SEL1_MAX_SHM_COUNT];
	uint8_t cookie_count;
	bitstr_t bit_decl(shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT);
#endif
	void **data_array;
};

static struct guest_partition *current_partition[CFG_TEE_CORE_NB_CORE] __nex_bss;

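/*
 * Foreign interrupts are masked while accessing the per-core partition
 * pointer so the thread cannot be preempted and migrated to another
 * core between get_core_pos() and the access itself.
 */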
static struct guest_partition *get_current_prtn(void)
{
	struct guest_partition *ret = NULL;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	ret = current_partition[get_core_pos()];

	thread_unmask_exceptions(exceptions);

	return ret;
}

uint16_t virt_get_current_guest_id(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return 0;
	return prtn->id;
}

static void set_current_prtn(struct guest_partition *prtn)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	current_partition[get_core_pos()] = prtn;

	thread_unmask_exceptions(exceptions);
}

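/*
 * Each guest gets an equal share of the TA RAM pool, less the space
 * needed for the guest's private copy of the OP-TEE .data/.bss segment
 * and its translation tables, rounded down to a page boundary.
 */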
static size_t get_ta_ram_size(void)
{
	size_t ta_size = nex_phys_mem_get_ta_size();

	return ROUNDDOWN(ta_size / CFG_VIRT_GUEST_COUNT - VCORE_UNPG_RW_SZ -
			 core_mmu_get_total_pages_size(), SMALL_PAGE_SIZE);
}

static TEE_Result prepare_memory_map(struct memory_map *mem_map,
				     paddr_t tee_data)
{
	struct tee_mmap_region *map = NULL;
	vaddr_t max_va = 0;
	size_t n = 0;

	/*
	 * This function assumes that at the time of operation,
	 * kmem_map (aka static_memory_map from core_mmu.c)
	 * will not be altered. This is true, because all
	 * changes to static_memory_map are done during
	 * OP-TEE initialization, while this function is
	 * called when the hypervisor creates a guest.
	 */

	/* Allocate entries for virtual guest map */
	mem_map->map = nex_calloc(kmem_map->count + 1, sizeof(*mem_map->map));
	if (!mem_map->map)
		return TEE_ERROR_OUT_OF_MEMORY;
	mem_map->count = kmem_map->count;
	mem_map->alloc_count = kmem_map->count + 1;

	memcpy(mem_map->map, kmem_map->map,
	       sizeof(*mem_map->map) * mem_map->count);

	/* Map TEE .data and .bss sections */
	for (n = 0; n < mem_map->count; n++) {
		map = mem_map->map + n;
		if (map->va == (vaddr_t)(VCORE_UNPG_RW_PA)) {
			map->type = MEM_AREA_TEE_RAM_RW;
			map->attr = core_mmu_type_to_attr(map->type);
			map->pa = tee_data;
		}
		if (map->va + map->size > max_va)
			max_va = map->va + map->size;
	}

	DMSG("New map (%08lx):", (vaddr_t)(VCORE_UNPG_RW_PA));

	for (n = 0; n < mem_map->count; n++)
		DMSG("T: %-16s rsz: %08x, pa: %08lx, va: %08lx, sz: %08lx attr: %x",
		     teecore_memtype_name(mem_map->map[n].type),
		     mem_map->map[n].region_size, mem_map->map[n].pa,
		     mem_map->map[n].va, mem_map->map[n].size,
		     mem_map->map[n].attr);
	return TEE_SUCCESS;
}

void virt_init_memory(struct memory_map *mem_map, paddr_t secmem0_base,
		      paddr_size_t secmem0_size, paddr_t secmem1_base,
		      paddr_size_t secmem1_size)
{
	size_t n = 0;

	/* Init page pool that covers all secure RAM */
	nex_phys_mem_init(secmem0_base, secmem0_size, secmem1_base,
			  secmem1_size);

	/* Carve out areas that are used by OP-TEE core */
	for (n = 0; n < mem_map->count; n++) {
		struct tee_mmap_region *map = mem_map->map + n;

		switch (map->type) {
		case MEM_AREA_TEE_RAM_RX:
		case MEM_AREA_TEE_RAM_RO:
		case MEM_AREA_NEX_RAM_RO:
		case MEM_AREA_NEX_RAM_RW:
			DMSG("Carving out area of type %d (0x%08lx-0x%08lx)",
			     map->type, map->pa, map->pa + map->size);
			if (!nex_phys_mem_alloc2(map->pa, map->size))
				panic("Can't carve out used area");
			break;
		default:
			continue;
		}
	}

	kmem_map = mem_map;
}

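/*
 * Each guest partition gets a private physical copy of the OP-TEE
 * .data/.bss segment ("TEE RAM"), its own TA RAM and its own set of
 * translation tables, all carved from the nexus physical pools. The
 * fresh .bss copy is zeroed and .data is copied from the read-only
 * original while the guest's mapping is active.
 */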
static TEE_Result configure_guest_prtn_mem(struct guest_partition *prtn)
{
	TEE_Result res = TEE_SUCCESS;
	paddr_t original_data_pa = 0;

	prtn->tee_ram = nex_phys_mem_core_alloc(VCORE_UNPG_RW_SZ);
	if (!prtn->tee_ram) {
		EMSG("Can't allocate memory for TEE runtime context");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TEE RAM: %08" PRIxPA, tee_mm_get_smem(prtn->tee_ram));

	prtn->ta_ram = nex_phys_mem_ta_alloc(get_ta_ram_size());
	if (!prtn->ta_ram) {
		EMSG("Can't allocate memory for TA data");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TA RAM: %08" PRIxPA, tee_mm_get_smem(prtn->ta_ram));

	prtn->tables = nex_phys_mem_core_alloc(core_mmu_get_total_pages_size());
	if (!prtn->tables) {
		EMSG("Can't allocate memory for page tables");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->tables_va = phys_to_virt(tee_mm_get_smem(prtn->tables),
				       MEM_AREA_SEC_RAM_OVERALL,
				       core_mmu_get_total_pages_size());
	assert(prtn->tables_va);

	prtn->mmu_prtn = core_alloc_mmu_prtn(prtn->tables_va);
	if (!prtn->mmu_prtn) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	res = prepare_memory_map(&prtn->mem_map,
				 tee_mm_get_smem(prtn->tee_ram));
	if (res)
		goto err;

	core_init_mmu_prtn(prtn->mmu_prtn, &prtn->mem_map);

	original_data_pa = virt_to_phys(__data_start);
	/* Switch to guest's mappings */
	core_mmu_set_prtn(prtn->mmu_prtn);

	/* clear .bss */
	memset((void *)(VCORE_UNPG_RW_PA), 0, VCORE_UNPG_RW_SZ);

	/* copy .data section from R/O original */
	memcpy(__data_start,
	       phys_to_virt(original_data_pa, MEM_AREA_SEC_RAM_OVERALL,
			    __data_end - __data_start),
	       __data_end - __data_start);

	return TEE_SUCCESS;

err:
	if (prtn->tee_ram)
		tee_mm_free(prtn->tee_ram);
	if (prtn->ta_ram)
		tee_mm_free(prtn->ta_ram);
	if (prtn->tables)
		tee_mm_free(prtn->tables);
	nex_free(prtn->mmu_prtn);
	nex_free(prtn->mem_map.map);

	return res;
}

static void destroy_gsd(struct guest_partition *prtn, bool free_only)
{
	size_t n = 0;

	for (n = 0; n < gsd_count; n++) {
		if (!free_only && prtn->data_array[n] && gsd_array[n].destroy)
			gsd_array[n].destroy(prtn->data_array[n]);
		nex_free(prtn->data_array[n]);
	}
	nex_free(prtn->data_array);
	prtn->data_array = NULL;
}

static TEE_Result alloc_gsd(struct guest_partition *prtn)
{
	unsigned int n = 0;

	if (!gsd_count)
		return TEE_SUCCESS;

	prtn->data_array = nex_calloc(gsd_count, sizeof(void *));
	if (!prtn->data_array)
		return TEE_ERROR_OUT_OF_MEMORY;

	for (n = 0; n < gsd_count; n++) {
		prtn->data_array[n] = nex_calloc(1, gsd_array[n].size);
		if (!prtn->data_array[n]) {
			destroy_gsd(prtn, true /*free_only*/);
			return TEE_ERROR_OUT_OF_MEMORY;
		}
	}

	return TEE_SUCCESS;
}
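
/*
 * Called when the hypervisor reports that a new guest VM was created
 * (e.g. OPTEE_SMC_VM_CREATED). The partition memory and threads are
 * set up here; the TEE runtime itself is initialized lazily on the
 * guest's first standard call, see virt_on_stdcall().
 */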
TEE_Result virt_guest_created(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;

	prtn = nex_calloc(1, sizeof(*prtn));
	if (!prtn)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = alloc_gsd(prtn);
	if (res)
		goto err_free_prtn;

	prtn->id = guest_id;
	mutex_init(&prtn->mutex);
	refcount_set(&prtn->refc, 1);
	res = configure_guest_prtn_mem(prtn);
	if (res)
		goto err_free_gsd;

	set_current_prtn(prtn);

	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
	phys_mem_init(0, 0, tee_mm_get_smem(prtn->ta_ram),
		      tee_mm_get_bytes(prtn->ta_ram));
	/* Initialize threads */
	thread_init_threads();
	/* Do the preinitcalls */
	call_preinitcalls();

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_INSERT_HEAD(&prtn_list, prtn, link);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	IMSG("Added guest %d", guest_id);

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();

	return TEE_SUCCESS;

err_free_gsd:
	destroy_gsd(prtn, true /*free_only*/);
err_free_prtn:
	nex_free(prtn);
	return res;
}

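/*
 * Returns true if the partition still owns FF-A shared memory
 * resources (hypervisor cookies or SPMC-allocated SHM handles) that
 * must be reclaimed before the partition struct can be freed.
 */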
static bool
prtn_have_remaining_resources(struct guest_partition *prtn __maybe_unused)
{
#ifdef CFG_CORE_SEL1_SPMC
	int i = 0;

	if (prtn->cookie_count)
		return true;
	bit_ffs(prtn->shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT, &i);
	return i >= 0;
#else
	return false;
#endif
}

static void get_prtn(struct guest_partition *prtn)
{
	if (!refcount_inc(&prtn->refc))
		panic();
}

uint16_t virt_get_guest_id(struct guest_partition *prtn)
{
	if (!prtn)
		return 0;
	return prtn->id;
}

static struct guest_partition *find_guest_by_id_unlocked(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;

	LIST_FOREACH(prtn, &prtn_list, link)
		if (!prtn->shutting_down && prtn->id == guest_id)
			return prtn;

	return NULL;
}

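/*
 * Iterates over the active partitions: pass NULL to get a reference to
 * the first guest, or a partition from a previous call to get the one
 * after it. The reference on the input partition is released and a
 * reference is taken on the returned one, so a caller can simply loop
 * until NULL is returned, e.g. (a sketch, do_something() is
 * hypothetical):
 *
 *	struct guest_partition *prtn = NULL;
 *
 *	while ((prtn = virt_next_guest(prtn)))
 *		do_something(virt_get_guest_id(prtn));
 */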
struct guest_partition *virt_next_guest(struct guest_partition *prtn)
{
	struct guest_partition *ret = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	if (prtn)
		ret = LIST_NEXT(prtn, link);
	else
		ret = LIST_FIRST(&prtn_list);

	while (ret && ret->shutting_down)
		ret = LIST_NEXT(ret, link);
	if (ret)
		get_prtn(ret);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	virt_put_guest(prtn);

	return ret;
}

struct guest_partition *virt_get_current_guest(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (prtn)
		get_prtn(prtn);
	return prtn;
}

struct guest_partition *virt_get_guest(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_guest_by_id_unlocked(guest_id);
	if (prtn)
		get_prtn(prtn);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return prtn;
}

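/*
 * Releases one reference. When the last reference is dropped, the
 * partition's memory and guest specific data are freed. If FF-A
 * cookies remain, the struct itself is parked on prtn_destroy_list so
 * that the cookies can still be reclaimed; otherwise it is freed here.
 */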
void virt_put_guest(struct guest_partition *prtn)
{
	if (prtn && refcount_dec(&prtn->refc)) {
		uint32_t exceptions = 0;
		bool do_free = true;

		assert(prtn->shutting_down);

		exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
		LIST_REMOVE(prtn, link);
		if (prtn_have_remaining_resources(prtn)) {
			LIST_INSERT_HEAD(&prtn_destroy_list, prtn, link);
			/*
			 * Delay the nex_free() until
			 * virt_reclaim_cookie_from_destroyed_guest()
			 * is done with this partition.
			 */
			do_free = false;
		}
		cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

		destroy_gsd(prtn, false /*!free_only*/);
		tee_mm_free(prtn->tee_ram);
		prtn->tee_ram = NULL;
		tee_mm_free(prtn->ta_ram);
		prtn->ta_ram = NULL;
		tee_mm_free(prtn->tables);
		prtn->tables = NULL;
		core_free_mmu_prtn(prtn->mmu_prtn);
		prtn->mmu_prtn = NULL;
		nex_free(prtn->mem_map.map);
		prtn->mem_map.map = NULL;
		if (do_free)
			nex_free(prtn);
	}
}

TEE_Result virt_guest_destroyed(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	IMSG("Removing guest %"PRId16, guest_id);

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);

	prtn = find_guest_by_id_unlocked(guest_id);
	if (prtn && !prtn->got_guest_destroyed)
		prtn->got_guest_destroyed = true;
	else
		prtn = NULL;

	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	if (prtn) {
		notif_deliver_atomic_event(NOTIF_EVENT_SHUTDOWN, prtn->id);

		exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
		prtn->shutting_down = true;
		cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

		virt_put_guest(prtn);
	} else {
		EMSG("Client with id %d is not found", guest_id);
	}

	return TEE_SUCCESS;
}

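/*
 * Activates the partition of guest_id on the calling core: the
 * per-core partition pointer and the guest's MMU mappings are switched
 * together. The reference taken here is held until virt_unset_guest().
 */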
TEE_Result virt_set_guest(uint16_t guest_id)
{
	struct guest_partition *prtn = get_current_prtn();

	/* This can be true only if we return from IRQ RPC */
	if (prtn && prtn->id == guest_id)
		return TEE_SUCCESS;

	if (prtn)
		panic("Virtual guest partition is already set");

	prtn = virt_get_guest(guest_id);
	if (!prtn)
		return TEE_ERROR_ITEM_NOT_FOUND;

	set_current_prtn(prtn);
	core_mmu_set_prtn(prtn->mmu_prtn);

	return TEE_SUCCESS;
}

void virt_unset_guest(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return;

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();
	virt_put_guest(prtn);
}

void virt_on_stdcall(void)
{
	struct guest_partition *prtn = get_current_prtn();

	/* Initialize runtime on first std call */
	if (!prtn->runtime_initialized) {
		mutex_lock(&prtn->mutex);
		if (!prtn->runtime_initialized) {
			init_tee_runtime();
			call_driver_initcalls();
			prtn->runtime_initialized = true;
		}
		mutex_unlock(&prtn->mutex);
	}
}

struct memory_map *virt_get_memory_map(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return NULL;

	return &prtn->mem_map;
}

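/*
 * With the S-EL1 SPMC, each guest's shared memory objects are tracked
 * per partition: cookies allocated by a hypervisor carry
 * FFA_MEMORY_HANDLE_HYPERVISOR_BIT and are kept in the cookies[]
 * array, while SPMC-allocated handles are tracked as bits in shm_bits.
 */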
#ifdef CFG_CORE_SEL1_SPMC
static int find_cookie(struct guest_partition *prtn, uint64_t cookie)
{
	int i = 0;

	for (i = 0; i < prtn->cookie_count; i++)
		if (prtn->cookies[i] == cookie)
			return i;
	return -1;
}

static struct guest_partition *find_prtn_cookie(uint64_t cookie, int *idx)
{
	struct guest_partition *prtn = NULL;
	int i = 0;

	LIST_FOREACH(prtn, &prtn_list, link) {
		i = find_cookie(prtn, cookie);
		if (i >= 0) {
			if (idx)
				*idx = i;
			return prtn;
		}
	}

	return NULL;
}

TEE_Result virt_add_cookie_to_current_guest(uint64_t cookie)
{
	TEE_Result res = TEE_ERROR_ACCESS_DENIED;
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	if (find_prtn_cookie(cookie, NULL))
		goto out;

	prtn = current_partition[get_core_pos()];
	if (prtn->cookie_count < ARRAY_SIZE(prtn->cookies)) {
		prtn->cookies[prtn->cookie_count] = cookie;
		prtn->cookie_count++;
		res = TEE_SUCCESS;
	}
out:
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return res;
}

void virt_remove_cookie(uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_prtn_cookie(cookie, &i);
	if (prtn) {
		memmove(prtn->cookies + i, prtn->cookies + i + 1,
			sizeof(uint64_t) * (prtn->cookie_count - i - 1));
		prtn->cookie_count--;
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
}

uint16_t virt_find_guest_by_cookie(uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;
	uint16_t ret = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_prtn_cookie(cookie, NULL);
	if (prtn)
		ret = prtn->id;

	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return ret;
}

bitstr_t *virt_get_shm_bits(void)
{
	return get_current_prtn()->shm_bits;
}

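/*
 * A cookie without FFA_MEMORY_HANDLE_HYPERVISOR_BIT encodes its
 * shm_bits index directly in the handle value, so the non-secure bit
 * and the partition field are masked off before the range check.
 */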
static TEE_Result reclaim_cookie(struct guest_partition *prtn, uint64_t cookie)
{
	if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
		size_t n = 0;

		for (n = 0; n < prtn->cookie_count; n++) {
			if (prtn->cookies[n] == cookie) {
				memmove(prtn->cookies + n,
					prtn->cookies + n + 1,
					sizeof(uint64_t) *
						(prtn->cookie_count - n - 1));
				prtn->cookie_count--;
				return TEE_SUCCESS;
			}
		}
	} else {
		uint64_t mask = FFA_MEMORY_HANDLE_NON_SECURE_BIT |
				SHIFT_U64(FFA_MEMORY_HANDLE_PRTN_MASK,
					  FFA_MEMORY_HANDLE_PRTN_SHIFT);
		int64_t i = cookie & ~mask;

		if (i >= 0 && i < SPMC_CORE_SEL1_MAX_SHM_COUNT &&
		    bit_test(prtn->shm_bits, i)) {
			bit_clear(prtn->shm_bits, i);
			return TEE_SUCCESS;
		}
	}

	return TEE_ERROR_ITEM_NOT_FOUND;
}

TEE_Result virt_reclaim_cookie_from_destroyed_guest(uint16_t guest_id,
						    uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	TEE_Result res = TEE_ERROR_ITEM_NOT_FOUND;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_FOREACH(prtn, &prtn_destroy_list, link) {
		if (prtn->id == guest_id) {
			res = reclaim_cookie(prtn, cookie);
			if (prtn_have_remaining_resources(prtn))
				prtn = NULL;
			else
				LIST_REMOVE(prtn, link);
			break;
		}
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	nex_free(prtn);

	return res;
}
#endif /*CFG_CORE_SEL1_SPMC*/

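/*
 * Registers a new guest specific data entry. Returned IDs are 1-based
 * so that 0 can mean "invalid"; virt_get_guest_spec_data() subtracts
 * one when indexing data_array. Registration is only possible before
 * the first exit to the normal world; after that virt_disable_add()
 * has set add_disabled.
 */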
TEE_Result virt_add_guest_spec_data(unsigned int *data_id, size_t data_size,
				    void (*data_destroy)(void *data))
{
	void *p = NULL;

	/*
	 * This function can only execute successfully in the
	 * single-threaded environment that exists before OP-TEE exits to
	 * the normal world for the first time. If add_disabled is true,
	 * we're no longer in that environment.
	 */

	if (add_disabled)
		return TEE_ERROR_BAD_PARAMETERS;

	p = nex_realloc(gsd_array, sizeof(*gsd_array) * (gsd_count + 1));
	if (!p)
		return TEE_ERROR_OUT_OF_MEMORY;
	gsd_array = p;

	gsd_array[gsd_count] = (struct guest_spec_data){
		.size = data_size,
		.destroy = data_destroy,
	};
	*data_id = gsd_count + 1;
	gsd_count++;
	return TEE_SUCCESS;
}

void *virt_get_guest_spec_data(struct guest_partition *prtn,
			       unsigned int data_id)
{
	assert(data_id);
	if (!data_id || !prtn || data_id > gsd_count)
		return NULL;
	return prtn->data_array[data_id - 1];
}

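/*
 * Typical use of the guest specific data API (a sketch; my_ctx and
 * my_gsd_id are hypothetical):
 *
 *	static unsigned int my_gsd_id __nex_bss;
 *
 *	static TEE_Result my_init(void)
 *	{
 *		return virt_add_guest_spec_data(&my_gsd_id,
 *						sizeof(struct my_ctx), NULL);
 *	}
 *	nex_early_init(my_init);
 *
 * and later, while a guest reference is held:
 *
 *	struct my_ctx *ctx = virt_get_guest_spec_data(prtn, my_gsd_id);
 */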
static TEE_Result virt_disable_add(void)
{
	add_disabled = true;

	return TEE_SUCCESS;
}
nex_release_init_resource(virt_disable_add);
754