// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018, EPAM Systems. All rights reserved.
 * Copyright (c) 2023-2024, Linaro Limited
 */

#include <bitstring.h>
#include <compiler.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/mutex.h>
#include <kernel/notif.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/thread_spmc.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/tee_mm.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <string.h>
#include <string_ext.h>
#include <util.h>

LIST_HEAD(prtn_list_head, guest_partition);

static unsigned int prtn_list_lock __nex_data = SPINLOCK_UNLOCK;

static struct prtn_list_head prtn_list __nex_data =
	LIST_HEAD_INITIALIZER(prtn_list);
static struct prtn_list_head prtn_destroy_list __nex_data =
	LIST_HEAD_INITIALIZER(prtn_destroy_list);

/* Free pages used for guest partitions */
tee_mm_pool_t virt_mapper_pool __nex_bss;

/* Memory used by OP-TEE core */
struct memory_map *kmem_map __nex_bss;

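/*
 * Guest specific data descriptors registered with
 * virt_add_guest_spec_data(). Each created guest partition gets a
 * zero-initialized instance of every registered item in its data_array.
 */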
struct guest_spec_data {
	size_t size;
	void (*destroy)(void *data);
};

static bool add_disabled __nex_bss;
static unsigned int gsd_count __nex_bss;
static struct guest_spec_data *gsd_array __nex_bss;

struct guest_partition {
	LIST_ENTRY(guest_partition) link;
	struct mmu_partition *mmu_prtn;
	struct memory_map mem_map;
	struct mutex mutex;
	void *tables_va;
	tee_mm_entry_t *tee_ram;
	tee_mm_entry_t *ta_ram;
	tee_mm_entry_t *tables;
	bool runtime_initialized;
	bool got_guest_destroyed;
	bool shutting_down;
	uint16_t id;
	struct refcount refc;
#ifdef CFG_CORE_SEL1_SPMC
	uint64_t cookies[SPMC_CORE_SEL1_MAX_SHM_COUNT];
	uint8_t cookie_count;
	bitstr_t bit_decl(shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT);
#endif
	void **data_array;
};

struct guest_partition *current_partition[CFG_TEE_CORE_NB_CORE] __nex_bss;

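/*
 * Foreign interrupts are masked while accessing current_partition[] so
 * that get_core_pos() and the array access are done on the same core
 * without being preempted in between.
 */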
static struct guest_partition *get_current_prtn(void)
{
	struct guest_partition *ret = NULL;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	ret = current_partition[get_core_pos()];

	thread_unmask_exceptions(exceptions);

	return ret;
}

uint16_t virt_get_current_guest_id(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return 0;
	return prtn->id;
}

static void set_current_prtn(struct guest_partition *prtn)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	current_partition[get_core_pos()] = prtn;

	thread_unmask_exceptions(exceptions);
}

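/*
 * Each guest gets an equal share of the TA RAM range, minus the space
 * needed for its private copy of the TEE RW data and its translation
 * tables, rounded down to a small page boundary.
 */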
static size_t get_ta_ram_size(void)
{
	size_t ta_size = 0;

	core_mmu_get_ta_range(NULL, &ta_size);
	return ROUNDDOWN(ta_size / CFG_VIRT_GUEST_COUNT - VCORE_UNPG_RW_SZ -
			 core_mmu_get_total_pages_size(), SMALL_PAGE_SIZE);
}

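/*
 * Build a per-guest memory map from the kernel map: the TEE RW area is
 * redirected to the guest's private copy at @tee_data and a
 * MEM_AREA_TA_RAM entry backed by @ta_ram is appended.
 */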
static TEE_Result prepare_memory_map(struct memory_map *mem_map,
				     paddr_t tee_data, paddr_t ta_ram)
{
	struct tee_mmap_region *map = NULL;
	vaddr_t max_va = 0;
	size_t n = 0;
	/*
	 * This function assumes that kmem_map (aka static_memory_map
	 * from core_mmu.c) is not altered while it runs. This holds
	 * because all changes to static_memory_map are done during
	 * OP-TEE initialization, while this function is called when
	 * the hypervisor creates a guest.
	 */

	/* Allocate entries for virtual guest map */
	mem_map->map = nex_calloc(kmem_map->count + 1, sizeof(*mem_map->map));
	if (!mem_map->map)
		return TEE_ERROR_OUT_OF_MEMORY;
	mem_map->count = kmem_map->count;
	mem_map->alloc_count = kmem_map->count + 1;

	memcpy(mem_map->map, kmem_map->map,
	       sizeof(*mem_map->map) * mem_map->count);

	/* Map TEE .data and .bss sections */
	for (n = 0; n < mem_map->count; n++) {
		map = mem_map->map + n;
		if (map->va == (vaddr_t)(VCORE_UNPG_RW_PA)) {
			map->type = MEM_AREA_TEE_RAM_RW;
			map->attr = core_mmu_type_to_attr(map->type);
			map->pa = tee_data;
		}
		if (map->va + map->size > max_va)
			max_va = map->va + map->size;
	}

	/* Map TA_RAM */
	mem_map->count++;
	map = ins_array_elem(mem_map->map, mem_map->count,
			     sizeof(*mem_map->map), n, NULL);
	map->region_size = SMALL_PAGE_SIZE;
	map->va = ROUNDUP(max_va, map->region_size);
	map->va += (ta_ram - map->va) & CORE_MMU_PGDIR_MASK;
	map->pa = ta_ram;
	map->size = get_ta_ram_size();
	map->type = MEM_AREA_TA_RAM;
	map->attr = core_mmu_type_to_attr(map->type);

	DMSG("New map (%08lx):", (vaddr_t)(VCORE_UNPG_RW_PA));

	for (n = 0; n < mem_map->count; n++)
		DMSG("T: %-16s rsz: %08x, pa: %08lx, va: %08lx, sz: %08lx attr: %x",
		     teecore_memtype_name(mem_map->map[n].type),
		     mem_map->map[n].region_size, mem_map->map[n].pa,
		     mem_map->map[n].va, mem_map->map[n].size,
		     mem_map->map[n].attr);
	return TEE_SUCCESS;
}

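/*
 * Create the page pool covering all secure RAM, carve out everything
 * already used by the OP-TEE core (including an eventual gap between
 * the two secure memory banks) and remember the kernel memory map for
 * later cloning into guest maps.
 */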
void virt_init_memory(struct memory_map *mem_map, paddr_t secmem0_base,
		      paddr_size_t secmem0_size, paddr_t secmem1_base,
		      paddr_size_t secmem1_size)
{
	paddr_size_t size = secmem0_size;
	paddr_t base = secmem0_base;
	size_t n = 0;

	if (secmem1_size) {
		assert(secmem0_base + secmem0_size <= secmem1_base);
		size = secmem1_base + secmem1_size - base;
	}

	/* Init page pool that covers all secure RAM */
	if (!tee_mm_init(&virt_mapper_pool, base, size,
			 SMALL_PAGE_SHIFT, TEE_MM_POOL_NEX_MALLOC))
		panic("Can't create pool with free pages");
	DMSG("Created virtual mapper pool from %"PRIxPA" to %"PRIxPA,
	     base, base + size);

	if (secmem1_size) {
		/* Carve out an eventual gap between secmem0 and secmem1 */
		base = secmem0_base + secmem0_size;
		size = secmem1_base - base;
		if (size) {
			DMSG("Carving out gap between secmem0 and secmem1 (0x%"PRIxPA":0x%"PRIxPASZ")",
			     base, size);
			if (!tee_mm_alloc2(&virt_mapper_pool, base, size))
				panic("Can't carve out secmem gap");
		}
	}

	/* Carve out areas that are used by OP-TEE core */
	for (n = 0; n < mem_map->count; n++) {
		struct tee_mmap_region *map = mem_map->map + n;

		switch (map->type) {
		case MEM_AREA_TEE_RAM_RX:
		case MEM_AREA_TEE_RAM_RO:
		case MEM_AREA_NEX_RAM_RO:
		case MEM_AREA_NEX_RAM_RW:
			DMSG("Carving out area of type %d (0x%08lx-0x%08lx)",
			     map->type, map->pa, map->pa + map->size);
			if (!tee_mm_alloc2(&virt_mapper_pool, map->pa,
					   map->size))
				panic("Can't carve out used area");
			break;
		default:
			continue;
		}
	}

	kmem_map = mem_map;
}

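/*
 * Allocate the guest's private TEE RW copy, TA RAM and translation
 * tables, build its memory map and MMU partition, then switch to the
 * new mappings to initialize the guest's .data and .bss copies (the
 * caller switches back to the default partition when done).
 */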
static TEE_Result configure_guest_prtn_mem(struct guest_partition *prtn)
{
	TEE_Result res = TEE_SUCCESS;
	paddr_t original_data_pa = 0;

	prtn->tee_ram = tee_mm_alloc(&virt_mapper_pool, VCORE_UNPG_RW_SZ);
	if (!prtn->tee_ram) {
		EMSG("Can't allocate memory for TEE runtime context");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TEE RAM: %08" PRIxPA, tee_mm_get_smem(prtn->tee_ram));

	prtn->ta_ram = tee_mm_alloc(&virt_mapper_pool, get_ta_ram_size());
	if (!prtn->ta_ram) {
		EMSG("Can't allocate memory for TA data");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TA RAM: %08" PRIxPA, tee_mm_get_smem(prtn->ta_ram));

	prtn->tables = tee_mm_alloc(&virt_mapper_pool,
				    core_mmu_get_total_pages_size());
	if (!prtn->tables) {
		EMSG("Can't allocate memory for page tables");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->tables_va = phys_to_virt(tee_mm_get_smem(prtn->tables),
				       MEM_AREA_SEC_RAM_OVERALL,
				       core_mmu_get_total_pages_size());
	assert(prtn->tables_va);

	prtn->mmu_prtn = core_alloc_mmu_prtn(prtn->tables_va);
	if (!prtn->mmu_prtn) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	res = prepare_memory_map(&prtn->mem_map, tee_mm_get_smem(prtn->tee_ram),
				 tee_mm_get_smem(prtn->ta_ram));
	if (res)
		goto err;

	core_init_mmu_prtn(prtn->mmu_prtn, &prtn->mem_map);

	original_data_pa = virt_to_phys(__data_start);
	/* Switch to guest's mappings */
	core_mmu_set_prtn(prtn->mmu_prtn);

	/* clear .bss */
	memset((void *)(VCORE_UNPG_RW_PA), 0, VCORE_UNPG_RW_SZ);

	/* copy .data section from R/O original */
	memcpy(__data_start,
	       phys_to_virt(original_data_pa, MEM_AREA_SEC_RAM_OVERALL,
			    __data_end - __data_start),
	       __data_end - __data_start);

	return TEE_SUCCESS;

err:
	if (prtn->tee_ram)
		tee_mm_free(prtn->tee_ram);
	if (prtn->ta_ram)
		tee_mm_free(prtn->ta_ram);
	if (prtn->tables)
		tee_mm_free(prtn->tables);
	nex_free(prtn->mmu_prtn);
	nex_free(prtn->mem_map.map);

	return res;
}

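/*
 * Free the guest specific data instances of @prtn. Unless @free_only is
 * set, each registered destroy() callback is invoked first.
 */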
static void destroy_gsd(struct guest_partition *prtn, bool free_only)
{
	size_t n = 0;

	for (n = 0; n < gsd_count; n++) {
		if (!free_only && prtn->data_array[n] && gsd_array[n].destroy)
			gsd_array[n].destroy(prtn->data_array[n]);
		nex_free(prtn->data_array[n]);
	}
	nex_free(prtn->data_array);
	prtn->data_array = NULL;
}

static TEE_Result alloc_gsd(struct guest_partition *prtn)
{
	unsigned int n = 0;

	if (!gsd_count)
		return TEE_SUCCESS;

	prtn->data_array = nex_calloc(gsd_count, sizeof(void *));
	if (!prtn->data_array)
		return TEE_ERROR_OUT_OF_MEMORY;

	for (n = 0; n < gsd_count; n++) {
		prtn->data_array[n] = nex_calloc(1, gsd_array[n].size);
		if (!prtn->data_array[n]) {
			destroy_gsd(prtn, true /*free_only*/);
			return TEE_ERROR_OUT_OF_MEMORY;
		}
	}

	return TEE_SUCCESS;
}

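/*
 * Called when the hypervisor reports that a new guest has been created:
 * allocate and initialize its partition, run the per-guest early
 * initialization under the guest's mappings and add it to prtn_list.
 */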
TEE_Result virt_guest_created(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;

	prtn = nex_calloc(1, sizeof(*prtn));
	if (!prtn)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = alloc_gsd(prtn);
	if (res)
		goto err_free_prtn;

	prtn->id = guest_id;
	mutex_init(&prtn->mutex);
	refcount_set(&prtn->refc, 1);
	res = configure_guest_prtn_mem(prtn);
	if (res)
		goto err_free_gsd;

	set_current_prtn(prtn);

	/* Initialize threads */
	thread_init_threads();
	/* Do the preinitcalls */
	call_preinitcalls();

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_INSERT_HEAD(&prtn_list, prtn, link);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	IMSG("Added guest %d", guest_id);

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();

	return TEE_SUCCESS;

err_free_gsd:
	destroy_gsd(prtn, true /*free_only*/);
err_free_prtn:
	nex_free(prtn);
	return res;
}

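/*
 * With CFG_CORE_SEL1_SPMC, return true if the partition still has
 * shared memory bookkeeping (cookies[] entries or shm_bits) that must
 * be reclaimed before the partition can be freed.
 */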
static bool
prtn_have_remaining_resources(struct guest_partition *prtn __maybe_unused)
{
#ifdef CFG_CORE_SEL1_SPMC
	int i = 0;

	if (prtn->cookie_count)
		return true;
	bit_ffs(prtn->shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT, &i);
	return i >= 0;
#else
	return false;
#endif
}

static void get_prtn(struct guest_partition *prtn)
{
	if (!refcount_inc(&prtn->refc))
		panic();
}

uint16_t virt_get_guest_id(struct guest_partition *prtn)
{
	if (!prtn)
		return 0;
	return prtn->id;
}

static struct guest_partition *find_guest_by_id_unlocked(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;

	LIST_FOREACH(prtn, &prtn_list, link)
		if (!prtn->shutting_down && prtn->id == guest_id)
			return prtn;

	return NULL;
}

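/*
 * Return the guest following @prtn in prtn_list (or the first guest if
 * @prtn is NULL), skipping partitions that are shutting down. A
 * reference is taken on the returned partition and the reference held
 * on @prtn is dropped, so the whole list can be walked like this
 * (sketch, reference counting is balanced when the loop terminates):
 *
 *	struct guest_partition *prtn = NULL;
 *
 *	while ((prtn = virt_next_guest(prtn)))
 *		IMSG("guest %"PRIu16, virt_get_guest_id(prtn));
 */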
struct guest_partition *virt_next_guest(struct guest_partition *prtn)
{
	struct guest_partition *ret = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	if (prtn)
		ret = LIST_NEXT(prtn, link);
	else
		ret = LIST_FIRST(&prtn_list);

	while (ret && ret->shutting_down)
		ret = LIST_NEXT(ret, link);
	if (ret)
		get_prtn(ret);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	virt_put_guest(prtn);

	return ret;
}

struct guest_partition *virt_get_current_guest(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (prtn)
		get_prtn(prtn);
	return prtn;
}

struct guest_partition *virt_get_guest(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_guest_by_id_unlocked(guest_id);
	if (prtn)
		get_prtn(prtn);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return prtn;
}

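/*
 * Drop a reference to @prtn. When the last reference is released the
 * partition must already be shutting down: its resources are freed and
 * the partition itself is either freed or, if shared memory still has
 * to be reclaimed, parked on prtn_destroy_list until
 * virt_reclaim_cookie_from_destroyed_guest() is done with it.
 */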
void virt_put_guest(struct guest_partition *prtn)
{
	if (prtn && refcount_dec(&prtn->refc)) {
		uint32_t exceptions = 0;
		bool do_free = true;

		assert(prtn->shutting_down);

		exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
		LIST_REMOVE(prtn, link);
		if (prtn_have_remaining_resources(prtn)) {
			LIST_INSERT_HEAD(&prtn_destroy_list, prtn, link);
			/*
			 * Delay the nex_free() until
			 * virt_reclaim_cookie_from_destroyed_guest()
			 * is done with this partition.
			 */
			do_free = false;
		}
		cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

		destroy_gsd(prtn, false /*!free_only*/);
		tee_mm_free(prtn->tee_ram);
		prtn->tee_ram = NULL;
		tee_mm_free(prtn->ta_ram);
		prtn->ta_ram = NULL;
		tee_mm_free(prtn->tables);
		prtn->tables = NULL;
		core_free_mmu_prtn(prtn->mmu_prtn);
		prtn->mmu_prtn = NULL;
		nex_free(prtn->mem_map.map);
		prtn->mem_map.map = NULL;
		if (do_free)
			nex_free(prtn);
	}
}

TEE_Result virt_guest_destroyed(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	IMSG("Removing guest %"PRId16, guest_id);

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);

	prtn = find_guest_by_id_unlocked(guest_id);
	if (prtn && !prtn->got_guest_destroyed)
		prtn->got_guest_destroyed = true;
	else
		prtn = NULL;

	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	if (prtn) {
		notif_deliver_atomic_event(NOTIF_EVENT_SHUTDOWN, prtn->id);

		exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
		prtn->shutting_down = true;
		cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

		virt_put_guest(prtn);
	} else {
		EMSG("Client with id %d not found", guest_id);
	}

	return TEE_SUCCESS;
}

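/*
 * Activate the partition of @guest_id on this core: take a reference
 * and switch to the guest's MMU partition. The guest is deactivated
 * again with virt_unset_guest().
 */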
TEE_Result virt_set_guest(uint16_t guest_id)
{
	struct guest_partition *prtn = get_current_prtn();

	/* This can be true only if we return from IRQ RPC */
	if (prtn && prtn->id == guest_id)
		return TEE_SUCCESS;

	if (prtn)
		panic("Virtual guest partition is already set");

	prtn = virt_get_guest(guest_id);
	if (!prtn)
		return TEE_ERROR_ITEM_NOT_FOUND;

	set_current_prtn(prtn);
	core_mmu_set_prtn(prtn->mmu_prtn);

	return TEE_SUCCESS;
}

void virt_unset_guest(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return;

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();
	virt_put_guest(prtn);
}

void virt_on_stdcall(void)
{
	struct guest_partition *prtn = get_current_prtn();

	/* Initialize runtime on first std call */
	if (!prtn->runtime_initialized) {
		mutex_lock(&prtn->mutex);
		if (!prtn->runtime_initialized) {
			init_tee_runtime();
			prtn->runtime_initialized = true;
		}
		mutex_unlock(&prtn->mutex);
	}
}

struct memory_map *virt_get_memory_map(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return NULL;

	return &prtn->mem_map;
}

void virt_get_ta_ram(vaddr_t *start, vaddr_t *end)
{
	struct guest_partition *prtn = get_current_prtn();

	*start = (vaddr_t)phys_to_virt(tee_mm_get_smem(prtn->ta_ram),
				       MEM_AREA_TA_RAM,
				       tee_mm_get_bytes(prtn->ta_ram));
	*end = *start + tee_mm_get_bytes(prtn->ta_ram);
}

#ifdef CFG_CORE_SEL1_SPMC
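/*
 * With the SEL1 SPMC, shared memory handed over by a guest is tracked
 * per partition: hypervisor-allocated handles are stored in cookies[]
 * while SPMC-allocated handles are recorded as bit indexes in shm_bits.
 */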
static int find_cookie(struct guest_partition *prtn, uint64_t cookie)
{
	int i = 0;

	for (i = 0; i < prtn->cookie_count; i++)
		if (prtn->cookies[i] == cookie)
			return i;
	return -1;
}

static struct guest_partition *find_prtn_cookie(uint64_t cookie, int *idx)
{
	struct guest_partition *prtn = NULL;
	int i = 0;

	LIST_FOREACH(prtn, &prtn_list, link) {
		i = find_cookie(prtn, cookie);
		if (i >= 0) {
			if (idx)
				*idx = i;
			return prtn;
		}
	}

	return NULL;
}

TEE_Result virt_add_cookie_to_current_guest(uint64_t cookie)
{
	TEE_Result res = TEE_ERROR_ACCESS_DENIED;
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	if (find_prtn_cookie(cookie, NULL))
		goto out;

	prtn = current_partition[get_core_pos()];
	if (prtn->cookie_count < ARRAY_SIZE(prtn->cookies)) {
		prtn->cookies[prtn->cookie_count] = cookie;
		prtn->cookie_count++;
		res = TEE_SUCCESS;
	}
out:
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return res;
}

void virt_remove_cookie(uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_prtn_cookie(cookie, &i);
	if (prtn) {
		memmove(prtn->cookies + i, prtn->cookies + i + 1,
			sizeof(uint64_t) * (prtn->cookie_count - i - 1));
		prtn->cookie_count--;
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
}

uint16_t virt_find_guest_by_cookie(uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;
	uint16_t ret = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_prtn_cookie(cookie, NULL);
	if (prtn)
		ret = prtn->id;

	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return ret;
}

bitstr_t *virt_get_shm_bits(void)
{
	return get_current_prtn()->shm_bits;
}

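/*
 * Drop the bookkeeping for @cookie in @prtn: handles with
 * FFA_MEMORY_HANDLE_HYPERVISOR_BIT set are removed from cookies[],
 * other handles are translated to their index and cleared in shm_bits.
 */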
static TEE_Result reclaim_cookie(struct guest_partition *prtn, uint64_t cookie)
{
	if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
		size_t n = 0;

		for (n = 0; n < prtn->cookie_count; n++) {
			if (prtn->cookies[n] == cookie) {
				memmove(prtn->cookies + n,
					prtn->cookies + n + 1,
					sizeof(uint64_t) *
						(prtn->cookie_count - n - 1));
				prtn->cookie_count--;
				return TEE_SUCCESS;
			}
		}
	} else {
		uint64_t mask = FFA_MEMORY_HANDLE_NON_SECURE_BIT |
				SHIFT_U64(FFA_MEMORY_HANDLE_PRTN_MASK,
					  FFA_MEMORY_HANDLE_PRTN_SHIFT);
		int64_t i = cookie & ~mask;

		if (i >= 0 && i < SPMC_CORE_SEL1_MAX_SHM_COUNT &&
		    bit_test(prtn->shm_bits, i)) {
			bit_clear(prtn->shm_bits, i);
			return TEE_SUCCESS;
		}
	}

	return TEE_ERROR_ITEM_NOT_FOUND;
}

TEE_Result virt_reclaim_cookie_from_destroyed_guest(uint16_t guest_id,
						    uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	TEE_Result res = TEE_ERROR_ITEM_NOT_FOUND;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_FOREACH(prtn, &prtn_destroy_list, link) {
		if (prtn->id == guest_id) {
			res = reclaim_cookie(prtn, cookie);
			if (prtn_have_remaining_resources(prtn))
				prtn = NULL;
			else
				LIST_REMOVE(prtn, link);
			break;
		}
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	nex_free(prtn);

	return res;
}
#endif /*CFG_CORE_SEL1_SPMC*/

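/*
 * Register a guest specific data item of @data_size bytes, optionally
 * with a @data_destroy callback run when a guest is destroyed. The
 * returned @data_id (a 1-based index) is later passed to
 * virt_get_guest_spec_data() together with a guest partition.
 *
 * Minimal usage sketch (hypothetical names, not part of this file),
 * registered from nexus initialization code that runs before the first
 * exit to the normal world:
 *
 *	static unsigned int my_data_id __nex_bss;
 *
 *	struct my_ctx {
 *		unsigned int counter;
 *	};
 *
 *	static TEE_Result my_register(void)
 *	{
 *		return virt_add_guest_spec_data(&my_data_id,
 *						sizeof(struct my_ctx),
 *						NULL);
 *	}
 *
 * Later, with a reference to a guest partition @prtn:
 *
 *	struct my_ctx *ctx = virt_get_guest_spec_data(prtn, my_data_id);
 */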
TEE_Result virt_add_guest_spec_data(unsigned int *data_id, size_t data_size,
				    void (*data_destroy)(void *data))
{
	void *p = NULL;

	/*
	 * This function only executes successfully in a single-threaded
	 * environment, before exiting to the normal world the first time.
	 * If add_disabled is true, it means we're not in this environment
	 * any longer.
	 */

	if (add_disabled)
		return TEE_ERROR_BAD_PARAMETERS;

	p = nex_realloc(gsd_array, sizeof(*gsd_array) * (gsd_count + 1));
	if (!p)
		return TEE_ERROR_OUT_OF_MEMORY;
	gsd_array = p;

	gsd_array[gsd_count] = (struct guest_spec_data){
		.size = data_size,
		.destroy = data_destroy,
	};
	*data_id = gsd_count + 1;
	gsd_count++;
	return TEE_SUCCESS;
}

void *virt_get_guest_spec_data(struct guest_partition *prtn,
			       unsigned int data_id)
{
	assert(data_id);
	if (!data_id || !prtn || data_id > gsd_count)
		return NULL;
	return prtn->data_array[data_id - 1];
}

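/*
 * Once nexus init resources are released, that is, just before the
 * first exit to the normal world, further registrations of guest
 * specific data are refused.
 */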
static TEE_Result virt_disable_add(void)
{
	add_disabled = true;

	return TEE_SUCCESS;
}
nex_release_init_resource(virt_disable_add);