xref: /optee_os/core/arch/arm/kernel/virtualization.c (revision beb9021040c9d5b62e4de85007a132cd0fa69416)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2018, EPAM Systems. All rights reserved.
4  * Copyright (c) 2023-2024, Linaro Limited
5  */
6 
7 #include <bitstring.h>
8 #include <compiler.h>
9 #include <kernel/boot.h>
10 #include <kernel/linker.h>
11 #include <kernel/misc.h>
12 #include <kernel/mutex.h>
13 #include <kernel/panic.h>
14 #include <kernel/refcount.h>
15 #include <kernel/spinlock.h>
16 #include <kernel/thread_spmc.h>
17 #include <kernel/virtualization.h>
18 #include <mm/core_memprot.h>
19 #include <mm/core_mmu.h>
20 #include <mm/tee_mm.h>
21 #include <platform_config.h>
22 #include <sm/optee_smc.h>
23 #include <string.h>
24 #include <util.h>
25 
26 LIST_HEAD(prtn_list_head, guest_partition);
27 
28 static unsigned int prtn_list_lock __nex_data = SPINLOCK_UNLOCK;
29 
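/*
 * prtn_list holds the currently active guest partitions. Partitions that
 * have been destroyed but still own shared memory resources are parked on
 * prtn_destroy_list until those resources have been reclaimed.
 */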
30 static struct prtn_list_head prtn_list __nex_data =
31 	LIST_HEAD_INITIALIZER(prtn_list);
32 static struct prtn_list_head prtn_destroy_list __nex_data =
33 	LIST_HEAD_INITIALIZER(prtn_destroy_list);
34 
35 /* Free pages used for guest partitions */
36 tee_mm_pool_t virt_mapper_pool __nex_bss;
37 
38 /* Memory used by OP-TEE core */
39 struct tee_mmap_region *kmemory_map __nex_bss;
40 
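/*
 * One slot of guest specific data as registered with
 * virt_add_guest_spec_data(): the number of bytes to allocate for each
 * guest and an optional destructor called when the guest partition is
 * released.
 */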
41 struct guest_spec_data {
42 	size_t size;
43 	void (*destroy)(void *data);
44 };
45 
46 static bool add_disabled __nex_bss;
47 static unsigned int gsd_count __nex_bss;
48 static struct guest_spec_data *gsd_array __nex_bss;
49 
50 struct guest_partition {
51 	LIST_ENTRY(guest_partition) link;
52 	struct mmu_partition *mmu_prtn;
53 	struct tee_mmap_region *memory_map;
54 	struct mutex mutex;
55 	void *tables_va;
56 	tee_mm_entry_t *tee_ram;
57 	tee_mm_entry_t *ta_ram;
58 	tee_mm_entry_t *tables;
59 	bool runtime_initialized;
60 	bool shutting_down;
61 	uint16_t id;
62 	struct refcount refc;
63 #ifdef CFG_CORE_SEL1_SPMC
64 	uint64_t cookies[SPMC_CORE_SEL1_MAX_SHM_COUNT];
65 	uint8_t cookie_count;
66 	bitstr_t bit_decl(shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT);
67 #endif
68 	void **data_array;
69 };
70 
71 struct guest_partition *current_partition[CFG_TEE_CORE_NB_CORE] __nex_bss;
72 
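/*
 * current_partition[] is accessed with foreign interrupts masked so that
 * the thread cannot be migrated to another core between get_core_pos()
 * and the access of the per-core pointer.
 */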
73 static struct guest_partition *get_current_prtn(void)
74 {
75 	struct guest_partition *ret;
76 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
77 
78 	ret = current_partition[get_core_pos()];
79 
80 	thread_unmask_exceptions(exceptions);
81 
82 	return ret;
83 }
84 
85 uint16_t virt_get_current_guest_id(void)
86 {
87 	struct guest_partition *prtn = get_current_prtn();
88 
89 	if (!prtn)
90 		return 0;
91 	return prtn->id;
92 }
93 
94 static void set_current_prtn(struct guest_partition *prtn)
95 {
96 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
97 
98 	current_partition[get_core_pos()] = prtn;
99 
100 	thread_unmask_exceptions(exceptions);
101 }
102 
103 static size_t get_ta_ram_size(void)
104 {
105 	size_t ta_size = 0;
106 
107 	core_mmu_get_ta_range(NULL, &ta_size);
108 	return ROUNDDOWN(ta_size / CFG_VIRT_GUEST_COUNT - VCORE_UNPG_RW_SZ -
109 			 core_mmu_get_total_pages_size(), SMALL_PAGE_SIZE);
110 }
111 
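/*
 * Builds a guest private copy of the nexus memory map: the TEE RAM RW
 * area (.data and .bss) is redirected to the physical pages at tee_data
 * and a TA RAM entry backed by ta_ram is added in front of the
 * terminating MEM_AREA_END entry.
 */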
112 static struct tee_mmap_region *prepare_memory_map(paddr_t tee_data,
113 						  paddr_t ta_ram)
114 {
115 	int i, entries;
116 	vaddr_t max_va = 0;
117 	struct tee_mmap_region *map;
118 	/*
119 	 * This function assumes that, at the time of operation,
120 	 * kmemory_map (aka static_memory_map from core_mmu.c)
121 	 * will not be altered. This holds because all
122 	 * changes to static_memory_map are done during
123 	 * OP-TEE initialization, while this function is
124 	 * called when the hypervisor creates a guest.
125 	 */
126 
127 	/* Count number of entries in nexus memory map */
128 	for (map = kmemory_map, entries = 1; map->type != MEM_AREA_END;
129 	     map++, entries++)
130 		;
131 
132 	/* Allocate entries for virtual guest map */
133 	map = nex_calloc(entries + 1, sizeof(struct tee_mmap_region));
134 	if (!map)
135 		return NULL;
136 
137 	memcpy(map, kmemory_map, sizeof(*map) * entries);
138 
139 	/* Map TEE .data and .bss sections */
140 	for (i = 0; i < entries; i++) {
141 		if (map[i].va == (vaddr_t)(VCORE_UNPG_RW_PA)) {
142 			map[i].type = MEM_AREA_TEE_RAM_RW;
143 			map[i].attr = core_mmu_type_to_attr(map[i].type);
144 			map[i].pa = tee_data;
145 		}
146 		if (map[i].va + map[i].size > max_va)
147 			max_va = map[i].va + map[i].size;
148 	}
149 
150 	/* Map TA_RAM */
151 	assert(map[entries - 1].type == MEM_AREA_END);
152 	map[entries] = map[entries - 1];
153 	map[entries - 1].region_size = SMALL_PAGE_SIZE;
154 	map[entries - 1].va = ROUNDUP(max_va, map[entries - 1].region_size);
155 	map[entries - 1].va +=
156 		(ta_ram - map[entries - 1].va) & CORE_MMU_PGDIR_MASK;
157 	map[entries - 1].pa = ta_ram;
158 	map[entries - 1].size = get_ta_ram_size();
159 	map[entries - 1].type = MEM_AREA_TA_RAM;
160 	map[entries - 1].attr = core_mmu_type_to_attr(map[entries - 1].type);
161 
162 	DMSG("New map (%08lx):",  (vaddr_t)(VCORE_UNPG_RW_PA));
163 
164 	for (i = 0; i < entries; i++)
165 		DMSG("T: %-16s rsz: %08x, pa: %08lx, va: %08lx, sz: %08lx attr: %x",
166 		     teecore_memtype_name(map[i].type),
167 		     map[i].region_size, map[i].pa, map[i].va,
168 		     map[i].size, map[i].attr);
169 	return map;
170 }
171 
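/*
 * Initializes virt_mapper_pool to cover all secure RAM, then reserves the
 * gap between secmem0 and secmem1 (if any) and the areas already used by
 * the OP-TEE core so that only free pages remain available for guest
 * partitions. The nexus memory map is recorded in kmemory_map for later
 * use by prepare_memory_map().
 */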
172 void virt_init_memory(struct tee_mmap_region *memory_map, paddr_t secmem0_base,
173 		      paddr_size_t secmem0_size, paddr_t secmem1_base,
174 		      paddr_size_t secmem1_size)
175 {
176 	struct tee_mmap_region *map = NULL;
177 	paddr_size_t size = secmem0_size;
178 	paddr_t base = secmem0_base;
179 
180 	if (secmem1_size) {
181 		assert(secmem0_base + secmem0_size <= secmem1_base);
182 		size = secmem1_base + secmem1_size - base;
183 	}
184 
185 	/* Init page pool that covers all secure RAM */
186 	if (!tee_mm_init(&virt_mapper_pool, base, size,
187 			 SMALL_PAGE_SHIFT, TEE_MM_POOL_NEX_MALLOC))
188 		panic("Can't create pool with free pages");
189 	DMSG("Created virtual mapper pool from %"PRIxPA" to %"PRIxPA,
190 	     base, base + size);
191 
192 	if (secmem1_size) {
193 		/* Carve out an eventual gap between secmem0 and secmem1 */
194 		base = secmem0_base + secmem0_size;
195 		size = secmem1_base - base;
196 		if (size) {
197 			DMSG("Carving out gap between secmem0 and secmem1 (0x%"PRIxPA":0x%"PRIxPASZ")",
198 			     base, size);
199 			if (!tee_mm_alloc2(&virt_mapper_pool, base, size))
200 				panic("Can't carve out secmem gap");
201 		}
202 	}
203
205 	/* Carve out areas that are used by OP-TEE core */
206 	for (map = memory_map; map->type != MEM_AREA_END; map++) {
207 		switch (map->type) {
208 		case MEM_AREA_TEE_RAM_RX:
209 		case MEM_AREA_TEE_RAM_RO:
210 		case MEM_AREA_NEX_RAM_RO:
211 		case MEM_AREA_NEX_RAM_RW:
212 			DMSG("Carving out area of type %d (0x%08lx-0x%08lx)",
213 			     map->type, map->pa, map->pa + map->size);
214 			if (!tee_mm_alloc2(&virt_mapper_pool, map->pa,
215 					   map->size))
216 				panic("Can't carve out used area");
217 			break;
218 		default:
219 			continue;
220 		}
221 	}
222 
223 	kmemory_map = memory_map;
224 }
225
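/*
 * Allocates the per guest memory from virt_mapper_pool: a private copy of
 * the TEE .data/.bss area, TA RAM and translation tables. A guest memory
 * map and MMU partition are created from these and the guest mapping is
 * then activated to clear .bss and to copy .data from the original image
 * (the caller restores the default partition when done).
 */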
227 static TEE_Result configure_guest_prtn_mem(struct guest_partition *prtn)
228 {
229 	TEE_Result res = TEE_SUCCESS;
230 	paddr_t original_data_pa = 0;
231 
232 	prtn->tee_ram = tee_mm_alloc(&virt_mapper_pool, VCORE_UNPG_RW_SZ);
233 	if (!prtn->tee_ram) {
234 		EMSG("Can't allocate memory for TEE runtime context");
235 		res = TEE_ERROR_OUT_OF_MEMORY;
236 		goto err;
237 	}
238 	DMSG("TEE RAM: %08" PRIxPA, tee_mm_get_smem(prtn->tee_ram));
239 
240 	prtn->ta_ram = tee_mm_alloc(&virt_mapper_pool, get_ta_ram_size());
241 	if (!prtn->ta_ram) {
242 		EMSG("Can't allocate memory for TA data");
243 		res = TEE_ERROR_OUT_OF_MEMORY;
244 		goto err;
245 	}
246 	DMSG("TA RAM: %08" PRIxPA, tee_mm_get_smem(prtn->ta_ram));
247 
248 	prtn->tables = tee_mm_alloc(&virt_mapper_pool,
249 				   core_mmu_get_total_pages_size());
250 	if (!prtn->tables) {
251 		EMSG("Can't allocate memory for page tables");
252 		res = TEE_ERROR_OUT_OF_MEMORY;
253 		goto err;
254 	}
255 
256 	prtn->tables_va = phys_to_virt(tee_mm_get_smem(prtn->tables),
257 				      MEM_AREA_SEC_RAM_OVERALL,
258 				      core_mmu_get_total_pages_size());
259 	assert(prtn->tables_va);
260 
261 	prtn->mmu_prtn = core_alloc_mmu_prtn(prtn->tables_va);
262 	if (!prtn->mmu_prtn) {
263 		res = TEE_ERROR_OUT_OF_MEMORY;
264 		goto err;
265 	}
266 
267 	prtn->memory_map = prepare_memory_map(tee_mm_get_smem(prtn->tee_ram),
268 					     tee_mm_get_smem(prtn->ta_ram));
269 	if (!prtn->memory_map) {
270 		res = TEE_ERROR_OUT_OF_MEMORY;
271 		goto err;
272 	}
273 
274 	core_init_mmu_prtn(prtn->mmu_prtn, prtn->memory_map);
275 
276 	original_data_pa = virt_to_phys(__data_start);
277 	/* Switch to guest's mappings */
278 	core_mmu_set_prtn(prtn->mmu_prtn);
279 
280 	/* clear .bss */
281 	memset((void *)(VCORE_UNPG_RW_PA), 0, VCORE_UNPG_RW_SZ);
282 
283 	/* copy .data section from R/O original */
284 	memcpy(__data_start,
285 	       phys_to_virt(original_data_pa, MEM_AREA_SEC_RAM_OVERALL,
286 			    __data_end - __data_start),
287 	       __data_end - __data_start);
288 
289 	return TEE_SUCCESS;
290 
291 err:
292 	if (prtn->tee_ram)
293 		tee_mm_free(prtn->tee_ram);
294 	if (prtn->ta_ram)
295 		tee_mm_free(prtn->ta_ram);
296 	if (prtn->tables)
297 		tee_mm_free(prtn->tables);
298 	nex_free(prtn->mmu_prtn);
299 	nex_free(prtn->memory_map);
300 
301 	return res;
302 }
303 
304 static void destroy_gsd(struct guest_partition *prtn, bool free_only)
305 {
306 	size_t n = 0;
307 
308 	for (n = 0; n < gsd_count; n++) {
309 		if (!free_only && prtn->data_array[n] && gsd_array[n].destroy)
310 			gsd_array[n].destroy(prtn->data_array[n]);
311 		nex_free(prtn->data_array[n]);
312 	}
313 	nex_free(prtn->data_array);
314 	prtn->data_array = NULL;
315 }
316 
317 static TEE_Result alloc_gsd(struct guest_partition *prtn)
318 {
319 	unsigned int n = 0;
320 
321 	if (!gsd_count)
322 		return TEE_SUCCESS;
323 
324 	prtn->data_array = nex_calloc(gsd_count, sizeof(void *));
325 	if (!prtn->data_array)
326 		return TEE_ERROR_OUT_OF_MEMORY;
327 
328 	for (n = 0; n < gsd_count; n++) {
329 		prtn->data_array[n] = nex_calloc(1, gsd_array[n].size);
330 		if (!prtn->data_array[n]) {
331 			destroy_gsd(prtn, true /*free_only*/);
332 			return TEE_ERROR_OUT_OF_MEMORY;
333 		}
334 	}
335 
336 	return TEE_SUCCESS;
337 }
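
/*
 * Called when the hypervisor has created a new guest: allocates and
 * initializes its guest_partition, switches to the guest mapping to
 * initialize threads and run the preinitcalls, inserts the partition into
 * prtn_list and finally restores the default partition.
 */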
338 TEE_Result virt_guest_created(uint16_t guest_id)
339 {
340 	struct guest_partition *prtn = NULL;
341 	TEE_Result res = TEE_SUCCESS;
342 	uint32_t exceptions = 0;
343 
344 	prtn = nex_calloc(1, sizeof(*prtn));
345 	if (!prtn)
346 		return TEE_ERROR_OUT_OF_MEMORY;
347 
348 	res = alloc_gsd(prtn);
349 	if (res)
350 		goto err_free_prtn;
351 
352 	prtn->id = guest_id;
353 	mutex_init(&prtn->mutex);
354 	refcount_set(&prtn->refc, 1);
355 	res = configure_guest_prtn_mem(prtn);
356 	if (res)
357 		goto err_free_gsd;
358 
359 	set_current_prtn(prtn);
360 
361 	/* Initialize threads */
362 	thread_init_threads();
363 	/* Do the preinitcalls */
364 	call_preinitcalls();
365 
366 	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
367 	LIST_INSERT_HEAD(&prtn_list, prtn, link);
368 	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
369 
370 	IMSG("Added guest %d", guest_id);
371 
372 	set_current_prtn(NULL);
373 	core_mmu_set_default_prtn();
374 
375 	return TEE_SUCCESS;
376 
377 err_free_gsd:
378 	destroy_gsd(prtn, true /*free_only*/);
379 err_free_prtn:
380 	nex_free(prtn);
381 	return res;
382 }
383 
384 static bool
385 prtn_have_remaining_resources(struct guest_partition *prtn __maybe_unused)
386 {
387 #ifdef CFG_CORE_SEL1_SPMC
388 	int i = 0;
389 
390 	if (prtn->cookie_count)
391 		return true;
392 	bit_ffs(prtn->shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT, &i);
393 	return i >= 0;
394 #else
395 	return false;
396 #endif
397 }
398 
399 static void get_prtn(struct guest_partition *prtn)
400 {
401 	if (!refcount_inc(&prtn->refc))
402 		panic();
403 }
404 
405 uint16_t virt_get_guest_id(struct guest_partition *prtn)
406 {
407 	if (!prtn)
408 		return 0;
409 	return prtn->id;
410 }
411 
412 static struct guest_partition *find_guest_by_id_unlocked(uint16_t guest_id)
413 {
414 	struct guest_partition *prtn = NULL;
415 
416 	LIST_FOREACH(prtn, &prtn_list, link)
417 		if (!prtn->shutting_down && prtn->id == guest_id)
418 			return prtn;
419 
420 	return NULL;
421 }
422 
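/*
 * Returns the next live (not shutting down) guest partition after prtn,
 * or the first one if prtn is NULL, with a reference taken on the
 * returned partition. The reference held on prtn is dropped, so a caller
 * can iterate with for example:
 *   for (prtn = virt_next_guest(NULL); prtn; prtn = virt_next_guest(prtn))
 */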
423 struct guest_partition *virt_next_guest(struct guest_partition *prtn)
424 {
425 	struct guest_partition *ret = NULL;
426 	uint32_t exceptions = 0;
427 
428 	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
429 	if (prtn)
430 		ret = LIST_NEXT(prtn, link);
431 	else
432 		ret = LIST_FIRST(&prtn_list);
433 
434 	while (ret && ret->shutting_down)
435 		ret = LIST_NEXT(ret, link);
436 	if (ret)
437 		get_prtn(ret);
438 	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
439 
440 	virt_put_guest(prtn);
441 
442 	return ret;
443 }
444 
445 struct guest_partition *virt_get_current_guest(void)
446 {
447 	struct guest_partition *prtn = get_current_prtn();
448 
449 	if (prtn)
450 		get_prtn(prtn);
451 	return prtn;
452 }
453 
454 struct guest_partition *virt_get_guest(uint16_t guest_id)
455 {
456 	struct guest_partition *prtn = NULL;
457 	uint32_t exceptions = 0;
458 
459 	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
460 	prtn = find_guest_by_id_unlocked(guest_id);
461 	if (prtn)
462 		get_prtn(prtn);
463 	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
464 
465 	return prtn;
466 }
467 
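/*
 * Drops a reference on prtn. When the last reference is gone the
 * partition resources are freed. If the partition still owns FF-A cookies
 * or shared memory bits it is parked on prtn_destroy_list and the struct
 * itself is kept until virt_reclaim_cookie_from_destroyed_guest() has
 * reclaimed the last resource.
 */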
468 void virt_put_guest(struct guest_partition *prtn)
469 {
470 	if (prtn && refcount_dec(&prtn->refc)) {
471 		uint32_t exceptions = 0;
472 		bool do_free = true;
473 
474 		assert(prtn->shutting_down);
475 
476 		exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
477 		LIST_REMOVE(prtn, link);
478 		if (prtn_have_remaining_resources(prtn)) {
479 			LIST_INSERT_HEAD(&prtn_destroy_list, prtn, link);
480 			/*
481 			 * Delay the nex_free() until
482 			 * virt_reclaim_cookie_from_destroyed_guest()
483 			 * is done with this partition.
484 			 */
485 			do_free = false;
486 		}
487 		cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
488 
489 		destroy_gsd(prtn, false /*!free_only*/);
490 		tee_mm_free(prtn->tee_ram);
491 		prtn->tee_ram = NULL;
492 		tee_mm_free(prtn->ta_ram);
493 		prtn->ta_ram = NULL;
494 		tee_mm_free(prtn->tables);
495 		prtn->tables = NULL;
496 		core_free_mmu_prtn(prtn->mmu_prtn);
497 		prtn->mmu_prtn = NULL;
498 		nex_free(prtn->memory_map);
499 		prtn->memory_map = NULL;
500 		if (do_free)
501 			nex_free(prtn);
502 	}
503 }
504 
505 TEE_Result virt_guest_destroyed(uint16_t guest_id)
506 {
507 	struct guest_partition *prtn = NULL;
508 	uint32_t exceptions = 0;
509 
510 	IMSG("Removing guest %d", guest_id);
511 
512 	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
513 
514 	prtn = find_guest_by_id_unlocked(guest_id);
515 	if (prtn)
516 		prtn->shutting_down = true;
517 
518 	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
519 
520 	virt_put_guest(prtn);
521 	if (!prtn)
522 		EMSG("Client with id %d not found", guest_id);
523 
524 	return TEE_SUCCESS;
525 }
526 
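/*
 * Installs the MMU partition of guest_id on the calling core and takes a
 * reference on it. Returns TEE_SUCCESS directly if the guest is already
 * active on this core (return from an IRQ RPC) and panics if a different
 * partition is active.
 */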
527 TEE_Result virt_set_guest(uint16_t guest_id)
528 {
529 	struct guest_partition *prtn = get_current_prtn();
530 
531 	/* This can be true only if we return from IRQ RPC */
532 	if (prtn && prtn->id == guest_id)
533 		return TEE_SUCCESS;
534 
535 	if (prtn)
536 		panic("Virtual guest partition is already set");
537 
538 	prtn = virt_get_guest(guest_id);
539 	if (!prtn)
540 		return TEE_ERROR_ITEM_NOT_FOUND;
541 
542 	set_current_prtn(prtn);
543 	core_mmu_set_prtn(prtn->mmu_prtn);
544 
545 	return TEE_SUCCESS;
546 }
547 
548 void virt_unset_guest(void)
549 {
550 	struct guest_partition *prtn = get_current_prtn();
551 
552 	if (!prtn)
553 		return;
554 
555 	set_current_prtn(NULL);
556 	core_mmu_set_default_prtn();
557 	virt_put_guest(prtn);
558 }
559 
560 void virt_on_stdcall(void)
561 {
562 	struct guest_partition *prtn = get_current_prtn();
563 
564 	/* Initialize runtime on first std call */
565 	if (!prtn->runtime_initialized) {
566 		mutex_lock(&prtn->mutex);
567 		if (!prtn->runtime_initialized) {
568 			init_tee_runtime();
569 			prtn->runtime_initialized = true;
570 		}
571 		mutex_unlock(&prtn->mutex);
572 	}
573 }
574 
575 struct tee_mmap_region *virt_get_memory_map(void)
576 {
577 	struct guest_partition *prtn;
578 
579 	prtn = get_current_prtn();
580 
581 	if (!prtn)
582 		return NULL;
583 
584 	return prtn->memory_map;
585 }
586 
587 void virt_get_ta_ram(vaddr_t *start, vaddr_t *end)
588 {
589 	struct guest_partition *prtn = get_current_prtn();
590 
591 	*start = (vaddr_t)phys_to_virt(tee_mm_get_smem(prtn->ta_ram),
592 				       MEM_AREA_TA_RAM,
593 				       tee_mm_get_bytes(prtn->ta_ram));
594 	*end = *start + tee_mm_get_bytes(prtn->ta_ram);
595 }
596 
597 #ifdef CFG_CORE_SEL1_SPMC
598 static int find_cookie(struct guest_partition *prtn, uint64_t cookie)
599 {
600 	int i = 0;
601 
602 	for (i = 0; i < prtn->cookie_count; i++)
603 		if (prtn->cookies[i] == cookie)
604 			return i;
605 	return -1;
606 }
607 
608 static struct guest_partition *find_prtn_cookie(uint64_t cookie, int *idx)
609 {
610 	struct guest_partition *prtn = NULL;
611 	int i = 0;
612 
613 	LIST_FOREACH(prtn, &prtn_list, link) {
614 		i = find_cookie(prtn, cookie);
615 		if (i >= 0) {
616 			if (idx)
617 				*idx = i;
618 			return prtn;
619 		}
620 	}
621 
622 	return NULL;
623 }
624 
625 TEE_Result virt_add_cookie_to_current_guest(uint64_t cookie)
626 {
627 	TEE_Result res = TEE_ERROR_ACCESS_DENIED;
628 	struct guest_partition *prtn = NULL;
629 	uint32_t exceptions = 0;
630 
631 	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
632 	if (find_prtn_cookie(cookie, NULL))
633 		goto out;
634 
635 	prtn = current_partition[get_core_pos()];
636 	if (prtn->cookie_count < ARRAY_SIZE(prtn->cookies)) {
637 		prtn->cookies[prtn->cookie_count] = cookie;
638 		prtn->cookie_count++;
639 		res = TEE_SUCCESS;
640 	}
641 out:
642 	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
643 
644 	return res;
645 }
646 
647 void virt_remove_cookie(uint64_t cookie)
648 {
649 	struct guest_partition *prtn = NULL;
650 	uint32_t exceptions = 0;
651 	int i = 0;
652 
653 	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
654 	prtn = find_prtn_cookie(cookie, &i);
655 	if (prtn) {
656 		memmove(prtn->cookies + i, prtn->cookies + i + 1,
657 			sizeof(uint64_t) * (prtn->cookie_count - i - 1));
658 		prtn->cookie_count--;
659 	}
660 	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
661 }
662 
663 uint16_t virt_find_guest_by_cookie(uint64_t cookie)
664 {
665 	struct guest_partition *prtn = NULL;
666 	uint32_t exceptions = 0;
667 	uint16_t ret = 0;
668 
669 	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
670 	prtn = find_prtn_cookie(cookie, NULL);
671 	if (prtn)
672 		ret = prtn->id;
673 
674 	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
675 
676 	return ret;
677 }
678 
679 bitstr_t *virt_get_shm_bits(void)
680 {
681 	return get_current_prtn()->shm_bits;
682 }
683 
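/*
 * Cookies with FFA_MEMORY_HANDLE_HYPERVISOR_BIT set are tracked in
 * prtn->cookies[]. Other cookies are SPMC allocated handles where the
 * handle value, with the non-secure bit and the partition field masked
 * off, is the index of a bit in prtn->shm_bits.
 */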
684 static TEE_Result reclaim_cookie(struct guest_partition *prtn, uint64_t cookie)
685 {
686 	if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
687 		size_t n = 0;
688 
689 		for (n = 0; n < prtn->cookie_count; n++) {
690 			if (prtn->cookies[n] == cookie) {
691 				memmove(prtn->cookies + n,
692 					prtn->cookies + n + 1,
693 					sizeof(uint64_t) *
694 						(prtn->cookie_count - n - 1));
695 				prtn->cookie_count--;
696 				return TEE_SUCCESS;
697 			}
698 		}
699 	} else {
700 		uint64_t mask = FFA_MEMORY_HANDLE_NON_SECURE_BIT |
701 				SHIFT_U64(FFA_MEMORY_HANDLE_PRTN_MASK,
702 					  FFA_MEMORY_HANDLE_PRTN_SHIFT);
703 		int64_t i = cookie & ~mask;
704 
705 		if (i >= 0 && i < SPMC_CORE_SEL1_MAX_SHM_COUNT &&
706 		    bit_test(prtn->shm_bits, i)) {
707 			bit_clear(prtn->shm_bits, i);
708 			return TEE_SUCCESS;
709 		}
710 	}
711 
712 	return TEE_ERROR_ITEM_NOT_FOUND;
713 }
714 
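/*
 * Reclaims a cookie still owned by a guest that has already been
 * destroyed. Once the last cookie or shared memory bit is gone the
 * partition is removed from prtn_destroy_list and freed.
 */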
715 TEE_Result virt_reclaim_cookie_from_destroyed_guest(uint16_t guest_id,
716 						    uint64_t cookie)
718 {
719 	struct guest_partition *prtn = NULL;
720 	TEE_Result res = TEE_ERROR_ITEM_NOT_FOUND;
721 	uint32_t exceptions = 0;
722 
723 	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
724 	LIST_FOREACH(prtn, &prtn_destroy_list, link) {
725 		if (prtn->id == guest_id) {
726 			res = reclaim_cookie(prtn, cookie);
727 			if (prtn_have_remaining_resources(prtn))
728 				prtn = NULL;
729 			else
730 				LIST_REMOVE(prtn, link);
731 			break;
732 		}
733 	}
734 	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
735 
736 	nex_free(prtn);
737 
738 	return res;
739 }
740 #endif /*CFG_CORE_SEL1_SPMC*/
741 
742 TEE_Result virt_add_guest_spec_data(unsigned int *data_id, size_t data_size,
743 				    void (*data_destroy)(void *data))
744 {
745 	void *p = NULL;
746 
747 	/*
748 	 * This function only executes successfully in a single threaded
749 	 * environment before exiting to the normal world the first time.
750 	 * If add_disabled is true, it means we're not in this environment
751 	 * any longer.
752 	 */
753 
754 	if (add_disabled)
755 		return TEE_ERROR_BAD_PARAMETERS;
756 
757 	p = nex_realloc(gsd_array, sizeof(*gsd_array) * (gsd_count + 1));
758 	if (!p)
759 		return TEE_ERROR_OUT_OF_MEMORY;
760 	gsd_array = p;
761 
762 	gsd_array[gsd_count] = (struct guest_spec_data){
763 		.size = data_size,
764 		.destroy = data_destroy,
765 	};
766 	*data_id = gsd_count + 1;
767 	gsd_count++;
768 	return TEE_SUCCESS;
769 }
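
/*
 * Usage sketch, with hypothetical names for illustration only: a nexus
 * service reserves a slot from an init step that runs before the first
 * exit to the normal world and looks its data up per guest later on.
 *
 *   static unsigned int my_data_id __nex_bss;
 *
 *   static TEE_Result my_service_init(void)
 *   {
 *           return virt_add_guest_spec_data(&my_data_id,
 *                                           sizeof(struct my_ctx),
 *                                           my_ctx_destroy);
 *   }
 *
 *   struct my_ctx *ctx = virt_get_guest_spec_data(prtn, my_data_id);
 */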
770 
771 void *virt_get_guest_spec_data(struct guest_partition *prtn,
772 			       unsigned int data_id)
773 {
774 	assert(data_id);
775 	if (!data_id || !prtn || data_id > gsd_count)
776 		return NULL;
777 	return prtn->data_array[data_id - 1];
778 }
779 
780 static TEE_Result virt_disable_add(void)
781 {
782 	add_disabled = true;
783 
784 	return TEE_SUCCESS;
785 }
786 nex_release_init_resource(virt_disable_add);
787