xref: /optee_os/core/arch/arm/kernel/virtualization.c (revision efcc90b2b45900841a7313a74fbe43599616f1e4)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2018, EPAM Systems. All rights reserved.
4  * Copyright (c) 2023-2024, Linaro Limited
5  */
6 
7 #include <bitstring.h>
8 #include <compiler.h>
9 #include <kernel/boot.h>
10 #include <kernel/linker.h>
11 #include <kernel/misc.h>
12 #include <kernel/mutex.h>
13 #include <kernel/notif.h>
14 #include <kernel/panic.h>
15 #include <kernel/refcount.h>
16 #include <kernel/spinlock.h>
17 #include <kernel/thread_spmc.h>
18 #include <kernel/virtualization.h>
19 #include <mm/core_memprot.h>
20 #include <mm/core_mmu.h>
21 #include <mm/tee_mm.h>
22 #include <platform_config.h>
23 #include <sm/optee_smc.h>
24 #include <string.h>
25 #include <string_ext.h>
26 #include <util.h>
27 
28 LIST_HEAD(prtn_list_head, guest_partition);
29 
30 static unsigned int prtn_list_lock __nex_data = SPINLOCK_UNLOCK;
31 
32 static struct prtn_list_head prtn_list __nex_data =
33 	LIST_HEAD_INITIALIZER(prtn_list);
34 static struct prtn_list_head prtn_destroy_list __nex_data =
35 	LIST_HEAD_INITIALIZER(prtn_destroy_list);
36 
37 /* Free pages used for guest partitions */
38 tee_mm_pool_t virt_mapper_pool __nex_bss;
39 
40 /* Memory used by OP-TEE core */
41 struct memory_map *kmem_map __nex_bss;
42 
43 struct guest_spec_data {
44 	size_t size;
45 	void (*destroy)(void *data);
46 };
47 
48 static bool add_disabled __nex_bss;
49 static unsigned int gsd_count __nex_bss;
50 static struct guest_spec_data *gsd_array __nex_bss;
51 
52 struct guest_partition {
53 	LIST_ENTRY(guest_partition) link;
54 	struct mmu_partition *mmu_prtn;
55 	struct memory_map mem_map;
56 	struct mutex mutex;
57 	void *tables_va;
58 	tee_mm_entry_t *tee_ram;
59 	tee_mm_entry_t *ta_ram;
60 	tee_mm_entry_t *tables;
61 	bool runtime_initialized;
62 	bool got_guest_destroyed;
63 	bool shutting_down;
64 	uint16_t id;
65 	struct refcount refc;
66 #ifdef CFG_CORE_SEL1_SPMC
67 	uint64_t cookies[SPMC_CORE_SEL1_MAX_SHM_COUNT];
68 	uint8_t cookie_count;
69 	bitstr_t bit_decl(shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT);
70 #endif
71 	void **data_array;
72 };
73 
74 static struct guest_partition *current_partition[CFG_TEE_CORE_NB_CORE] __nex_bss;
75 
76 static struct guest_partition *get_current_prtn(void)
77 {
78 	struct guest_partition *ret;
79 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
80 
81 	ret = current_partition[get_core_pos()];
82 
83 	thread_unmask_exceptions(exceptions);
84 
85 	return ret;
86 }
87 
88 uint16_t virt_get_current_guest_id(void)
89 {
90 	struct guest_partition *prtn = get_current_prtn();
91 
92 	if (!prtn)
93 		return 0;
94 	return prtn->id;
95 }
96 
97 static void set_current_prtn(struct guest_partition *prtn)
98 {
99 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
100 
101 	current_partition[get_core_pos()] = prtn;
102 
103 	thread_unmask_exceptions(exceptions);
104 }
105 
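/*
 * Each guest gets an equal share of the TA range: the total size
 * divided by CFG_VIRT_GUEST_COUNT, minus room for the guest's private
 * .data/.bss copy (VCORE_UNPG_RW_SZ) and its translation tables,
 * rounded down to a page boundary.
 */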
106 static size_t get_ta_ram_size(void)
107 {
108 	size_t ta_size = 0;
109 
110 	core_mmu_get_ta_range(NULL, &ta_size);
111 	return ROUNDDOWN(ta_size / CFG_VIRT_GUEST_COUNT - VCORE_UNPG_RW_SZ -
112 			 core_mmu_get_total_pages_size(), SMALL_PAGE_SIZE);
113 }
114 
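/*
 * Build a guest-private copy of the core memory map: duplicate
 * kmem_map, point the core RW region (.data/.bss) at the guest's
 * tee_data pages and append a MEM_AREA_TA_RAM entry backed by the
 * guest's ta_ram allocation.
 */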
115 static TEE_Result prepare_memory_map(struct memory_map *mem_map,
116 				     paddr_t tee_data, paddr_t ta_ram)
117 {
118 	struct tee_mmap_region *map = NULL;
119 	vaddr_t max_va = 0;
120 	size_t n = 0;
121 	/*
122 	 * This function assumes that kmem_map (aka static_memory_map
123 	 * from core_mmu.c) is not altered while it runs. This holds
124 	 * because all changes to static_memory_map are done during
125 	 * OP-TEE initialization, while this function is called later,
126 	 * when the hypervisor creates a guest and OP-TEE initialization
127 	 * has already completed.
128 	 */
129 
130 	/* Allocate entries for virtual guest map */
131 	mem_map->map = nex_calloc(kmem_map->count + 1, sizeof(*mem_map->map));
132 	if (!mem_map->map)
133 		return TEE_ERROR_OUT_OF_MEMORY;
134 	mem_map->count = kmem_map->count;
135 	mem_map->alloc_count = kmem_map->count + 1;
136 
137 	memcpy(mem_map->map, kmem_map->map,
138 	       sizeof(*mem_map->map) * mem_map->count);
139 
140 	/* Map TEE .data and .bss sections */
141 	for (n = 0; n < mem_map->count; n++) {
142 		map = mem_map->map + n;
143 		if (map->va == (vaddr_t)(VCORE_UNPG_RW_PA)) {
144 			map->type = MEM_AREA_TEE_RAM_RW;
145 			map->attr = core_mmu_type_to_attr(map->type);
146 			map->pa = tee_data;
147 		}
148 		if (map->va + map->size > max_va)
149 			max_va = map->va + map->size;
150 	}
151 
152 	/* Map TA_RAM */
153 	mem_map->count++;
154 	map = ins_array_elem(mem_map->map, mem_map->count,
155 			     sizeof(*mem_map->map), n, NULL);
156 	map->region_size = SMALL_PAGE_SIZE;
157 	map->va = ROUNDUP(max_va, map->region_size);
158 	map->va += (ta_ram - map->va) & CORE_MMU_PGDIR_MASK;
159 	map->pa = ta_ram;
160 	map->size = get_ta_ram_size();
161 	map->type = MEM_AREA_TA_RAM;
162 	map->attr = core_mmu_type_to_attr(map->type);
163 
164 	DMSG("New map (%08lx):",  (vaddr_t)(VCORE_UNPG_RW_PA));
165 
166 	for (n = 0; n < mem_map->count; n++)
167 		DMSG("T: %-16s rsz: %08x, pa: %08lx, va: %08lx, sz: %08lx attr: %x",
168 		     teecore_memtype_name(mem_map->map[n].type),
169 		     mem_map->map[n].region_size, mem_map->map[n].pa,
170 		     mem_map->map[n].va, mem_map->map[n].size,
171 		     mem_map->map[n].attr);
172 	return TEE_SUCCESS;
173 }
174 
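/*
 * Called during OP-TEE initialization: create virt_mapper_pool
 * covering all secure RAM, carve out the gap between the two supplied
 * ranges (if any) and the areas already used by the shared OP-TEE core
 * image, and record the core memory map for later per-guest cloning.
 */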
175 void virt_init_memory(struct memory_map *mem_map, paddr_t secmem0_base,
176 		      paddr_size_t secmem0_size, paddr_t secmem1_base,
177 		      paddr_size_t secmem1_size)
178 {
179 	paddr_size_t size = secmem0_size;
180 	paddr_t base = secmem0_base;
181 	size_t n = 0;
182 
183 	if (secmem1_size) {
184 		assert(secmem0_base + secmem0_size <= secmem1_base);
185 		size = secmem1_base + secmem1_size - base;
186 	}
187 
188 	/* Init page pool that covers all secure RAM */
189 	if (!tee_mm_init(&virt_mapper_pool, base, size,
190 			 SMALL_PAGE_SHIFT, TEE_MM_POOL_NEX_MALLOC))
191 		panic("Can't create pool with free pages");
192 	DMSG("Created virtual mapper pool from %"PRIxPA" to %"PRIxPA,
193 	     base, base + size);
194 
195 	if (secmem1_size) {
196 		/* Carve out an eventual gap between secmem0 and secmem1 */
197 		base = secmem0_base + secmem0_size;
198 		size = secmem1_base - base;
199 		if (size) {
200 			DMSG("Carving out gap between secmem0 and secmem1 (0x%"PRIxPA":0x%"PRIxPASZ")",
201 			     base, size);
202 			if (!tee_mm_alloc2(&virt_mapper_pool, base, size))
203 				panic("Can't carve out secmem gap");
204 		}
205 	}
206 
207 
208 	/* Carve out areas that are used by OP-TEE core */
209 	for (n = 0; n < mem_map->count; n++) {
210 		struct tee_mmap_region *map = mem_map->map + n;
211 
212 		switch (map->type) {
213 		case MEM_AREA_TEE_RAM_RX:
214 		case MEM_AREA_TEE_RAM_RO:
215 		case MEM_AREA_NEX_RAM_RO:
216 		case MEM_AREA_NEX_RAM_RW:
217 			DMSG("Carving out area of type %d (0x%08lx-0x%08lx)",
218 			     map->type, map->pa, map->pa + map->size);
219 			if (!tee_mm_alloc2(&virt_mapper_pool, map->pa,
220 					   map->size))
221 				panic("Can't carve out used area");
222 			break;
223 		default:
224 			continue;
225 		}
226 	}
227 
228 	kmem_map = mem_map;
229 }
230 
231 
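/*
 * Allocate the guest's private TEE RW RAM, TA RAM and translation
 * tables from virt_mapper_pool, build its MMU partition and memory
 * map, then switch to the new mapping to clear .bss and copy .data
 * from the read-only original (the caller switches back to the
 * default partition when done).
 */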
232 static TEE_Result configure_guest_prtn_mem(struct guest_partition *prtn)
233 {
234 	TEE_Result res = TEE_SUCCESS;
235 	paddr_t original_data_pa = 0;
236 
237 	prtn->tee_ram = tee_mm_alloc(&virt_mapper_pool, VCORE_UNPG_RW_SZ);
238 	if (!prtn->tee_ram) {
239 		EMSG("Can't allocate memory for TEE runtime context");
240 		res = TEE_ERROR_OUT_OF_MEMORY;
241 		goto err;
242 	}
243 	DMSG("TEE RAM: %08" PRIxPA, tee_mm_get_smem(prtn->tee_ram));
244 
245 	prtn->ta_ram = tee_mm_alloc(&virt_mapper_pool, get_ta_ram_size());
246 	if (!prtn->ta_ram) {
247 		EMSG("Can't allocate memory for TA data");
248 		res = TEE_ERROR_OUT_OF_MEMORY;
249 		goto err;
250 	}
251 	DMSG("TA RAM: %08" PRIxPA, tee_mm_get_smem(prtn->ta_ram));
252 
253 	prtn->tables = tee_mm_alloc(&virt_mapper_pool,
254 				   core_mmu_get_total_pages_size());
255 	if (!prtn->tables) {
256 		EMSG("Can't allocate memory for page tables");
257 		res = TEE_ERROR_OUT_OF_MEMORY;
258 		goto err;
259 	}
260 
261 	prtn->tables_va = phys_to_virt(tee_mm_get_smem(prtn->tables),
262 				      MEM_AREA_SEC_RAM_OVERALL,
263 				      core_mmu_get_total_pages_size());
264 	assert(prtn->tables_va);
265 
266 	prtn->mmu_prtn = core_alloc_mmu_prtn(prtn->tables_va);
267 	if (!prtn->mmu_prtn) {
268 		res = TEE_ERROR_OUT_OF_MEMORY;
269 		goto err;
270 	}
271 
272 	res = prepare_memory_map(&prtn->mem_map, tee_mm_get_smem(prtn->tee_ram),
273 				 tee_mm_get_smem(prtn->ta_ram));
274 	if (res)
275 		goto err;
276 
277 	core_init_mmu_prtn(prtn->mmu_prtn, &prtn->mem_map);
278 
279 	original_data_pa = virt_to_phys(__data_start);
280 	/* Switch to guest's mappings */
281 	core_mmu_set_prtn(prtn->mmu_prtn);
282 
283 	/* clear .bss */
284 	memset((void *)(VCORE_UNPG_RW_PA), 0, VCORE_UNPG_RW_SZ);
285 
286 	/* copy .data section from R/O original */
287 	memcpy(__data_start,
288 	       phys_to_virt(original_data_pa, MEM_AREA_SEC_RAM_OVERALL,
289 			    __data_end - __data_start),
290 	       __data_end - __data_start);
291 
292 	return TEE_SUCCESS;
293 
294 err:
295 	if (prtn->tee_ram)
296 		tee_mm_free(prtn->tee_ram);
297 	if (prtn->ta_ram)
298 		tee_mm_free(prtn->ta_ram);
299 	if (prtn->tables)
300 		tee_mm_free(prtn->tables);
301 	nex_free(prtn->mmu_prtn);
302 	nex_free(prtn->mem_map.map);
303 
304 	return res;
305 }
306 
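/*
 * Release the guest-specific data blocks registered with
 * virt_add_guest_spec_data(). Unless free_only is set, each
 * registered destroy() callback is run before its block is freed.
 */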
307 static void destroy_gsd(struct guest_partition *prtn, bool free_only)
308 {
309 	size_t n = 0;
310 
311 	for (n = 0; n < gsd_count; n++) {
312 		if (!free_only && prtn->data_array[n] && gsd_array[n].destroy)
313 			gsd_array[n].destroy(prtn->data_array[n]);
314 		nex_free(prtn->data_array[n]);
315 	}
316 	nex_free(prtn->data_array);
317 	prtn->data_array = NULL;
318 }
319 
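/*
 * Allocate one zero-initialized block for each registered
 * guest-specific data entry.
 */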
320 static TEE_Result alloc_gsd(struct guest_partition *prtn)
321 {
322 	unsigned int n = 0;
323 
324 	if (!gsd_count)
325 		return TEE_SUCCESS;
326 
327 	prtn->data_array = nex_calloc(gsd_count, sizeof(void *));
328 	if (!prtn->data_array)
329 		return TEE_ERROR_OUT_OF_MEMORY;
330 
331 	for (n = 0; n < gsd_count; n++) {
332 		prtn->data_array[n] = nex_calloc(1, gsd_array[n].size);
333 		if (!prtn->data_array[n]) {
334 			destroy_gsd(prtn, true /*free_only*/);
335 			return TEE_ERROR_OUT_OF_MEMORY;
336 		}
337 	}
338 
339 	return TEE_SUCCESS;
340 }
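
/*
 * Create the partition for a new guest: allocate and configure its
 * private memory and MMU partition, switch to the guest mapping to set
 * up its heap and threads and run the preinitcalls, add it to
 * prtn_list and finally switch back to the default partition.
 */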
341 TEE_Result virt_guest_created(uint16_t guest_id)
342 {
343 	struct guest_partition *prtn = NULL;
344 	TEE_Result res = TEE_SUCCESS;
345 	uint32_t exceptions = 0;
346 
347 	prtn = nex_calloc(1, sizeof(*prtn));
348 	if (!prtn)
349 		return TEE_ERROR_OUT_OF_MEMORY;
350 
351 	res = alloc_gsd(prtn);
352 	if (res)
353 		goto err_free_prtn;
354 
355 	prtn->id = guest_id;
356 	mutex_init(&prtn->mutex);
357 	refcount_set(&prtn->refc, 1);
358 	res = configure_guest_prtn_mem(prtn);
359 	if (res)
360 		goto err_free_gsd;
361 
362 	set_current_prtn(prtn);
363 
364 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
365 	/* Initialize threads */
366 	thread_init_threads();
367 	/* Do the preinitcalls */
368 	call_preinitcalls();
369 
370 	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
371 	LIST_INSERT_HEAD(&prtn_list, prtn, link);
372 	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
373 
374 	IMSG("Added guest %d", guest_id);
375 
376 	set_current_prtn(NULL);
377 	core_mmu_set_default_prtn();
378 
379 	return TEE_SUCCESS;
380 
381 err_free_gsd:
382 	destroy_gsd(prtn, true /*free_only*/);
383 err_free_prtn:
384 	nex_free(prtn);
385 	return res;
386 }
387 
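/*
 * With a S-EL1 SPMC a destroyed guest may still own FF-A cookies or
 * shared-memory handles; the partition is then kept on
 * prtn_destroy_list until they have been reclaimed.
 */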
388 static bool
389 prtn_have_remaining_resources(struct guest_partition *prtn __maybe_unused)
390 {
391 #ifdef CFG_CORE_SEL1_SPMC
392 	int i = 0;
393 
394 	if (prtn->cookie_count)
395 		return true;
396 	bit_ffs(prtn->shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT, &i);
397 	return i >= 0;
398 #else
399 	return false;
400 #endif
401 }
402 
403 static void get_prtn(struct guest_partition *prtn)
404 {
405 	if (!refcount_inc(&prtn->refc))
406 		panic();
407 }
408 
409 uint16_t virt_get_guest_id(struct guest_partition *prtn)
410 {
411 	if (!prtn)
412 		return 0;
413 	return prtn->id;
414 }
415 
416 static struct guest_partition *find_guest_by_id_unlocked(uint16_t guest_id)
417 {
418 	struct guest_partition *prtn = NULL;
419 
420 	LIST_FOREACH(prtn, &prtn_list, link)
421 		if (!prtn->shutting_down && prtn->id == guest_id)
422 			return prtn;
423 
424 	return NULL;
425 }
426 
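/*
 * Return a reference to the guest following prtn in prtn_list, or the
 * first guest when prtn is NULL, skipping partitions that are shutting
 * down. The reference held on prtn is always released, so the function
 * can be used to iterate over all live guests.
 */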
427 struct guest_partition *virt_next_guest(struct guest_partition *prtn)
428 {
429 	struct guest_partition *ret = NULL;
430 	uint32_t exceptions = 0;
431 
432 	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
433 	if (prtn)
434 		ret = LIST_NEXT(prtn, link);
435 	else
436 		ret = LIST_FIRST(&prtn_list);
437 
438 	while (ret && ret->shutting_down)
439 		ret = LIST_NEXT(ret, link);
440 	if (ret)
441 		get_prtn(ret);
442 	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
443 
444 	virt_put_guest(prtn);
445 
446 	return ret;
447 }
448 
449 struct guest_partition *virt_get_current_guest(void)
450 {
451 	struct guest_partition *prtn = get_current_prtn();
452 
453 	if (prtn)
454 		get_prtn(prtn);
455 	return prtn;
456 }
457 
458 struct guest_partition *virt_get_guest(uint16_t guest_id)
459 {
460 	struct guest_partition *prtn = NULL;
461 	uint32_t exceptions = 0;
462 
463 	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
464 	prtn = find_guest_by_id_unlocked(guest_id);
465 	if (prtn)
466 		get_prtn(prtn);
467 	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
468 
469 	return prtn;
470 }
471 
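/*
 * Drop a reference on prtn. The last reference tears down the
 * partition's memory and guest-specific data; the struct itself is
 * freed immediately unless SPMC resources remain, in which case it is
 * parked on prtn_destroy_list until
 * virt_reclaim_cookie_from_destroyed_guest() has reclaimed them.
 */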
472 void virt_put_guest(struct guest_partition *prtn)
473 {
474 	if (prtn && refcount_dec(&prtn->refc)) {
475 		uint32_t exceptions = 0;
476 		bool do_free = true;
477 
478 		assert(prtn->shutting_down);
479 
480 		exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
481 		LIST_REMOVE(prtn, link);
482 		if (prtn_have_remaining_resources(prtn)) {
483 			LIST_INSERT_HEAD(&prtn_destroy_list, prtn, link);
484 			/*
485 			 * Delay the nex_free() until
486 			 * virt_reclaim_cookie_from_destroyed_guest()
487 			 * is done with this partition.
488 			 */
489 			do_free = false;
490 		}
491 		cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
492 
493 		destroy_gsd(prtn, false /*!free_only*/);
494 		tee_mm_free(prtn->tee_ram);
495 		prtn->tee_ram = NULL;
496 		tee_mm_free(prtn->ta_ram);
497 		prtn->ta_ram = NULL;
498 		tee_mm_free(prtn->tables);
499 		prtn->tables = NULL;
500 		core_free_mmu_prtn(prtn->mmu_prtn);
501 		prtn->mmu_prtn = NULL;
502 		nex_free(prtn->mem_map.map);
503 		prtn->mem_map.map = NULL;
504 		if (do_free)
505 			nex_free(prtn);
506 	}
507 }
508 
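/*
 * The hypervisor reports that a guest has been destroyed: deliver the
 * shutdown notification, mark the partition as shutting down and drop
 * the initial reference taken in virt_guest_created(). Repeated calls
 * for the same guest are ignored thanks to got_guest_destroyed.
 */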
509 TEE_Result virt_guest_destroyed(uint16_t guest_id)
510 {
511 	struct guest_partition *prtn = NULL;
512 	uint32_t exceptions = 0;
513 
514 	IMSG("Removing guest %"PRId16, guest_id);
515 
516 	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
517 
518 	prtn = find_guest_by_id_unlocked(guest_id);
519 	if (prtn && !prtn->got_guest_destroyed)
520 		prtn->got_guest_destroyed = true;
521 	else
522 		prtn = NULL;
523 
524 	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
525 
526 	if (prtn) {
527 		notif_deliver_atomic_event(NOTIF_EVENT_SHUTDOWN, prtn->id);
528 
529 		exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
530 		prtn->shutting_down = true;
531 		cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
532 
533 		virt_put_guest(prtn);
534 	} else {
535 		EMSG("Client with id %d is not found", guest_id);
536 	}
537 
538 	return TEE_SUCCESS;
539 }
540 
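/*
 * Bind the calling core to the partition of guest_id and activate its
 * MMU partition. Panics if a different guest is already set; returning
 * to the same guest (for instance after an IRQ RPC) is a no-op.
 */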
541 TEE_Result virt_set_guest(uint16_t guest_id)
542 {
543 	struct guest_partition *prtn = get_current_prtn();
544 
545 	/* This can be true only if we return from IRQ RPC */
546 	if (prtn && prtn->id == guest_id)
547 		return TEE_SUCCESS;
548 
549 	if (prtn)
550 		panic("Virtual guest partition is already set");
551 
552 	prtn = virt_get_guest(guest_id);
553 	if (!prtn)
554 		return TEE_ERROR_ITEM_NOT_FOUND;
555 
556 	set_current_prtn(prtn);
557 	core_mmu_set_prtn(prtn->mmu_prtn);
558 
559 	return TEE_SUCCESS;
560 }
561 
562 void virt_unset_guest(void)
563 {
564 	struct guest_partition *prtn = get_current_prtn();
565 
566 	if (!prtn)
567 		return;
568 
569 	set_current_prtn(NULL);
570 	core_mmu_set_default_prtn();
571 	virt_put_guest(prtn);
572 }
573 
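/*
 * First standard call from a guest: initialize the TEE runtime and run
 * the driver initcalls once, using double-checked locking on the
 * partition mutex.
 */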
574 void virt_on_stdcall(void)
575 {
576 	struct guest_partition *prtn = get_current_prtn();
577 
578 	/* Initialize runtime on first std call */
579 	if (!prtn->runtime_initialized) {
580 		mutex_lock(&prtn->mutex);
581 		if (!prtn->runtime_initialized) {
582 			init_tee_runtime();
583 			call_driver_initcalls();
584 			prtn->runtime_initialized = true;
585 		}
586 		mutex_unlock(&prtn->mutex);
587 	}
588 }
589 
590 struct memory_map *virt_get_memory_map(void)
591 {
592 	struct guest_partition *prtn;
593 
594 	prtn = get_current_prtn();
595 
596 	if (!prtn)
597 		return NULL;
598 
599 	return &prtn->mem_map;
600 }
601 
602 void virt_get_ta_ram(vaddr_t *start, vaddr_t *end)
603 {
604 	struct guest_partition *prtn = get_current_prtn();
605 
606 	*start = (vaddr_t)phys_to_virt(tee_mm_get_smem(prtn->ta_ram),
607 				       MEM_AREA_TA_RAM,
608 				       tee_mm_get_bytes(prtn->ta_ram));
609 	*end = *start + tee_mm_get_bytes(prtn->ta_ram);
610 }
611 
612 #ifdef CFG_CORE_SEL1_SPMC
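/*
 * Each partition tracks the FF-A shared-memory handles ("cookies") it
 * owns: hypervisor-assigned handles are stored in the cookies[] array
 * while SPMC-assigned handles are tracked as bits in shm_bits.
 */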
613 static int find_cookie(struct guest_partition *prtn, uint64_t cookie)
614 {
615 	int i = 0;
616 
617 	for (i = 0; i < prtn->cookie_count; i++)
618 		if (prtn->cookies[i] == cookie)
619 			return i;
620 	return -1;
621 }
622 
623 static struct guest_partition *find_prtn_cookie(uint64_t cookie, int *idx)
624 {
625 	struct guest_partition *prtn = NULL;
626 	int i = 0;
627 
628 	LIST_FOREACH(prtn, &prtn_list, link) {
629 		i = find_cookie(prtn, cookie);
630 		if (i >= 0) {
631 			if (idx)
632 				*idx = i;
633 			return prtn;
634 		}
635 	}
636 
637 	return NULL;
638 }
639 
640 TEE_Result virt_add_cookie_to_current_guest(uint64_t cookie)
641 {
642 	TEE_Result res = TEE_ERROR_ACCESS_DENIED;
643 	struct guest_partition *prtn = NULL;
644 	uint32_t exceptions = 0;
645 
646 	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
647 	if (find_prtn_cookie(cookie, NULL))
648 		goto out;
649 
650 	prtn = current_partition[get_core_pos()];
651 	if (prtn->cookie_count < ARRAY_SIZE(prtn->cookies)) {
652 		prtn->cookies[prtn->cookie_count] = cookie;
653 		prtn->cookie_count++;
654 		res = TEE_SUCCESS;
655 	}
656 out:
657 	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
658 
659 	return res;
660 }
661 
662 void virt_remove_cookie(uint64_t cookie)
663 {
664 	struct guest_partition *prtn = NULL;
665 	uint32_t exceptions = 0;
666 	int i = 0;
667 
668 	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
669 	prtn = find_prtn_cookie(cookie, &i);
670 	if (prtn) {
671 		memmove(prtn->cookies + i, prtn->cookies + i + 1,
672 			sizeof(uint64_t) * (prtn->cookie_count - i - 1));
673 		prtn->cookie_count--;
674 	}
675 	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
676 }
677 
678 uint16_t virt_find_guest_by_cookie(uint64_t cookie)
679 {
680 	struct guest_partition *prtn = NULL;
681 	uint32_t exceptions = 0;
682 	uint16_t ret = 0;
683 
684 	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
685 	prtn = find_prtn_cookie(cookie, NULL);
686 	if (prtn)
687 		ret = prtn->id;
688 
689 	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
690 
691 	return ret;
692 }
693 
694 bitstr_t *virt_get_shm_bits(void)
695 {
696 	return get_current_prtn()->shm_bits;
697 }
698 
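/*
 * Reclaim one cookie from a destroyed guest: handles with
 * FFA_MEMORY_HANDLE_HYPERVISOR_BIT set are removed from the cookies[]
 * array, other handles are cleared from the shm_bits bitmap using the
 * index encoded in the cookie value.
 */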
699 static TEE_Result reclaim_cookie(struct guest_partition *prtn, uint64_t cookie)
700 {
701 	if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
702 		size_t n = 0;
703 
704 		for (n = 0; n < prtn->cookie_count; n++) {
705 			if (prtn->cookies[n] == cookie) {
706 				memmove(prtn->cookies + n,
707 					prtn->cookies + n + 1,
708 					sizeof(uint64_t) *
709 						(prtn->cookie_count - n - 1));
710 				prtn->cookie_count--;
711 				return TEE_SUCCESS;
712 			}
713 		}
714 	} else {
715 		uint64_t mask = FFA_MEMORY_HANDLE_NON_SECURE_BIT |
716 				SHIFT_U64(FFA_MEMORY_HANDLE_PRTN_MASK,
717 					  FFA_MEMORY_HANDLE_PRTN_SHIFT);
718 		int64_t i = cookie & ~mask;
719 
720 		if (i >= 0 && i < SPMC_CORE_SEL1_MAX_SHM_COUNT &&
721 		    bit_test(prtn->shm_bits, i)) {
722 			bit_clear(prtn->shm_bits, i);
723 			return TEE_SUCCESS;
724 		}
725 	}
726 
727 	return TEE_ERROR_ITEM_NOT_FOUND;
728 }
729 
730 TEE_Result virt_reclaim_cookie_from_destroyed_guest(uint16_t guest_id,
731 						    uint64_t cookie)
733 {
734 	struct guest_partition *prtn = NULL;
735 	TEE_Result res = TEE_ERROR_ITEM_NOT_FOUND;
736 	uint32_t exceptions = 0;
737 
738 	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
739 	LIST_FOREACH(prtn, &prtn_destroy_list, link) {
740 		if (prtn->id == guest_id) {
741 			res = reclaim_cookie(prtn, cookie);
742 			if (prtn_have_remaining_resources(prtn))
743 				prtn = NULL;
744 			else
745 				LIST_REMOVE(prtn, link);
746 			break;
747 		}
748 	}
749 	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
750 
751 	nex_free(prtn);
752 
753 	return res;
754 }
755 #endif /*CFG_CORE_SEL1_SPMC*/
756 
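/*
 * Illustrative usage sketch (hypothetical names, not part of this
 * file): a nexus service reserves per-guest storage once during early
 * boot and looks it up later through the returned id:
 *
 *	static unsigned int my_data_id __nex_bss;
 *
 *	static TEE_Result my_service_init(void)
 *	{
 *		return virt_add_guest_spec_data(&my_data_id,
 *						sizeof(struct my_ctx),
 *						my_ctx_destroy);
 *	}
 *
 *	struct my_ctx *ctx = virt_get_guest_spec_data(prtn, my_data_id);
 *
 * The block is allocated per guest in alloc_gsd() and released by
 * destroy_gsd() when the guest goes away.
 */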
757 TEE_Result virt_add_guest_spec_data(unsigned int *data_id, size_t data_size,
758 				    void (*data_destroy)(void *data))
759 {
760 	void *p = NULL;
761 
762 	/*
763 	 * This function only executes successfully in a single threaded
764 	 * environment before exiting to the normal world the first time.
765 	 * If add_disabled is true, it means we're not in this environment
766 	 * any longer.
767 	 */
768 
769 	if (add_disabled)
770 		return TEE_ERROR_BAD_PARAMETERS;
771 
772 	p = nex_realloc(gsd_array, sizeof(*gsd_array) * (gsd_count + 1));
773 	if (!p)
774 		return TEE_ERROR_OUT_OF_MEMORY;
775 	gsd_array = p;
776 
777 	gsd_array[gsd_count] = (struct guest_spec_data){
778 		.size = data_size,
779 		.destroy = data_destroy,
780 	};
781 	*data_id = gsd_count + 1;
782 	gsd_count++;
783 	return TEE_SUCCESS;
784 }
785 
786 void *virt_get_guest_spec_data(struct guest_partition *prtn,
787 			       unsigned int data_id)
788 {
789 	assert(data_id);
790 	if (!data_id || !prtn || data_id > gsd_count)
791 		return NULL;
792 	return prtn->data_array[data_id - 1];
793 }
794 
795 static TEE_Result virt_disable_add(void)
796 {
797 	add_disabled = true;
798 
799 	return TEE_SUCCESS;
800 }
801 nex_release_init_resource(virt_disable_add);
802