xref: /optee_os/core/arch/arm/kernel/virtualization.c (revision d237e616e155e6127ff2399ac5cf90655624b0e9)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2018, EPAM Systems. All rights reserved.
4  * Copyright (c) 2023-2024, Linaro Limited
5  */
6 
7 #include <bitstring.h>
8 #include <compiler.h>
9 #include <kernel/boot.h>
10 #include <kernel/linker.h>
11 #include <kernel/misc.h>
12 #include <kernel/mutex.h>
13 #include <kernel/notif.h>
14 #include <kernel/panic.h>
15 #include <kernel/refcount.h>
16 #include <kernel/spinlock.h>
17 #include <kernel/thread_spmc.h>
18 #include <kernel/virtualization.h>
19 #include <mm/core_memprot.h>
20 #include <mm/core_mmu.h>
21 #include <mm/tee_mm.h>
22 #include <platform_config.h>
23 #include <sm/optee_smc.h>
24 #include <string.h>
25 #include <util.h>
26 
27 LIST_HEAD(prtn_list_head, guest_partition);
28 
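/*
 * prtn_list holds all running partitions.  Partitions whose guest has
 * been destroyed but which still own FF-A cookies or shared memory are
 * moved to prtn_destroy_list until everything has been reclaimed.
 * Both lists are protected by prtn_list_lock.
 */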
29 static unsigned int prtn_list_lock __nex_data = SPINLOCK_UNLOCK;
30 
31 static struct prtn_list_head prtn_list __nex_data =
32 	LIST_HEAD_INITIALIZER(prtn_list);
33 static struct prtn_list_head prtn_destroy_list __nex_data =
34 	LIST_HEAD_INITIALIZER(prtn_destroy_list);
35 
36 /* Free pages used for guest partitions */
37 tee_mm_pool_t virt_mapper_pool __nex_bss;
38 
39 /* Memory used by OP-TEE core */
40 struct tee_mmap_region *kmemory_map __nex_bss;
41 
42 struct guest_spec_data {
43 	size_t size;
44 	void (*destroy)(void *data);
45 };
46 
47 static bool add_disabled __nex_bss;
48 static unsigned int gsd_count __nex_bss;
49 static struct guest_spec_data *gsd_array __nex_bss;
50 
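/*
 * Per-guest partition state.  refc counts the users of the partition;
 * once the guest is shutting down and the last reference is dropped,
 * virt_put_guest() frees the partition's resources.
 */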
51 struct guest_partition {
52 	LIST_ENTRY(guest_partition) link;
53 	struct mmu_partition *mmu_prtn;
54 	struct tee_mmap_region *memory_map;
55 	struct mutex mutex;
56 	void *tables_va;
57 	tee_mm_entry_t *tee_ram;
58 	tee_mm_entry_t *ta_ram;
59 	tee_mm_entry_t *tables;
60 	bool runtime_initialized;
61 	bool got_guest_destroyed;
62 	bool shutting_down;
63 	uint16_t id;
64 	struct refcount refc;
65 #ifdef CFG_CORE_SEL1_SPMC
66 	uint64_t cookies[SPMC_CORE_SEL1_MAX_SHM_COUNT];
67 	uint8_t cookie_count;
68 	bitstr_t bit_decl(shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT);
69 #endif
70 	void **data_array;
71 };
72 
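/* Partition active on each core, NULL when the nexus mappings are used */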
73 static struct guest_partition *current_partition[CFG_TEE_CORE_NB_CORE] __nex_bss;
74 
75 static struct guest_partition *get_current_prtn(void)
76 {
77 	struct guest_partition *ret;
78 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
79 
80 	ret = current_partition[get_core_pos()];
81 
82 	thread_unmask_exceptions(exceptions);
83 
84 	return ret;
85 }
86 
87 uint16_t virt_get_current_guest_id(void)
88 {
89 	struct guest_partition *prtn = get_current_prtn();
90 
91 	if (!prtn)
92 		return 0;
93 	return prtn->id;
94 }
95 
96 static void set_current_prtn(struct guest_partition *prtn)
97 {
98 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
99 
100 	current_partition[get_core_pos()] = prtn;
101 
102 	thread_unmask_exceptions(exceptions);
103 }
104 
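/*
 * Each guest gets an equal share of the TA range, reduced by the
 * guest's private copy of the TEE RW data (.data and .bss) and its
 * translation tables, rounded down to a page boundary.
 */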
105 static size_t get_ta_ram_size(void)
106 {
107 	size_t ta_size = 0;
108 
109 	core_mmu_get_ta_range(NULL, &ta_size);
110 	return ROUNDDOWN(ta_size / CFG_VIRT_GUEST_COUNT - VCORE_UNPG_RW_SZ -
111 			 core_mmu_get_total_pages_size(), SMALL_PAGE_SIZE);
112 }
113 
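/*
 * Clone the nexus memory map for a new guest: the TEE RW (.data/.bss)
 * entry is redirected to the guest's private copy at @tee_data and a
 * MEM_AREA_TA_RAM entry backed by @ta_ram is appended for the guest's
 * share of TA memory.
 */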
114 static struct tee_mmap_region *prepare_memory_map(paddr_t tee_data,
115 						  paddr_t ta_ram)
116 {
117 	int i, entries;
118 	vaddr_t max_va = 0;
119 	struct tee_mmap_region *map;
120 	/*
121 	 * This function assumes that kmemory_map (aka
122 	 * static_memory_map from core_mmu.c) is not altered
123 	 * while it runs. This holds because all changes to
124 	 * static_memory_map are done during OP-TEE
125 	 * initialization, while this function is only called
126 	 * when the hypervisor creates a guest.
127 	 */
128 
129 	/* Count number of entries in nexus memory map */
130 	for (map = kmemory_map, entries = 1; map->type != MEM_AREA_END;
131 	     map++, entries++)
132 		;
133 
134 	/* Allocate entries for virtual guest map */
135 	map = nex_calloc(entries + 1, sizeof(struct tee_mmap_region));
136 	if (!map)
137 		return NULL;
138 
139 	memcpy(map, kmemory_map, sizeof(*map) * entries);
140 
141 	/* Map TEE .data and .bss sections */
142 	for (i = 0; i < entries; i++) {
143 		if (map[i].va == (vaddr_t)(VCORE_UNPG_RW_PA)) {
144 			map[i].type = MEM_AREA_TEE_RAM_RW;
145 			map[i].attr = core_mmu_type_to_attr(map[i].type);
146 			map[i].pa = tee_data;
147 		}
148 		if (map[i].va + map[i].size > max_va)
149 			max_va = map[i].va + map[i].size;
150 	}
151 
152 	/* Map TA_RAM */
153 	assert(map[entries - 1].type == MEM_AREA_END);
154 	map[entries] = map[entries - 1];
155 	map[entries - 1].region_size = SMALL_PAGE_SIZE;
156 	map[entries - 1].va = ROUNDUP(max_va, map[entries - 1].region_size);
157 	map[entries - 1].va +=
158 		(ta_ram - map[entries - 1].va) & CORE_MMU_PGDIR_MASK;
159 	map[entries - 1].pa = ta_ram;
160 	map[entries - 1].size = get_ta_ram_size();
161 	map[entries - 1].type = MEM_AREA_TA_RAM;
162 	map[entries - 1].attr = core_mmu_type_to_attr(map[entries - 1].type);
163 
164 	DMSG("New map (%08lx):", (vaddr_t)(VCORE_UNPG_RW_PA));
165 
166 	for (i = 0; i < entries; i++)
167 		DMSG("T: %-16s rsz: %08x, pa: %08lx, va: %08lx, sz: %08lx attr: %x",
168 		     teecore_memtype_name(map[i].type),
169 		     map[i].region_size, map[i].pa, map[i].va,
170 		     map[i].size, map[i].attr);
171 	return map;
172 }
173 
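/*
 * Create the page pool covering all secure RAM (both secmem banks),
 * then carve out the gap between the banks and the areas used by the
 * OP-TEE core itself so that only guest-assignable pages remain free.
 */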
174 void virt_init_memory(struct tee_mmap_region *memory_map, paddr_t secmem0_base,
175 		      paddr_size_t secmem0_size, paddr_t secmem1_base,
176 		      paddr_size_t secmem1_size)
177 {
178 	struct tee_mmap_region *map = NULL;
179 	paddr_size_t size = secmem0_size;
180 	paddr_t base = secmem0_base;
181 
182 	if (secmem1_size) {
183 		assert(secmem0_base + secmem0_size <= secmem1_base);
184 		size = secmem1_base + secmem1_size - base;
185 	}
186 
187 	/* Init page pool that covers all secure RAM */
188 	if (!tee_mm_init(&virt_mapper_pool, base, size,
189 			 SMALL_PAGE_SHIFT, TEE_MM_POOL_NEX_MALLOC))
190 		panic("Can't create pool with free pages");
191 	DMSG("Created virtual mapper pool from %"PRIxPA" to %"PRIxPA,
192 	     base, base + size);
193 
194 	if (secmem1_size) {
195 		/* Carve out an eventual gap between secmem0 and secmem1 */
196 		base = secmem0_base + secmem0_size;
197 		size = secmem1_base - base;
198 		if (size) {
199 			DMSG("Carving out gap between secmem0 and secmem1 (0x%"PRIxPA":0x%"PRIxPASZ")",
200 			     base, size);
201 			if (!tee_mm_alloc2(&virt_mapper_pool, base, size))
202 				panic("Can't carve out secmem gap");
203 		}
204 	}
205 
206 
207 	/* Carve out areas that are used by OP-TEE core */
208 	for (map = memory_map; map->type != MEM_AREA_END; map++) {
209 		switch (map->type) {
210 		case MEM_AREA_TEE_RAM_RX:
211 		case MEM_AREA_TEE_RAM_RO:
212 		case MEM_AREA_NEX_RAM_RO:
213 		case MEM_AREA_NEX_RAM_RW:
214 			DMSG("Carving out area of type %d (0x%08lx-0x%08lx)",
215 			     map->type, map->pa, map->pa + map->size);
216 			if (!tee_mm_alloc2(&virt_mapper_pool, map->pa,
217 					   map->size))
218 				panic("Can't carve out used area");
219 			break;
220 		default:
221 			continue;
222 		}
223 	}
224 
225 	kmemory_map = memory_map;
226 }
227 
228 
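/*
 * Allocate the guest's private TEE RW RAM, TA RAM and translation
 * tables from virt_mapper_pool, build its MMU partition and memory
 * map, then switch to the guest's mappings to clear its .bss and
 * populate its .data from the original copy; the caller restores the
 * default mappings when done.
 */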
229 static TEE_Result configure_guest_prtn_mem(struct guest_partition *prtn)
230 {
231 	TEE_Result res = TEE_SUCCESS;
232 	paddr_t original_data_pa = 0;
233 
234 	prtn->tee_ram = tee_mm_alloc(&virt_mapper_pool, VCORE_UNPG_RW_SZ);
235 	if (!prtn->tee_ram) {
236 		EMSG("Can't allocate memory for TEE runtime context");
237 		res = TEE_ERROR_OUT_OF_MEMORY;
238 		goto err;
239 	}
240 	DMSG("TEE RAM: %08" PRIxPA, tee_mm_get_smem(prtn->tee_ram));
241 
242 	prtn->ta_ram = tee_mm_alloc(&virt_mapper_pool, get_ta_ram_size());
243 	if (!prtn->ta_ram) {
244 		EMSG("Can't allocate memory for TA data");
245 		res = TEE_ERROR_OUT_OF_MEMORY;
246 		goto err;
247 	}
248 	DMSG("TA RAM: %08" PRIxPA, tee_mm_get_smem(prtn->ta_ram));
249 
250 	prtn->tables = tee_mm_alloc(&virt_mapper_pool,
251 				   core_mmu_get_total_pages_size());
252 	if (!prtn->tables) {
253 		EMSG("Can't allocate memory for page tables");
254 		res = TEE_ERROR_OUT_OF_MEMORY;
255 		goto err;
256 	}
257 
258 	prtn->tables_va = phys_to_virt(tee_mm_get_smem(prtn->tables),
259 				      MEM_AREA_SEC_RAM_OVERALL,
260 				      core_mmu_get_total_pages_size());
261 	assert(prtn->tables_va);
262 
263 	prtn->mmu_prtn = core_alloc_mmu_prtn(prtn->tables_va);
264 	if (!prtn->mmu_prtn) {
265 		res = TEE_ERROR_OUT_OF_MEMORY;
266 		goto err;
267 	}
268 
269 	prtn->memory_map = prepare_memory_map(tee_mm_get_smem(prtn->tee_ram),
270 					     tee_mm_get_smem(prtn->ta_ram));
271 	if (!prtn->memory_map) {
272 		res = TEE_ERROR_OUT_OF_MEMORY;
273 		goto err;
274 	}
275 
276 	core_init_mmu_prtn(prtn->mmu_prtn, prtn->memory_map);
277 
278 	original_data_pa = virt_to_phys(__data_start);
279 	/* Switch to guest's mappings */
280 	core_mmu_set_prtn(prtn->mmu_prtn);
281 
282 	/* clear .bss */
283 	memset((void *)(VCORE_UNPG_RW_PA), 0, VCORE_UNPG_RW_SZ);
284 
285 	/* copy .data section from R/O original */
286 	memcpy(__data_start,
287 	       phys_to_virt(original_data_pa, MEM_AREA_SEC_RAM_OVERALL,
288 			    __data_end - __data_start),
289 	       __data_end - __data_start);
290 
291 	return TEE_SUCCESS;
292 
293 err:
294 	if (prtn->tee_ram)
295 		tee_mm_free(prtn->tee_ram);
296 	if (prtn->ta_ram)
297 		tee_mm_free(prtn->ta_ram);
298 	if (prtn->tables)
299 		tee_mm_free(prtn->tables);
300 	nex_free(prtn->mmu_prtn);
301 	nex_free(prtn->memory_map);
302 
303 	return res;
304 }
305 
306 static void destroy_gsd(struct guest_partition *prtn, bool free_only)
307 {
308 	size_t n = 0;
309 
310 	for (n = 0; n < gsd_count; n++) {
311 		if (!free_only && prtn->data_array[n] && gsd_array[n].destroy)
312 			gsd_array[n].destroy(prtn->data_array[n]);
313 		nex_free(prtn->data_array[n]);
314 	}
315 	nex_free(prtn->data_array);
316 	prtn->data_array = NULL;
317 }
318 
319 static TEE_Result alloc_gsd(struct guest_partition *prtn)
320 {
321 	unsigned int n = 0;
322 
323 	if (!gsd_count)
324 		return TEE_SUCCESS;
325 
326 	prtn->data_array = nex_calloc(gsd_count, sizeof(void *));
327 	if (!prtn->data_array)
328 		return TEE_ERROR_OUT_OF_MEMORY;
329 
330 	for (n = 0; n < gsd_count; n++) {
331 		prtn->data_array[n] = nex_calloc(1, gsd_array[n].size);
332 		if (!prtn->data_array[n]) {
333 			destroy_gsd(prtn, true /*free_only*/);
334 			return TEE_ERROR_OUT_OF_MEMORY;
335 		}
336 	}
337 
338 	return TEE_SUCCESS;
339 }
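
/*
 * Called when the hypervisor reports that a new guest was created:
 * allocate and initialize its partition, temporarily switch to its
 * mappings to initialize threads and run the preinitcalls, add it to
 * prtn_list, then switch back to the default (nexus) mappings.
 */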
340 TEE_Result virt_guest_created(uint16_t guest_id)
341 {
342 	struct guest_partition *prtn = NULL;
343 	TEE_Result res = TEE_SUCCESS;
344 	uint32_t exceptions = 0;
345 
346 	prtn = nex_calloc(1, sizeof(*prtn));
347 	if (!prtn)
348 		return TEE_ERROR_OUT_OF_MEMORY;
349 
350 	res = alloc_gsd(prtn);
351 	if (res)
352 		goto err_free_prtn;
353 
354 	prtn->id = guest_id;
355 	mutex_init(&prtn->mutex);
356 	refcount_set(&prtn->refc, 1);
357 	res = configure_guest_prtn_mem(prtn);
358 	if (res)
359 		goto err_free_gsd;
360 
361 	set_current_prtn(prtn);
362 
363 	/* Initialize threads */
364 	thread_init_threads();
365 	/* Do the preinitcalls */
366 	call_preinitcalls();
367 
368 	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
369 	LIST_INSERT_HEAD(&prtn_list, prtn, link);
370 	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
371 
372 	IMSG("Added guest %d", guest_id);
373 
374 	set_current_prtn(NULL);
375 	core_mmu_set_default_prtn();
376 
377 	return TEE_SUCCESS;
378 
379 err_free_gsd:
380 	destroy_gsd(prtn, true /*free_only*/);
381 err_free_prtn:
382 	nex_free(prtn);
383 	return res;
384 }
385 
386 static bool
387 prtn_have_remaining_resources(struct guest_partition *prtn __maybe_unused)
388 {
389 #ifdef CFG_CORE_SEL1_SPMC
390 	int i = 0;
391 
392 	if (prtn->cookie_count)
393 		return true;
394 	bit_ffs(prtn->shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT, &i);
395 	return i >= 0;
396 #else
397 	return false;
398 #endif
399 }
400 
401 static void get_prtn(struct guest_partition *prtn)
402 {
403 	if (!refcount_inc(&prtn->refc))
404 		panic();
405 }
406 
407 uint16_t virt_get_guest_id(struct guest_partition *prtn)
408 {
409 	if (!prtn)
410 		return 0;
411 	return prtn->id;
412 }
413 
414 static struct guest_partition *find_guest_by_id_unlocked(uint16_t guest_id)
415 {
416 	struct guest_partition *prtn = NULL;
417 
418 	LIST_FOREACH(prtn, &prtn_list, link)
419 		if (!prtn->shutting_down && prtn->id == guest_id)
420 			return prtn;
421 
422 	return NULL;
423 }
424 
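/*
 * Iterate over live partitions: pass NULL to get the first one, or a
 * partition previously returned to get the next.  The reference on the
 * passed partition is dropped and a reference is taken on the returned
 * one; partitions that are shutting down are skipped.
 */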
425 struct guest_partition *virt_next_guest(struct guest_partition *prtn)
426 {
427 	struct guest_partition *ret = NULL;
428 	uint32_t exceptions = 0;
429 
430 	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
431 	if (prtn)
432 		ret = LIST_NEXT(prtn, link);
433 	else
434 		ret = LIST_FIRST(&prtn_list);
435 
436 	while (ret && ret->shutting_down)
437 		ret = LIST_NEXT(ret, link);
438 	if (ret)
439 		get_prtn(ret);
440 	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
441 
442 	virt_put_guest(prtn);
443 
444 	return ret;
445 }
446 
447 struct guest_partition *virt_get_current_guest(void)
448 {
449 	struct guest_partition *prtn = get_current_prtn();
450 
451 	if (prtn)
452 		get_prtn(prtn);
453 	return prtn;
454 }
455 
456 struct guest_partition *virt_get_guest(uint16_t guest_id)
457 {
458 	struct guest_partition *prtn = NULL;
459 	uint32_t exceptions = 0;
460 
461 	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
462 	prtn = find_guest_by_id_unlocked(guest_id);
463 	if (prtn)
464 		get_prtn(prtn);
465 	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
466 
467 	return prtn;
468 }
469 
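/*
 * Drop a reference on @prtn.  When the last reference is gone the
 * partition's memory is released; if it still owns FF-A cookies or
 * shared memory it is parked on prtn_destroy_list until
 * virt_reclaim_cookie_from_destroyed_guest() has reclaimed everything.
 */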
470 void virt_put_guest(struct guest_partition *prtn)
471 {
472 	if (prtn && refcount_dec(&prtn->refc)) {
473 		uint32_t exceptions = 0;
474 		bool do_free = true;
475 
476 		assert(prtn->shutting_down);
477 
478 		exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
479 		LIST_REMOVE(prtn, link);
480 		if (prtn_have_remaining_resources(prtn)) {
481 			LIST_INSERT_HEAD(&prtn_destroy_list, prtn, link);
482 			/*
483 			 * Delay the nex_free() until
484 			 * virt_reclaim_cookie_from_destroyed_guest()
485 			 * is done with this partition.
486 			 */
487 			do_free = false;
488 		}
489 		cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
490 
491 		destroy_gsd(prtn, false /*!free_only*/);
492 		tee_mm_free(prtn->tee_ram);
493 		prtn->tee_ram = NULL;
494 		tee_mm_free(prtn->ta_ram);
495 		prtn->ta_ram = NULL;
496 		tee_mm_free(prtn->tables);
497 		prtn->tables = NULL;
498 		core_free_mmu_prtn(prtn->mmu_prtn);
499 		prtn->mmu_prtn = NULL;
500 		nex_free(prtn->memory_map);
501 		prtn->memory_map = NULL;
502 		if (do_free)
503 			nex_free(prtn);
504 	}
505 }
506 
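/*
 * Called when the hypervisor reports that a guest was destroyed: mark
 * the partition so this only happens once, deliver the shutdown
 * notification, flag the partition as shutting down and drop the
 * initial reference taken in virt_guest_created().
 */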
507 TEE_Result virt_guest_destroyed(uint16_t guest_id)
508 {
509 	struct guest_partition *prtn = NULL;
510 	uint32_t exceptions = 0;
511 
512 	IMSG("Removing guest %"PRId16, guest_id);
513 
514 	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
515 
516 	prtn = find_guest_by_id_unlocked(guest_id);
517 	if (prtn && !prtn->got_guest_destroyed)
518 		prtn->got_guest_destroyed = true;
519 	else
520 		prtn = NULL;
521 
522 	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
523 
524 	if (prtn) {
525 		notif_deliver_atomic_event(NOTIF_EVENT_SHUTDOWN, prtn->id);
526 
527 		exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
528 		prtn->shutting_down = true;
529 		cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
530 
531 		virt_put_guest(prtn);
532 	} else {
533 		EMSG("Guest with id %d not found", guest_id);
534 	}
535 
536 	return TEE_SUCCESS;
537 }
538 
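/*
 * virt_set_guest() activates @guest_id's partition on this core,
 * taking a reference and switching to its mappings, and
 * virt_unset_guest() reverses that.  Calling virt_set_guest() for the
 * partition that is already active is a no-op, which happens when
 * returning from an IRQ RPC.
 */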
539 TEE_Result virt_set_guest(uint16_t guest_id)
540 {
541 	struct guest_partition *prtn = get_current_prtn();
542 
543 	/* This can be true only if we return from IRQ RPC */
544 	if (prtn && prtn->id == guest_id)
545 		return TEE_SUCCESS;
546 
547 	if (prtn)
548 		panic("Virtual guest partition is already set");
549 
550 	prtn = virt_get_guest(guest_id);
551 	if (!prtn)
552 		return TEE_ERROR_ITEM_NOT_FOUND;
553 
554 	set_current_prtn(prtn);
555 	core_mmu_set_prtn(prtn->mmu_prtn);
556 
557 	return TEE_SUCCESS;
558 }
559 
560 void virt_unset_guest(void)
561 {
562 	struct guest_partition *prtn = get_current_prtn();
563 
564 	if (!prtn)
565 		return;
566 
567 	set_current_prtn(NULL);
568 	core_mmu_set_default_prtn();
569 	virt_put_guest(prtn);
570 }
571 
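/*
 * runtime_initialized is checked both before and after taking the
 * mutex so the common case avoids the mutex while concurrent first
 * calls still run init_tee_runtime() exactly once per guest.
 */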
572 void virt_on_stdcall(void)
573 {
574 	struct guest_partition *prtn = get_current_prtn();
575 
576 	/* Initialize runtime on first std call */
577 	if (!prtn->runtime_initialized) {
578 		mutex_lock(&prtn->mutex);
579 		if (!prtn->runtime_initialized) {
580 			init_tee_runtime();
581 			prtn->runtime_initialized = true;
582 		}
583 		mutex_unlock(&prtn->mutex);
584 	}
585 }
586 
587 struct tee_mmap_region *virt_get_memory_map(void)
588 {
589 	struct guest_partition *prtn;
590 
591 	prtn = get_current_prtn();
592 
593 	if (!prtn)
594 		return NULL;
595 
596 	return prtn->memory_map;
597 }
598 
599 void virt_get_ta_ram(vaddr_t *start, vaddr_t *end)
600 {
601 	struct guest_partition *prtn = get_current_prtn();
602 
603 	*start = (vaddr_t)phys_to_virt(tee_mm_get_smem(prtn->ta_ram),
604 				       MEM_AREA_TA_RAM,
605 				       tee_mm_get_bytes(prtn->ta_ram));
606 	*end = *start + tee_mm_get_bytes(prtn->ta_ram);
607 }
608 
609 #ifdef CFG_CORE_SEL1_SPMC
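/*
 * With the SEL1 SPMC, each partition tracks the FF-A memory share
 * handles (cookies) its guest owns: cookies[] holds handles carrying
 * FFA_MEMORY_HANDLE_HYPERVISOR_BIT, registered with
 * virt_add_cookie_to_current_guest(), while shm_bits tracks the
 * indexes of the handles this SPMC has allocated for the guest (see
 * virt_get_shm_bits()).  Both are used to reclaim shared memory after
 * the guest has been destroyed.
 */
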
610 static int find_cookie(struct guest_partition *prtn, uint64_t cookie)
611 {
612 	int i = 0;
613 
614 	for (i = 0; i < prtn->cookie_count; i++)
615 		if (prtn->cookies[i] == cookie)
616 			return i;
617 	return -1;
618 }
619 
620 static struct guest_partition *find_prtn_cookie(uint64_t cookie, int *idx)
621 {
622 	struct guest_partition *prtn = NULL;
623 	int i = 0;
624 
625 	LIST_FOREACH(prtn, &prtn_list, link) {
626 		i = find_cookie(prtn, cookie);
627 		if (i >= 0) {
628 			if (idx)
629 				*idx = i;
630 			return prtn;
631 		}
632 	}
633 
634 	return NULL;
635 }
636 
637 TEE_Result virt_add_cookie_to_current_guest(uint64_t cookie)
638 {
639 	TEE_Result res = TEE_ERROR_ACCESS_DENIED;
640 	struct guest_partition *prtn = NULL;
641 	uint32_t exceptions = 0;
642 
643 	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
644 	if (find_prtn_cookie(cookie, NULL))
645 		goto out;
646 
647 	prtn = current_partition[get_core_pos()];
648 	if (prtn->cookie_count < ARRAY_SIZE(prtn->cookies)) {
649 		prtn->cookies[prtn->cookie_count] = cookie;
650 		prtn->cookie_count++;
651 		res = TEE_SUCCESS;
652 	}
653 out:
654 	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
655 
656 	return res;
657 }
658 
659 void virt_remove_cookie(uint64_t cookie)
660 {
661 	struct guest_partition *prtn = NULL;
662 	uint32_t exceptions = 0;
663 	int i = 0;
664 
665 	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
666 	prtn = find_prtn_cookie(cookie, &i);
667 	if (prtn) {
668 		memmove(prtn->cookies + i, prtn->cookies + i + 1,
669 			sizeof(uint64_t) * (prtn->cookie_count - i - 1));
670 		prtn->cookie_count--;
671 	}
672 	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
673 }
674 
675 uint16_t virt_find_guest_by_cookie(uint64_t cookie)
676 {
677 	struct guest_partition *prtn = NULL;
678 	uint32_t exceptions = 0;
679 	uint16_t ret = 0;
680 
681 	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
682 	prtn = find_prtn_cookie(cookie, NULL);
683 	if (prtn)
684 		ret = prtn->id;
685 
686 	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
687 
688 	return ret;
689 }
690 
691 bitstr_t *virt_get_shm_bits(void)
692 {
693 	return get_current_prtn()->shm_bits;
694 }
695 
696 static TEE_Result reclaim_cookie(struct guest_partition *prtn, uint64_t cookie)
697 {
698 	if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
699 		size_t n = 0;
700 
701 		for (n = 0; n < prtn->cookie_count; n++) {
702 			if (prtn->cookies[n] == cookie) {
703 				memmove(prtn->cookies + n,
704 					prtn->cookies + n + 1,
705 					sizeof(uint64_t) *
706 						(prtn->cookie_count - n - 1));
707 				prtn->cookie_count--;
708 				return TEE_SUCCESS;
709 			}
710 		}
711 	} else {
712 		uint64_t mask = FFA_MEMORY_HANDLE_NON_SECURE_BIT |
713 				SHIFT_U64(FFA_MEMORY_HANDLE_PRTN_MASK,
714 					  FFA_MEMORY_HANDLE_PRTN_SHIFT);
715 		int64_t i = cookie & ~mask;
716 
717 		if (i >= 0 && i < SPMC_CORE_SEL1_MAX_SHM_COUNT &&
718 		    bit_test(prtn->shm_bits, i)) {
719 			bit_clear(prtn->shm_bits, i);
720 			return TEE_SUCCESS;
721 		}
722 	}
723 
724 	return TEE_ERROR_ITEM_NOT_FOUND;
725 }
726 
727 TEE_Result virt_reclaim_cookie_from_destroyed_guest(uint16_t guest_id,
728 						    uint64_t cookie)
730 {
731 	struct guest_partition *prtn = NULL;
732 	TEE_Result res = TEE_ERROR_ITEM_NOT_FOUND;
733 	uint32_t exceptions = 0;
734 
735 	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
736 	LIST_FOREACH(prtn, &prtn_destroy_list, link) {
737 		if (prtn->id == guest_id) {
738 			res = reclaim_cookie(prtn, cookie);
739 			if (prtn_have_remaining_resources(prtn))
740 				prtn = NULL;
741 			else
742 				LIST_REMOVE(prtn, link);
743 			break;
744 		}
745 	}
746 	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
747 
748 	nex_free(prtn);
749 
750 	return res;
751 }
752 #endif /*CFG_CORE_SEL1_SPMC*/
753 
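/*
 * Guest-specific data lets a subsystem attach a private allocation to
 * every guest partition.  The record size and an optional destructor
 * are registered once during boot and the returned data_id (a 1-based
 * index into each partition's data_array) is used to look the record
 * up later.  A minimal usage sketch, with hypothetical names:
 *
 *	static unsigned int my_data_id;
 *
 *	static TEE_Result my_subsys_init(void)
 *	{
 *		return virt_add_guest_spec_data(&my_data_id,
 *						sizeof(struct my_state),
 *						NULL);
 *	}
 *
 * and later, with a reference to the guest partition held:
 *
 *	struct my_state *s = virt_get_guest_spec_data(prtn, my_data_id);
 */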
754 TEE_Result virt_add_guest_spec_data(unsigned int *data_id, size_t data_size,
755 				    void (*data_destroy)(void *data))
756 {
757 	void *p = NULL;
758 
759 	/*
760 	 * This function only executes successfully in a single threaded
761 	 * environment before exiting to the normal world the first time.
762 	 * If add_disabled is true, it means we're not in this environment
763 	 * any longer.
764 	 */
765 
766 	if (add_disabled)
767 		return TEE_ERROR_BAD_PARAMETERS;
768 
769 	p = nex_realloc(gsd_array, sizeof(*gsd_array) * (gsd_count + 1));
770 	if (!p)
771 		return TEE_ERROR_OUT_OF_MEMORY;
772 	gsd_array = p;
773 
774 	gsd_array[gsd_count] = (struct guest_spec_data){
775 		.size = data_size,
776 		.destroy = data_destroy,
777 	};
778 	*data_id = gsd_count + 1;
779 	gsd_count++;
780 	return TEE_SUCCESS;
781 }
782 
783 void *virt_get_guest_spec_data(struct guest_partition *prtn,
784 			       unsigned int data_id)
785 {
786 	assert(data_id);
787 	if (!data_id || !prtn || data_id > gsd_count)
788 		return NULL;
789 	return prtn->data_array[data_id - 1];
790 }
791 
792 static TEE_Result virt_disable_add(void)
793 {
794 	add_disabled = true;
795 
796 	return TEE_SUCCESS;
797 }
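
/*
 * Once boot-time initialization is over (the single-threaded
 * environment described above), disable further guest-specific data
 * registrations.
 */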
798 nex_release_init_resource(virt_disable_add);
799