// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018, EPAM Systems. All rights reserved.
 * Copyright (c) 2023-2024, Linaro Limited
 */

#include <bitstring.h>
#include <compiler.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/mutex.h>
#include <kernel/notif.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/thread_spmc.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/phys_mem.h>
#include <mm/tee_mm.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <string.h>
#include <string_ext.h>
#include <util.h>

LIST_HEAD(prtn_list_head, guest_partition);

static unsigned int prtn_list_lock __nex_data = SPINLOCK_UNLOCK;

static struct prtn_list_head prtn_list __nex_data =
	LIST_HEAD_INITIALIZER(prtn_list);
static struct prtn_list_head prtn_destroy_list __nex_data =
	LIST_HEAD_INITIALIZER(prtn_destroy_list);

/* Memory used by OP-TEE core */
struct memory_map *kmem_map __nex_bss;

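/*
 * Guest-specific data records registered with virt_add_guest_spec_data()
 * before the first guest is created. Every guest_partition gets one
 * zero-initialized instance of each registered record in its data_array,
 * looked up by the id handed back at registration.
 */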
struct guest_spec_data {
	size_t size;
	void (*destroy)(void *data);
};

static bool add_disabled __nex_bss;
static unsigned int gsd_count __nex_bss;
static struct guest_spec_data *gsd_array __nex_bss;

struct guest_partition {
	LIST_ENTRY(guest_partition) link;
	struct mmu_partition *mmu_prtn;
	struct memory_map mem_map;
	struct mutex mutex;
	void *tables_va;
	tee_mm_entry_t *tee_ram;
	tee_mm_entry_t *ta_ram;
	tee_mm_entry_t *tables;
	bool runtime_initialized;
	bool got_guest_destroyed;
	bool shutting_down;
	uint16_t id;
	struct refcount refc;
#ifdef CFG_CORE_SEL1_SPMC
	uint64_t cookies[SPMC_CORE_SEL1_MAX_SHM_COUNT];
	uint8_t cookie_count;
	bitstr_t bit_decl(shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT);
#endif
	void **data_array;
};

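/*
 * current_partition[] tracks which guest partition each core is currently
 * serving. It is accessed with foreign interrupts masked so that
 * get_core_pos() and the array access cannot be separated by a migration
 * to another core.
 */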
struct guest_partition *current_partition[CFG_TEE_CORE_NB_CORE] __nex_bss;

static struct guest_partition *get_current_prtn(void)
{
	struct guest_partition *ret;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	ret = current_partition[get_core_pos()];

	thread_unmask_exceptions(exceptions);

	return ret;
}

uint16_t virt_get_current_guest_id(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return 0;
	return prtn->id;
}

static void set_current_prtn(struct guest_partition *prtn)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	current_partition[get_core_pos()] = prtn;

	thread_unmask_exceptions(exceptions);
}

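/*
 * Each guest gets an equal share of the TA memory pool, reduced by the
 * size of its private copy of the TEE .data/.bss sections and its
 * translation tables, rounded down to a page boundary.
 */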
static size_t get_ta_ram_size(void)
{
	size_t ta_size = nex_phys_mem_get_ta_size();

	return ROUNDDOWN(ta_size / CFG_VIRT_GUEST_COUNT - VCORE_UNPG_RW_SZ -
			 core_mmu_get_total_pages_size(), SMALL_PAGE_SIZE);
}

static TEE_Result prepare_memory_map(struct memory_map *mem_map,
				     paddr_t tee_data, paddr_t ta_ram)
{
	struct tee_mmap_region *map = NULL;
	vaddr_t max_va = 0;
	size_t n = 0;
	/*
	 * This function assumes that kmem_map (aka static_memory_map
	 * from core_mmu.c) is not altered while it runs. This holds
	 * because all changes to static_memory_map are done during
	 * OP-TEE initialization, while this function is called when
	 * the hypervisor creates a guest.
	 */

	/* Allocate entries for virtual guest map */
	mem_map->map = nex_calloc(kmem_map->count + 1, sizeof(*mem_map->map));
	if (!mem_map->map)
		return TEE_ERROR_OUT_OF_MEMORY;
	mem_map->count = kmem_map->count;
	mem_map->alloc_count = kmem_map->count + 1;

	memcpy(mem_map->map, kmem_map->map,
	       sizeof(*mem_map->map) * mem_map->count);

	/* Map TEE .data and .bss sections */
	for (n = 0; n < mem_map->count; n++) {
		map = mem_map->map + n;
		if (map->va == (vaddr_t)(VCORE_UNPG_RW_PA)) {
			map->type = MEM_AREA_TEE_RAM_RW;
			map->attr = core_mmu_type_to_attr(map->type);
			map->pa = tee_data;
		}
		if (map->va + map->size > max_va)
			max_va = map->va + map->size;
	}

	/* Map TA_RAM */
	mem_map->count++;
	map = ins_array_elem(mem_map->map, mem_map->count,
			     sizeof(*mem_map->map), n, NULL);
	map->region_size = SMALL_PAGE_SIZE;
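	/*
	 * Place TA RAM above everything mapped so far, keeping the same
	 * offset within a pgdir-sized region as the physical address so
	 * that the mapping lines up with the translation table layout.
	 */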
	map->va = ROUNDUP(max_va, map->region_size);
	map->va += (ta_ram - map->va) & CORE_MMU_PGDIR_MASK;
	map->pa = ta_ram;
	map->size = get_ta_ram_size();
	map->type = MEM_AREA_TA_RAM;
	map->attr = core_mmu_type_to_attr(map->type);

	DMSG("New map (%08lx):", (vaddr_t)(VCORE_UNPG_RW_PA));

	for (n = 0; n < mem_map->count; n++)
		DMSG("T: %-16s rsz: %08x, pa: %08lx, va: %08lx, sz: %08lx attr: %x",
		     teecore_memtype_name(mem_map->map[n].type),
		     mem_map->map[n].region_size, mem_map->map[n].pa,
		     mem_map->map[n].va, mem_map->map[n].size,
		     mem_map->map[n].attr);
	return TEE_SUCCESS;
}

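/*
 * Called once during boot with the nexus memory map: sets up the secure
 * RAM pools and reserves the ranges already used by the core itself so
 * they cannot be handed out to guests.
 */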
void virt_init_memory(struct memory_map *mem_map, paddr_t secmem0_base,
		      paddr_size_t secmem0_size, paddr_t secmem1_base,
		      paddr_size_t secmem1_size)
{
	size_t n = 0;

	/* Init page pool that covers all secure RAM */
	nex_phys_mem_init(secmem0_base, secmem0_size, secmem1_base,
			  secmem1_size);

	/* Carve out areas that are used by OP-TEE core */
	for (n = 0; n < mem_map->count; n++) {
		struct tee_mmap_region *map = mem_map->map + n;

		switch (map->type) {
		case MEM_AREA_TEE_RAM_RX:
		case MEM_AREA_TEE_RAM_RO:
		case MEM_AREA_NEX_RAM_RO:
		case MEM_AREA_NEX_RAM_RW:
			DMSG("Carving out area of type %d (0x%08lx-0x%08lx)",
			     map->type, map->pa, map->pa + map->size);
			if (!nex_phys_mem_alloc2(map->pa, map->size))
				panic("Can't carve out used area");
			break;
		default:
			continue;
		}
	}

	kmem_map = mem_map;
}

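/*
 * Allocates the per-guest copy of the TEE .data/.bss sections, the
 * guest's TA RAM and its translation tables, then switches to the new
 * mapping to give the fresh sections their initial contents.
 */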
static TEE_Result configure_guest_prtn_mem(struct guest_partition *prtn)
{
	TEE_Result res = TEE_SUCCESS;
	paddr_t original_data_pa = 0;

	prtn->tee_ram = nex_phys_mem_core_alloc(VCORE_UNPG_RW_SZ);
	if (!prtn->tee_ram) {
		EMSG("Can't allocate memory for TEE runtime context");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TEE RAM: %08" PRIxPA, tee_mm_get_smem(prtn->tee_ram));

	prtn->ta_ram = nex_phys_mem_ta_alloc(get_ta_ram_size());
	if (!prtn->ta_ram) {
		EMSG("Can't allocate memory for TA data");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TA RAM: %08" PRIxPA, tee_mm_get_smem(prtn->ta_ram));

	prtn->tables = nex_phys_mem_core_alloc(core_mmu_get_total_pages_size());
	if (!prtn->tables) {
		EMSG("Can't allocate memory for page tables");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->tables_va = phys_to_virt(tee_mm_get_smem(prtn->tables),
				       MEM_AREA_SEC_RAM_OVERALL,
				       core_mmu_get_total_pages_size());
	assert(prtn->tables_va);

	prtn->mmu_prtn = core_alloc_mmu_prtn(prtn->tables_va);
	if (!prtn->mmu_prtn) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	res = prepare_memory_map(&prtn->mem_map, tee_mm_get_smem(prtn->tee_ram),
				 tee_mm_get_smem(prtn->ta_ram));
	if (res)
		goto err;

	core_init_mmu_prtn(prtn->mmu_prtn, &prtn->mem_map);

	original_data_pa = virt_to_phys(__data_start);
	/* Switch to guest's mappings */
	core_mmu_set_prtn(prtn->mmu_prtn);

	/* clear .bss */
	memset((void *)(VCORE_UNPG_RW_PA), 0, VCORE_UNPG_RW_SZ);

	/* copy .data section from R/O original */
	memcpy(__data_start,
	       phys_to_virt(original_data_pa, MEM_AREA_SEC_RAM_OVERALL,
			    __data_end - __data_start),
	       __data_end - __data_start);

	return TEE_SUCCESS;

err:
	if (prtn->tee_ram)
		tee_mm_free(prtn->tee_ram);
	if (prtn->ta_ram)
		tee_mm_free(prtn->ta_ram);
	if (prtn->tables)
		tee_mm_free(prtn->tables);
	nex_free(prtn->mmu_prtn);
	nex_free(prtn->mem_map.map);

	return res;
}

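/*
 * With free_only set only the allocations are released and the registered
 * destroy() callbacks are skipped; this is used on error paths where the
 * guest never became operational.
 */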
static void destroy_gsd(struct guest_partition *prtn, bool free_only)
{
	size_t n = 0;

	for (n = 0; n < gsd_count; n++) {
		if (!free_only && prtn->data_array[n] && gsd_array[n].destroy)
			gsd_array[n].destroy(prtn->data_array[n]);
		nex_free(prtn->data_array[n]);
	}
	nex_free(prtn->data_array);
	prtn->data_array = NULL;
}

static TEE_Result alloc_gsd(struct guest_partition *prtn)
{
	unsigned int n = 0;

	if (!gsd_count)
		return TEE_SUCCESS;

	prtn->data_array = nex_calloc(gsd_count, sizeof(void *));
	if (!prtn->data_array)
		return TEE_ERROR_OUT_OF_MEMORY;

	for (n = 0; n < gsd_count; n++) {
		prtn->data_array[n] = nex_calloc(1, gsd_array[n].size);
		if (!prtn->data_array[n]) {
			destroy_gsd(prtn, true /*free_only*/);
			return TEE_ERROR_OUT_OF_MEMORY;
		}
	}

	return TEE_SUCCESS;
}

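/*
 * Called when the hypervisor reports that a new guest has been created:
 * sets up everything the guest needs before its first call into OP-TEE.
 */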
TEE_Result virt_guest_created(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;

	prtn = nex_calloc(1, sizeof(*prtn));
	if (!prtn)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = alloc_gsd(prtn);
	if (res)
		goto err_free_prtn;

	prtn->id = guest_id;
	mutex_init(&prtn->mutex);
	refcount_set(&prtn->refc, 1);
	res = configure_guest_prtn_mem(prtn);
	if (res)
		goto err_free_gsd;

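	/*
	 * The guest's mapping is already active at this point, so the heap,
	 * threads and preinitcalls below are set up inside the guest's own
	 * copy of the TEE data sections.
	 */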
	set_current_prtn(prtn);

	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
	/* Initialize threads */
	thread_init_threads();
	/* Do the preinitcalls */
	call_preinitcalls();

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_INSERT_HEAD(&prtn_list, prtn, link);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	IMSG("Added guest %d", guest_id);

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();

	return TEE_SUCCESS;

err_free_gsd:
	destroy_gsd(prtn, true /*free_only*/);
err_free_prtn:
	nex_free(prtn);
	return res;
}

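/*
 * Returns true if the partition still owns shared-memory cookies that the
 * normal world has not reclaimed; such partitions are parked on
 * prtn_destroy_list instead of being freed right away.
 */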
static bool
prtn_have_remaining_resources(struct guest_partition *prtn __maybe_unused)
{
#ifdef CFG_CORE_SEL1_SPMC
	int i = 0;

	if (prtn->cookie_count)
		return true;
	bit_ffs(prtn->shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT, &i);
	return i >= 0;
#else
	return false;
#endif
}

static void get_prtn(struct guest_partition *prtn)
{
	if (!refcount_inc(&prtn->refc))
		panic();
}

uint16_t virt_get_guest_id(struct guest_partition *prtn)
{
	if (!prtn)
		return 0;
	return prtn->id;
}

static struct guest_partition *find_guest_by_id_unlocked(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;

	LIST_FOREACH(prtn, &prtn_list, link)
		if (!prtn->shutting_down && prtn->id == guest_id)
			return prtn;

	return NULL;
}

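/*
 * Iterates over the guests: pass NULL to get the first one, or a partition
 * returned by a previous call to get the one after it. A reference is taken
 * on the returned partition and the reference on the passed-in partition is
 * dropped; guests that are shutting down are skipped.
 */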
struct guest_partition *virt_next_guest(struct guest_partition *prtn)
{
	struct guest_partition *ret = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	if (prtn)
		ret = LIST_NEXT(prtn, link);
	else
		ret = LIST_FIRST(&prtn_list);

	while (ret && ret->shutting_down)
		ret = LIST_NEXT(ret, link);
	if (ret)
		get_prtn(ret);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	virt_put_guest(prtn);

	return ret;
}

struct guest_partition *virt_get_current_guest(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (prtn)
		get_prtn(prtn);
	return prtn;
}

struct guest_partition *virt_get_guest(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_guest_by_id_unlocked(guest_id);
	if (prtn)
		get_prtn(prtn);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return prtn;
}

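/*
 * Drops a reference taken by one of the virt_get_*() functions. The last
 * reference tears the partition down; if FF-A cookies remain to be
 * reclaimed the struct itself stays on prtn_destroy_list until
 * virt_reclaim_cookie_from_destroyed_guest() is done with it.
 */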
void virt_put_guest(struct guest_partition *prtn)
{
	if (prtn && refcount_dec(&prtn->refc)) {
		uint32_t exceptions = 0;
		bool do_free = true;

		assert(prtn->shutting_down);

		exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
		LIST_REMOVE(prtn, link);
		if (prtn_have_remaining_resources(prtn)) {
			LIST_INSERT_HEAD(&prtn_destroy_list, prtn, link);
			/*
			 * Delay the nex_free() until
			 * virt_reclaim_cookie_from_destroyed_guest()
			 * is done with this partition.
			 */
			do_free = false;
		}
		cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

		destroy_gsd(prtn, false /*!free_only*/);
		tee_mm_free(prtn->tee_ram);
		prtn->tee_ram = NULL;
		tee_mm_free(prtn->ta_ram);
		prtn->ta_ram = NULL;
		tee_mm_free(prtn->tables);
		prtn->tables = NULL;
		core_free_mmu_prtn(prtn->mmu_prtn);
		prtn->mmu_prtn = NULL;
		nex_free(prtn->mem_map.map);
		prtn->mem_map.map = NULL;
		if (do_free)
			nex_free(prtn);
	}
}

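/*
 * Called when the hypervisor reports that a guest is gone. The
 * got_guest_destroyed flag ensures the shutdown notification and the final
 * reference drop happen only once per guest.
 */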
TEE_Result virt_guest_destroyed(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	IMSG("Removing guest %"PRIu16, guest_id);

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);

	prtn = find_guest_by_id_unlocked(guest_id);
	if (prtn && !prtn->got_guest_destroyed)
		prtn->got_guest_destroyed = true;
	else
		prtn = NULL;

	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	if (prtn) {
		notif_deliver_atomic_event(NOTIF_EVENT_SHUTDOWN, prtn->id);

		exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
		prtn->shutting_down = true;
		cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

		virt_put_guest(prtn);
	} else {
		EMSG("Guest with id %d not found", guest_id);
	}

	return TEE_SUCCESS;
}

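/*
 * Binds the calling core to the guest's partition and activates its MMU
 * configuration; virt_unset_guest() undoes this before control returns to
 * the normal world.
 */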
TEE_Result virt_set_guest(uint16_t guest_id)
{
	struct guest_partition *prtn = get_current_prtn();

	/* This can be true only if we return from IRQ RPC */
	if (prtn && prtn->id == guest_id)
		return TEE_SUCCESS;

	if (prtn)
		panic("Virtual guest partition is already set");

	prtn = virt_get_guest(guest_id);
	if (!prtn)
		return TEE_ERROR_ITEM_NOT_FOUND;

	set_current_prtn(prtn);
	core_mmu_set_prtn(prtn->mmu_prtn);

	return TEE_SUCCESS;
}

void virt_unset_guest(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return;

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();
	virt_put_guest(prtn);
}

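/*
 * The first standard call from a guest triggers the lazy per-guest TEE
 * runtime initialization; the double check around the mutex keeps later
 * calls cheap.
 */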
void virt_on_stdcall(void)
{
	struct guest_partition *prtn = get_current_prtn();

	/* Initialize runtime on first std call */
	if (!prtn->runtime_initialized) {
		mutex_lock(&prtn->mutex);
		if (!prtn->runtime_initialized) {
			init_tee_runtime();
			call_driver_initcalls();
			prtn->runtime_initialized = true;
		}
		mutex_unlock(&prtn->mutex);
	}
}

struct memory_map *virt_get_memory_map(void)
{
	struct guest_partition *prtn = NULL;

	prtn = get_current_prtn();

	if (!prtn)
		return NULL;

	return &prtn->mem_map;
}

void virt_get_ta_ram(vaddr_t *start, vaddr_t *end)
{
	struct guest_partition *prtn = get_current_prtn();

	*start = (vaddr_t)phys_to_virt(tee_mm_get_smem(prtn->ta_ram),
				       MEM_AREA_TA_RAM,
				       tee_mm_get_bytes(prtn->ta_ram));
	*end = *start + tee_mm_get_bytes(prtn->ta_ram);
}

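/*
 * When OP-TEE acts as the S-EL1 SPMC it also tracks the FF-A shared-memory
 * cookies owned by each guest so that they can be reclaimed once the guest
 * is destroyed.
 */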
#ifdef CFG_CORE_SEL1_SPMC
static int find_cookie(struct guest_partition *prtn, uint64_t cookie)
{
	int i = 0;

	for (i = 0; i < prtn->cookie_count; i++)
		if (prtn->cookies[i] == cookie)
			return i;
	return -1;
}

static struct guest_partition *find_prtn_cookie(uint64_t cookie, int *idx)
{
	struct guest_partition *prtn = NULL;
	int i = 0;

	LIST_FOREACH(prtn, &prtn_list, link) {
		i = find_cookie(prtn, cookie);
		if (i >= 0) {
			if (idx)
				*idx = i;
			return prtn;
		}
	}

	return NULL;
}

TEE_Result virt_add_cookie_to_current_guest(uint64_t cookie)
{
	TEE_Result res = TEE_ERROR_ACCESS_DENIED;
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	if (find_prtn_cookie(cookie, NULL))
		goto out;

	prtn = current_partition[get_core_pos()];
	if (prtn->cookie_count < ARRAY_SIZE(prtn->cookies)) {
		prtn->cookies[prtn->cookie_count] = cookie;
		prtn->cookie_count++;
		res = TEE_SUCCESS;
	}
out:
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return res;
}

void virt_remove_cookie(uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_prtn_cookie(cookie, &i);
	if (prtn) {
		memmove(prtn->cookies + i, prtn->cookies + i + 1,
			sizeof(uint64_t) * (prtn->cookie_count - i - 1));
		prtn->cookie_count--;
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
}

uint16_t virt_find_guest_by_cookie(uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;
	uint16_t ret = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_prtn_cookie(cookie, NULL);
	if (prtn)
		ret = prtn->id;

	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return ret;
}

bitstr_t *virt_get_shm_bits(void)
{
	return get_current_prtn()->shm_bits;
}

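/*
 * A cookie either carries FFA_MEMORY_HANDLE_HYPERVISOR_BIT and is stored
 * in the cookies[] array, or it encodes an index into shm_bits once the
 * partition and non-secure bits have been masked off.
 */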
static TEE_Result reclaim_cookie(struct guest_partition *prtn, uint64_t cookie)
{
	if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
		size_t n = 0;

		for (n = 0; n < prtn->cookie_count; n++) {
			if (prtn->cookies[n] == cookie) {
				memmove(prtn->cookies + n,
					prtn->cookies + n + 1,
					sizeof(uint64_t) *
						(prtn->cookie_count - n - 1));
				prtn->cookie_count--;
				return TEE_SUCCESS;
			}
		}
	} else {
		uint64_t mask = FFA_MEMORY_HANDLE_NON_SECURE_BIT |
				SHIFT_U64(FFA_MEMORY_HANDLE_PRTN_MASK,
					  FFA_MEMORY_HANDLE_PRTN_SHIFT);
		int64_t i = cookie & ~mask;

		if (i >= 0 && i < SPMC_CORE_SEL1_MAX_SHM_COUNT &&
		    bit_test(prtn->shm_bits, i)) {
			bit_clear(prtn->shm_bits, i);
			return TEE_SUCCESS;
		}
	}

	return TEE_ERROR_ITEM_NOT_FOUND;
}

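/*
 * Used while cleaning up after a destroyed guest: reclaims one cookie and
 * frees the parked partition struct once nothing is left to reclaim.
 */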
TEE_Result virt_reclaim_cookie_from_destroyed_guest(uint16_t guest_id,
						    uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	TEE_Result res = TEE_ERROR_ITEM_NOT_FOUND;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_FOREACH(prtn, &prtn_destroy_list, link) {
		if (prtn->id == guest_id) {
			res = reclaim_cookie(prtn, cookie);
			if (prtn_have_remaining_resources(prtn))
				prtn = NULL;
			else
				LIST_REMOVE(prtn, link);
			break;
		}
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	nex_free(prtn);

	return res;
}
#endif /*CFG_CORE_SEL1_SPMC*/

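/*
 * Illustrative (not from this file) registration of guest-specific data,
 * assuming a hypothetical struct my_ctx and a hypothetical init hook that
 * runs before the first guest is created:
 *
 *	static unsigned int my_data_id;
 *
 *	static TEE_Result my_init(void)
 *	{
 *		return virt_add_guest_spec_data(&my_data_id,
 *						sizeof(struct my_ctx), NULL);
 *	}
 *
 * Every guest created afterwards gets a zeroed struct my_ctx, retrieved
 * with virt_get_guest_spec_data(prtn, my_data_id).
 */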
TEE_Result virt_add_guest_spec_data(unsigned int *data_id, size_t data_size,
				    void (*data_destroy)(void *data))
{
	void *p = NULL;

	/*
	 * This function only executes successfully in a single-threaded
	 * environment, before exiting to the normal world for the first
	 * time. If add_disabled is true, we're no longer in that
	 * environment.
	 */

	if (add_disabled)
		return TEE_ERROR_BAD_PARAMETERS;

	p = nex_realloc(gsd_array, sizeof(*gsd_array) * (gsd_count + 1));
	if (!p)
		return TEE_ERROR_OUT_OF_MEMORY;
	gsd_array = p;

	gsd_array[gsd_count] = (struct guest_spec_data){
		.size = data_size,
		.destroy = data_destroy,
	};
	*data_id = gsd_count + 1;
	gsd_count++;
	return TEE_SUCCESS;
}

void *virt_get_guest_spec_data(struct guest_partition *prtn,
			       unsigned int data_id)
{
	assert(data_id);
	if (!data_id || !prtn || data_id > gsd_count)
		return NULL;
	return prtn->data_array[data_id - 1];
}

static TEE_Result virt_disable_add(void)
{
	add_disabled = true;

	return TEE_SUCCESS;
}
nex_release_init_resource(virt_disable_add);