xref: /optee_os/core/arch/arm/kernel/virtualization.c (revision a951eb5fd0d6db6c8f96ceb1f3af804020005f84)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018, EPAM Systems. All rights reserved.
 * Copyright (c) 2023-2024, Linaro Limited
 */

#include <bitstring.h>
#include <compiler.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/thread_spmc.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/tee_mm.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <string.h>
#include <util.h>

LIST_HEAD(prtn_list_head, guest_partition);

static unsigned int prtn_list_lock __nex_data = SPINLOCK_UNLOCK;

static struct prtn_list_head prtn_list __nex_data =
	LIST_HEAD_INITIALIZER(prtn_list);
static struct prtn_list_head prtn_destroy_list __nex_data =
	LIST_HEAD_INITIALIZER(prtn_destroy_list);

/* Free pages used for guest partitions */
tee_mm_pool_t virt_mapper_pool __nex_bss;

/* Memory used by OP-TEE core */
struct tee_mmap_region *kmemory_map __nex_bss;

struct guest_partition {
	LIST_ENTRY(guest_partition) link;
	struct mmu_partition *mmu_prtn;
	struct tee_mmap_region *memory_map;
	struct mutex mutex;
	void *tables_va;
	tee_mm_entry_t *tee_ram;
	tee_mm_entry_t *ta_ram;
	tee_mm_entry_t *tables;
	bool runtime_initialized;
	bool shutting_down;
	uint16_t id;
	struct refcount refc;
#ifdef CFG_CORE_SEL1_SPMC
	uint64_t cookies[SPMC_CORE_SEL1_MAX_SHM_COUNT];
	uint8_t cookie_count;
	bitstr_t bit_decl(shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT);
#endif
};

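/*
 * The guest partition currently active on each CPU core, or NULL when a
 * core runs in the nexus context. Accessed with foreign interrupts
 * masked, see get_current_prtn() and set_current_prtn() below.
 */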
static struct guest_partition *current_partition[CFG_TEE_CORE_NB_CORE] __nex_bss;

static struct guest_partition *get_current_prtn(void)
{
	struct guest_partition *ret;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	ret = current_partition[get_core_pos()];

	thread_unmask_exceptions(exceptions);

	return ret;
}

uint16_t virt_get_current_guest_id(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return 0;
	return prtn->id;
}

static void set_current_prtn(struct guest_partition *prtn)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	current_partition[get_core_pos()] = prtn;

	thread_unmask_exceptions(exceptions);
}

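/*
 * Each guest gets an equal share of the TA RAM range, minus room for the
 * guest's private copy of the TEE core .data/.bss image and its
 * translation tables, rounded down to a page boundary.
 */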
static size_t get_ta_ram_size(void)
{
	size_t ta_size = 0;

	core_mmu_get_ta_range(NULL, &ta_size);
	return ROUNDDOWN(ta_size / CFG_VIRT_GUEST_COUNT - VCORE_UNPG_RW_SZ -
			 core_mmu_get_total_pages_size(), SMALL_PAGE_SIZE);
}

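/*
 * Clone the nexus memory map for a new guest: the TEE .data/.bss mapping
 * is redirected to the guest's private copy at @tee_data, and a
 * MEM_AREA_TA_RAM entry backed by @ta_ram is appended.
 */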
static struct tee_mmap_region *prepare_memory_map(paddr_t tee_data,
						  paddr_t ta_ram)
{
	int i, entries;
	vaddr_t max_va = 0;
	struct tee_mmap_region *map;
	/*
	 * This function assumes that kmemory_map (aka static_memory_map
	 * from core_mmu.c) is not altered while it runs. This holds
	 * because all changes to static_memory_map are done during
	 * OP-TEE initialization, while this function is called when the
	 * hypervisor creates a guest.
	 */

	/* Count number of entries in nexus memory map */
	for (map = kmemory_map, entries = 1; map->type != MEM_AREA_END;
	     map++, entries++)
		;

	/* Allocate entries for virtual guest map */
	map = nex_calloc(entries + 1, sizeof(struct tee_mmap_region));
	if (!map)
		return NULL;

	memcpy(map, kmemory_map, sizeof(*map) * entries);

	/* Map TEE .data and .bss sections */
	for (i = 0; i < entries; i++) {
		if (map[i].va == (vaddr_t)(VCORE_UNPG_RW_PA)) {
			map[i].type = MEM_AREA_TEE_RAM_RW;
			map[i].attr = core_mmu_type_to_attr(map[i].type);
			map[i].pa = tee_data;
		}
		if (map[i].va + map[i].size > max_va)
			max_va = map[i].va + map[i].size;
	}

	/* Map TA_RAM */
	assert(map[entries - 1].type == MEM_AREA_END);
	map[entries] = map[entries - 1];
	map[entries - 1].region_size = SMALL_PAGE_SIZE;
	map[entries - 1].va = ROUNDUP(max_va, map[entries - 1].region_size);
	map[entries - 1].va +=
		(ta_ram - map[entries - 1].va) & CORE_MMU_PGDIR_MASK;
	map[entries - 1].pa = ta_ram;
	map[entries - 1].size = get_ta_ram_size();
	map[entries - 1].type = MEM_AREA_TA_RAM;
	map[entries - 1].attr = core_mmu_type_to_attr(map[entries - 1].type);

	DMSG("New map (%08lx):", (vaddr_t)(VCORE_UNPG_RW_PA));

	for (i = 0; i < entries; i++)
		DMSG("T: %-16s rsz: %08x, pa: %08lx, va: %08lx, sz: %08lx attr: %x",
		     teecore_memtype_name(map[i].type),
		     map[i].region_size, map[i].pa, map[i].va,
		     map[i].size, map[i].attr);
	return map;
}

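/*
 * Set up the virtualization page pool covering all secure RAM (secmem0
 * plus an optional secmem1 range) and carve out the pages already used
 * by the OP-TEE core as well as any gap between the two ranges, leaving
 * only pages that can be handed out to guest partitions.
 */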
void virt_init_memory(struct tee_mmap_region *memory_map, paddr_t secmem0_base,
		      paddr_size_t secmem0_size, paddr_t secmem1_base,
		      paddr_size_t secmem1_size)
{
	struct tee_mmap_region *map = NULL;
	paddr_size_t size = secmem0_size;
	paddr_t base = secmem0_base;

	if (secmem1_size) {
		assert(secmem0_base + secmem0_size <= secmem1_base);
		size = secmem1_base + secmem1_size - base;
	}

	/* Init page pool that covers all secure RAM */
	if (!tee_mm_init(&virt_mapper_pool, base, size,
			 SMALL_PAGE_SHIFT, TEE_MM_POOL_NEX_MALLOC))
		panic("Can't create pool with free pages");
	DMSG("Created virtual mapper pool from %"PRIxPA" to %"PRIxPA,
	     base, base + size);

	if (secmem1_size) {
		/* Carve out an eventual gap between secmem0 and secmem1 */
		base = secmem0_base + secmem0_size;
		size = secmem1_base - base;
		if (size) {
			DMSG("Carving out gap between secmem0 and secmem1 (0x%"PRIxPA":0x%"PRIxPASZ")",
			     base, size);
			if (!tee_mm_alloc2(&virt_mapper_pool, base, size))
				panic("Can't carve out secmem gap");
		}
	}

	/* Carve out areas that are used by OP-TEE core */
	for (map = memory_map; map->type != MEM_AREA_END; map++) {
		switch (map->type) {
		case MEM_AREA_TEE_RAM_RX:
		case MEM_AREA_TEE_RAM_RO:
		case MEM_AREA_NEX_RAM_RO:
		case MEM_AREA_NEX_RAM_RW:
			DMSG("Carving out area of type %d (0x%08lx-0x%08lx)",
			     map->type, map->pa, map->pa + map->size);
			if (!tee_mm_alloc2(&virt_mapper_pool, map->pa,
					   map->size))
				panic("Can't carve out used area");
			break;
		default:
			continue;
		}
	}

	kmemory_map = memory_map;
}

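/*
 * Allocate the per-guest memory: a private copy of the TEE core
 * .data/.bss image, the guest's TA RAM share and its translation tables.
 * On success the guest's MMU partition is left active with a freshly
 * cleared .bss and a copy of the original .data section; the caller is
 * expected to switch back to the default partition when done.
 */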
static TEE_Result configure_guest_prtn_mem(struct guest_partition *prtn)
{
	TEE_Result res = TEE_SUCCESS;
	paddr_t original_data_pa = 0;

	prtn->tee_ram = tee_mm_alloc(&virt_mapper_pool, VCORE_UNPG_RW_SZ);
	if (!prtn->tee_ram) {
		EMSG("Can't allocate memory for TEE runtime context");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TEE RAM: %08" PRIxPA, tee_mm_get_smem(prtn->tee_ram));

	prtn->ta_ram = tee_mm_alloc(&virt_mapper_pool, get_ta_ram_size());
	if (!prtn->ta_ram) {
		EMSG("Can't allocate memory for TA data");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TA RAM: %08" PRIxPA, tee_mm_get_smem(prtn->ta_ram));

	prtn->tables = tee_mm_alloc(&virt_mapper_pool,
				    core_mmu_get_total_pages_size());
	if (!prtn->tables) {
		EMSG("Can't allocate memory for page tables");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->tables_va = phys_to_virt(tee_mm_get_smem(prtn->tables),
				       MEM_AREA_SEC_RAM_OVERALL,
				       core_mmu_get_total_pages_size());
	assert(prtn->tables_va);

	prtn->mmu_prtn = core_alloc_mmu_prtn(prtn->tables_va);
	if (!prtn->mmu_prtn) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->memory_map = prepare_memory_map(tee_mm_get_smem(prtn->tee_ram),
					      tee_mm_get_smem(prtn->ta_ram));
	if (!prtn->memory_map) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	core_init_mmu_prtn(prtn->mmu_prtn, prtn->memory_map);

	original_data_pa = virt_to_phys(__data_start);
	/* Switch to guest's mappings */
	core_mmu_set_prtn(prtn->mmu_prtn);

	/* clear .bss */
	memset((void *)(VCORE_UNPG_RW_PA), 0, VCORE_UNPG_RW_SZ);

	/* copy .data section from R/O original */
	memcpy(__data_start,
	       phys_to_virt(original_data_pa, MEM_AREA_SEC_RAM_OVERALL,
			    __data_end - __data_start),
	       __data_end - __data_start);

	return TEE_SUCCESS;

err:
	if (prtn->tee_ram)
		tee_mm_free(prtn->tee_ram);
	if (prtn->ta_ram)
		tee_mm_free(prtn->ta_ram);
	if (prtn->tables)
		tee_mm_free(prtn->tables);
	nex_free(prtn->mmu_prtn);
	nex_free(prtn->memory_map);

	return res;
}

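/*
 * Called when the hypervisor reports that a new guest has been created.
 * Sets up the guest partition, runs the per-guest boot initialization
 * (threads and preinitcalls) under the guest's mappings, and publishes
 * the partition on prtn_list holding the initial reference.
 */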
TEE_Result virt_guest_created(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;

	prtn = nex_calloc(1, sizeof(*prtn));
	if (!prtn)
		return TEE_ERROR_OUT_OF_MEMORY;

	prtn->id = guest_id;
	mutex_init(&prtn->mutex);
	refcount_set(&prtn->refc, 1);
	res = configure_guest_prtn_mem(prtn);
	if (res)
		goto err_free_prtn;

	set_current_prtn(prtn);

	/* Initialize threads */
	thread_init_threads();
	/* Do the preinitcalls */
	call_preinitcalls();

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_INSERT_HEAD(&prtn_list, prtn, link);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	IMSG("Added guest %d", guest_id);

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();

	return TEE_SUCCESS;

err_free_prtn:
	nex_free(prtn);
	return res;
}

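/*
 * Tell whether a shut-down guest still owns resources that must be
 * reclaimed before its partition can be freed: with CFG_CORE_SEL1_SPMC
 * these are FF-A SHM cookies, recorded either in cookies[] or in
 * shm_bits.
 */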
static bool
prtn_have_remaining_resources(struct guest_partition *prtn __maybe_unused)
{
#ifdef CFG_CORE_SEL1_SPMC
	int i = 0;

	if (prtn->cookie_count)
		return true;
	bit_ffs(prtn->shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT, &i);
	return i >= 0;
#else
	return false;
#endif
}

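/* Take a reference; the count must not already have dropped to zero. */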
static void get_prtn(struct guest_partition *prtn)
{
	if (!refcount_inc(&prtn->refc))
		panic();
}

uint16_t virt_get_guest_id(struct guest_partition *prtn)
{
	if (!prtn)
		return 0;
	return prtn->id;
}

static struct guest_partition *find_guest_by_id_unlocked(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;

	LIST_FOREACH(prtn, &prtn_list, link)
		if (!prtn->shutting_down && prtn->id == guest_id)
			return prtn;

	return NULL;
}

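/*
 * Iterate over live guest partitions: return the (referenced) successor
 * of @prtn, or the first guest when @prtn is NULL, skipping guests that
 * are shutting down. The reference held on @prtn is released, so the
 * function can be called in a loop until it returns NULL.
 */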
struct guest_partition *virt_next_guest(struct guest_partition *prtn)
{
	struct guest_partition *ret = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	if (prtn)
		ret = LIST_NEXT(prtn, link);
	else
		ret = LIST_FIRST(&prtn_list);

	while (ret && ret->shutting_down)
		ret = LIST_NEXT(ret, link);
	if (ret)
		get_prtn(ret);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	virt_put_guest(prtn);

	return ret;
}

struct guest_partition *virt_get_current_guest(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (prtn)
		get_prtn(prtn);
	return prtn;
}

struct guest_partition *virt_get_guest(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_guest_by_id_unlocked(guest_id);
	if (prtn)
		get_prtn(prtn);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return prtn;
}

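/*
 * Release a reference to @prtn. The last put unlinks the partition and
 * frees its memory; freeing of the struct itself is deferred when FF-A
 * SHM cookies remain to be reclaimed via
 * virt_reclaim_cookie_from_destroyed_guest().
 */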
void virt_put_guest(struct guest_partition *prtn)
{
	if (prtn && refcount_dec(&prtn->refc)) {
		uint32_t exceptions = 0;
		bool do_free = true;

		assert(prtn->shutting_down);

		exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
		LIST_REMOVE(prtn, link);
		if (prtn_have_remaining_resources(prtn)) {
			LIST_INSERT_HEAD(&prtn_destroy_list, prtn, link);
			/*
			 * Delay the nex_free() until
			 * virt_reclaim_cookie_from_destroyed_guest()
			 * is done with this partition.
			 */
			do_free = false;
		}
		cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

		tee_mm_free(prtn->tee_ram);
		prtn->tee_ram = NULL;
		tee_mm_free(prtn->ta_ram);
		prtn->ta_ram = NULL;
		tee_mm_free(prtn->tables);
		prtn->tables = NULL;
		core_free_mmu_prtn(prtn->mmu_prtn);
		prtn->mmu_prtn = NULL;
		nex_free(prtn->memory_map);
		prtn->memory_map = NULL;
		if (do_free)
			nex_free(prtn);
	}
}

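/*
 * Called when the hypervisor reports that a guest has been destroyed.
 * Marks the partition as shutting down and drops the initial reference
 * taken in virt_guest_created(); the actual teardown happens in
 * virt_put_guest() once the last user is gone.
 */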
TEE_Result virt_guest_destroyed(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	IMSG("Removing guest %"PRIu16, guest_id);

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);

	prtn = find_guest_by_id_unlocked(guest_id);
	if (prtn)
		prtn->shutting_down = true;

	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	virt_put_guest(prtn);
	if (!prtn)
		EMSG("Guest with id %d not found", guest_id);

	return TEE_SUCCESS;
}

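/*
 * Switch the calling core to the guest identified by @guest_id, taking a
 * reference on the partition and activating its MMU mappings. A typical
 * caller looks like this (sketch only; the exact entry point depends on
 * the dispatcher):
 *
 *	if (virt_set_guest(guest_id))
 *		return OPTEE_SMC_RETURN_ENOTAVAIL;
 *	... handle the call ...
 *	virt_unset_guest();
 */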
TEE_Result virt_set_guest(uint16_t guest_id)
{
	struct guest_partition *prtn = get_current_prtn();

	/* This can be true only if we return from IRQ RPC */
	if (prtn && prtn->id == guest_id)
		return TEE_SUCCESS;

	if (prtn)
		panic("Virtual guest partition is already set");

	prtn = virt_get_guest(guest_id);
	if (!prtn)
		return TEE_ERROR_ITEM_NOT_FOUND;

	set_current_prtn(prtn);
	core_mmu_set_prtn(prtn->mmu_prtn);

	return TEE_SUCCESS;
}

void virt_unset_guest(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return;

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();
	virt_put_guest(prtn);
}

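/*
 * Initialize the per-guest TEE runtime on the guest's first standard
 * call. The double check of runtime_initialized around the mutex avoids
 * taking the lock on every call.
 */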
void virt_on_stdcall(void)
{
	struct guest_partition *prtn = get_current_prtn();

	/* Initialize runtime on first std call */
	if (!prtn->runtime_initialized) {
		mutex_lock(&prtn->mutex);
		if (!prtn->runtime_initialized) {
			init_tee_runtime();
			prtn->runtime_initialized = true;
		}
		mutex_unlock(&prtn->mutex);
	}
}

struct tee_mmap_region *virt_get_memory_map(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return NULL;

	return prtn->memory_map;
}

void virt_get_ta_ram(vaddr_t *start, vaddr_t *end)
{
	struct guest_partition *prtn = get_current_prtn();

	*start = (vaddr_t)phys_to_virt(tee_mm_get_smem(prtn->ta_ram),
				       MEM_AREA_TA_RAM,
				       tee_mm_get_bytes(prtn->ta_ram));
	*end = *start + tee_mm_get_bytes(prtn->ta_ram);
}

#ifdef CFG_CORE_SEL1_SPMC
static int find_cookie(struct guest_partition *prtn, uint64_t cookie)
{
	int i = 0;

	for (i = 0; i < prtn->cookie_count; i++)
		if (prtn->cookies[i] == cookie)
			return i;
	return -1;
}

static struct guest_partition *find_prtn_cookie(uint64_t cookie, int *idx)
{
	struct guest_partition *prtn = NULL;
	int i = 0;

	LIST_FOREACH(prtn, &prtn_list, link) {
		i = find_cookie(prtn, cookie);
		if (i >= 0) {
			if (idx)
				*idx = i;
			return prtn;
		}
	}

	return NULL;
}

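/*
 * Record an FF-A SHM cookie as owned by the current guest. Fails if the
 * cookie is already registered to some guest or if the per-guest table
 * is full.
 */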
TEE_Result virt_add_cookie_to_current_guest(uint64_t cookie)
{
	TEE_Result res = TEE_ERROR_ACCESS_DENIED;
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	if (find_prtn_cookie(cookie, NULL))
		goto out;

	prtn = current_partition[get_core_pos()];
	if (prtn->cookie_count < ARRAY_SIZE(prtn->cookies)) {
		prtn->cookies[prtn->cookie_count] = cookie;
		prtn->cookie_count++;
		res = TEE_SUCCESS;
	}
out:
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return res;
}

void virt_remove_cookie(uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_prtn_cookie(cookie, &i);
	if (prtn) {
		memmove(prtn->cookies + i, prtn->cookies + i + 1,
			sizeof(uint64_t) * (prtn->cookie_count - i - 1));
		prtn->cookie_count--;
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
}

uint16_t virt_find_guest_by_cookie(uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;
	uint16_t ret = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_prtn_cookie(cookie, NULL);
	if (prtn)
		ret = prtn->id;

	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return ret;
}

bitstr_t *virt_get_shm_bits(void)
{
	return get_current_prtn()->shm_bits;
}

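/*
 * Release the guest's record of @cookie. Cookies allocated by the
 * hypervisor (FFA_MEMORY_HANDLE_HYPERVISOR_BIT set) are tracked in
 * cookies[], while cookies allocated by this SPMC encode an index into
 * shm_bits once the non-secure and partition bits are masked off.
 */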
static TEE_Result reclaim_cookie(struct guest_partition *prtn, uint64_t cookie)
{
	if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
		size_t n = 0;

		for (n = 0; n < prtn->cookie_count; n++) {
			if (prtn->cookies[n] == cookie) {
				memmove(prtn->cookies + n,
					prtn->cookies + n + 1,
					sizeof(uint64_t) *
						(prtn->cookie_count - n - 1));
				prtn->cookie_count--;
				return TEE_SUCCESS;
			}
		}
	} else {
		uint64_t mask = FFA_MEMORY_HANDLE_NON_SECURE_BIT |
				SHIFT_U64(FFA_MEMORY_HANDLE_PRTN_MASK,
					  FFA_MEMORY_HANDLE_PRTN_SHIFT);
		int64_t i = cookie & ~mask;

		if (i >= 0 && i < SPMC_CORE_SEL1_MAX_SHM_COUNT &&
		    bit_test(prtn->shm_bits, i)) {
			bit_clear(prtn->shm_bits, i);
			return TEE_SUCCESS;
		}
	}

	return TEE_ERROR_ITEM_NOT_FOUND;
}

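/*
 * Reclaim @cookie from a guest that has already been destroyed and, when
 * that was the last outstanding resource, remove the partition from
 * prtn_destroy_list and free it.
 */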
TEE_Result virt_reclaim_cookie_from_destroyed_guest(uint16_t guest_id,
						    uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	TEE_Result res = TEE_ERROR_ITEM_NOT_FOUND;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_FOREACH(prtn, &prtn_destroy_list, link) {
		if (prtn->id == guest_id) {
			res = reclaim_cookie(prtn, cookie);
			if (prtn_have_remaining_resources(prtn))
				prtn = NULL;
			else
				LIST_REMOVE(prtn, link);
			break;
		}
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	nex_free(prtn);

	return res;
}
#endif /*CFG_CORE_SEL1_SPMC*/