xref: /optee_os/core/arch/arm/kernel/virtualization.c (revision 91d4649de98c6beeb8217d40f1fafa50720fe785)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018, EPAM Systems. All rights reserved.
 * Copyright (c) 2023-2024, Linaro Limited
 */

#include <bitstring.h>
#include <compiler.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/mutex.h>
#include <kernel/notif.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/thread_spmc.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/page_alloc.h>
#include <mm/phys_mem.h>
#include <mm/tee_mm.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <string.h>
#include <string_ext.h>
#include <util.h>

LIST_HEAD(prtn_list_head, guest_partition);

static unsigned int prtn_list_lock __nex_data = SPINLOCK_UNLOCK;

static struct prtn_list_head prtn_list __nex_data =
	LIST_HEAD_INITIALIZER(prtn_list);
static struct prtn_list_head prtn_destroy_list __nex_data =
	LIST_HEAD_INITIALIZER(prtn_destroy_list);

/* Memory used by OP-TEE core */
struct memory_map *kmem_map __nex_bss;

struct guest_spec_data {
	size_t size;
	void (*destroy)(void *data);
};

static bool add_disabled __nex_bss;
static unsigned int gsd_count __nex_bss;
static struct guest_spec_data *gsd_array __nex_bss;

struct guest_partition {
	LIST_ENTRY(guest_partition) link;
	struct mmu_partition *mmu_prtn;
	struct memory_map mem_map;
	struct mutex mutex;
	void *tables_va;
	tee_mm_entry_t *tee_ram;
	tee_mm_entry_t *ta_ram;
	tee_mm_entry_t *tables;
	bool runtime_initialized;
	bool got_guest_destroyed;
	bool shutting_down;
	uint16_t id;
	struct refcount refc;
#ifdef CFG_CORE_SEL1_SPMC
	uint64_t cookies[SPMC_CORE_SEL1_MAX_SHM_COUNT];
	uint8_t cookie_count;
	bitstr_t bit_decl(shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT);
#endif
	void **data_array;
};

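/*
 * Per-core pointer to the guest partition currently active on that core,
 * or NULL when no guest context is set. Accessed with foreign interrupts
 * masked, see get_current_prtn() and set_current_prtn() below.
 */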
struct guest_partition *current_partition[CFG_TEE_CORE_NB_CORE] __nex_bss;

static struct guest_partition *get_current_prtn(void)
{
	struct guest_partition *ret;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	ret = current_partition[get_core_pos()];

	thread_unmask_exceptions(exceptions);

	return ret;
}

uint16_t virt_get_current_guest_id(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return 0;
	return prtn->id;
}

static void set_current_prtn(struct guest_partition *prtn)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	current_partition[get_core_pos()] = prtn;

	thread_unmask_exceptions(exceptions);
}

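/*
 * Each guest gets an equal share of the TA memory: the total TA size
 * divided by CFG_VIRT_GUEST_COUNT, minus the guest's private .data/.bss
 * copy and page tables, rounded down to a page boundary.
 */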
static size_t get_ta_ram_size(void)
{
	size_t ta_size = nex_phys_mem_get_ta_size();

	return ROUNDDOWN(ta_size / CFG_VIRT_GUEST_COUNT - VCORE_UNPG_RW_SZ -
			 core_mmu_get_total_pages_size(), SMALL_PAGE_SIZE);
}

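/*
 * Builds a per-guest memory map by copying the core map (kmem_map) and
 * redirecting the writable TEE_RAM_RW region to the guest's private copy
 * at physical address tee_data.
 */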
static TEE_Result prepare_memory_map(struct memory_map *mem_map,
				     paddr_t tee_data)
{
	struct tee_mmap_region *map = NULL;
	vaddr_t max_va = 0;
	size_t n = 0;
	/*
	 * This function assumes that kmem_map (aka static_memory_map
	 * from core_mmu.c) is not altered while it runs. This holds
	 * because all changes to static_memory_map are done during
	 * OP-TEE initialization, while this function is called when
	 * the hypervisor creates a guest.
	 */

	/* Allocate entries for virtual guest map */
	mem_map->map = nex_calloc(kmem_map->count + 1, sizeof(*mem_map->map));
	if (!mem_map->map)
		return TEE_ERROR_OUT_OF_MEMORY;
	mem_map->count = kmem_map->count;
	mem_map->alloc_count = kmem_map->count + 1;

	memcpy(mem_map->map, kmem_map->map,
	       sizeof(*mem_map->map) * mem_map->count);

	/* Map TEE .data and .bss sections */
	for (n = 0; n < mem_map->count; n++) {
		map = mem_map->map + n;
		if (map->va == (vaddr_t)(VCORE_UNPG_RW_PA)) {
			map->type = MEM_AREA_TEE_RAM_RW;
			map->attr = core_mmu_type_to_attr(map->type);
			map->pa = tee_data;
		}
		if (map->va + map->size > max_va)
			max_va = map->va + map->size;
	}

	DMSG("New map (%08lx):",  (vaddr_t)(VCORE_UNPG_RW_PA));

	for (n = 0; n < mem_map->count; n++)
		DMSG("T: %-16s rsz: %08x, pa: %08lx, va: %08lx, sz: %08lx attr: %x",
		     teecore_memtype_name(mem_map->map[n].type),
		     mem_map->map[n].region_size, mem_map->map[n].pa,
		     mem_map->map[n].va, mem_map->map[n].size,
		     mem_map->map[n].attr);
	return TEE_SUCCESS;
}

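/*
 * Initializes the nexus physical memory pools covering all secure RAM,
 * carves out the regions already used by the OP-TEE core itself and
 * remembers the core memory map for later per-guest map creation.
 */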
void virt_init_memory(struct memory_map *mem_map, paddr_t secmem0_base,
		      paddr_size_t secmem0_size, paddr_t secmem1_base,
		      paddr_size_t secmem1_size)
{
	size_t n = 0;

	/* Init page pool that covers all secure RAM */
	nex_phys_mem_init(secmem0_base, secmem0_size, secmem1_base,
			  secmem1_size);

	/* Carve out areas that are used by OP-TEE core */
	for (n = 0; n < mem_map->count; n++) {
		struct tee_mmap_region *map = mem_map->map + n;

		switch (map->type) {
		case MEM_AREA_TEE_RAM_RX:
		case MEM_AREA_TEE_RAM_RO:
		case MEM_AREA_NEX_RAM_RO:
		case MEM_AREA_NEX_RAM_RW:
			DMSG("Carving out area of type %d (0x%08lx-0x%08lx)",
			     map->type, map->pa, map->pa + map->size);
			if (!nex_phys_mem_alloc2(map->pa, map->size))
				panic("Can't carve out used area");
			break;
		default:
			continue;
		}
	}

	kmem_map = mem_map;
}

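/*
 * Allocates the per-guest physical memory (private .data/.bss copy, TA RAM
 * and translation tables), builds the guest memory map and MMU partition,
 * then initializes the fresh .data/.bss copy from the read-only original.
 */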
static TEE_Result configure_guest_prtn_mem(struct guest_partition *prtn)
{
	TEE_Result res = TEE_SUCCESS;
	paddr_t original_data_pa = 0;

	prtn->tee_ram = nex_phys_mem_core_alloc(VCORE_UNPG_RW_SZ);
	if (!prtn->tee_ram) {
		EMSG("Can't allocate memory for TEE runtime context");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TEE RAM: %08" PRIxPA, tee_mm_get_smem(prtn->tee_ram));

	prtn->ta_ram = nex_phys_mem_ta_alloc(get_ta_ram_size());
	if (!prtn->ta_ram) {
		EMSG("Can't allocate memory for TA data");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TA RAM: %08" PRIxPA, tee_mm_get_smem(prtn->ta_ram));

	prtn->tables = nex_phys_mem_core_alloc(core_mmu_get_total_pages_size());
	if (!prtn->tables) {
		EMSG("Can't allocate memory for page tables");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->tables_va = phys_to_virt(tee_mm_get_smem(prtn->tables),
				      MEM_AREA_SEC_RAM_OVERALL,
				      core_mmu_get_total_pages_size());
	assert(prtn->tables_va);

	prtn->mmu_prtn = core_alloc_mmu_prtn(prtn->tables_va);
	if (!prtn->mmu_prtn) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	res = prepare_memory_map(&prtn->mem_map,
				 tee_mm_get_smem(prtn->tee_ram));
	if (res)
		goto err;

	core_init_mmu_prtn(prtn->mmu_prtn, &prtn->mem_map);

	original_data_pa = virt_to_phys(__data_start);
	/* Switch to guest's mappings */
	core_mmu_set_prtn(prtn->mmu_prtn);

	/* clear .bss */
	memset((void *)(VCORE_UNPG_RW_PA), 0, VCORE_UNPG_RW_SZ);

	/* copy .data section from R/O original */
	memcpy(__data_start,
	       phys_to_virt(original_data_pa, MEM_AREA_SEC_RAM_OVERALL,
			    __data_end - __data_start),
	       __data_end - __data_start);

	return TEE_SUCCESS;

err:
	if (prtn->tee_ram)
		tee_mm_free(prtn->tee_ram);
	if (prtn->ta_ram)
		tee_mm_free(prtn->ta_ram);
	if (prtn->tables)
		tee_mm_free(prtn->tables);
	nex_free(prtn->mmu_prtn);
	nex_free(prtn->mem_map.map);

	return res;
}

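/*
 * Guest-specific data registered with virt_add_guest_spec_data() is
 * instantiated per partition in prtn->data_array. destroy_gsd() optionally
 * runs the registered destroy callbacks before freeing the entries.
 */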
static void destroy_gsd(struct guest_partition *prtn, bool free_only)
{
	size_t n = 0;

	for (n = 0; n < gsd_count; n++) {
		if (!free_only && prtn->data_array[n] && gsd_array[n].destroy)
			gsd_array[n].destroy(prtn->data_array[n]);
		nex_free(prtn->data_array[n]);
	}
	nex_free(prtn->data_array);
	prtn->data_array = NULL;
}

static TEE_Result alloc_gsd(struct guest_partition *prtn)
{
	unsigned int n = 0;

	if (!gsd_count)
		return TEE_SUCCESS;

	prtn->data_array = nex_calloc(gsd_count, sizeof(void *));
	if (!prtn->data_array)
		return TEE_ERROR_OUT_OF_MEMORY;

	for (n = 0; n < gsd_count; n++) {
		prtn->data_array[n] = nex_calloc(1, gsd_array[n].size);
		if (!prtn->data_array[n]) {
			destroy_gsd(prtn, true /*free_only*/);
			return TEE_ERROR_OUT_OF_MEMORY;
		}
	}

	return TEE_SUCCESS;
}
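
/*
 * Creates and initializes a new guest partition, typically in response to
 * OPTEE_SMC_VM_CREATED from the hypervisor: sets up the guest's private
 * heap, physical memory pools and threads, and adds the partition to the
 * global list.
 */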
TEE_Result virt_guest_created(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;

	if (guest_id == HYP_CLNT_ID)
		return TEE_ERROR_BAD_PARAMETERS;

	prtn = nex_calloc(1, sizeof(*prtn));
	if (!prtn)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = alloc_gsd(prtn);
	if (res)
		goto err_free_prtn;

	prtn->id = guest_id;
	mutex_init(&prtn->mutex);
	refcount_set(&prtn->refc, 1);
	res = configure_guest_prtn_mem(prtn);
	if (res)
		goto err_free_gsd;

	set_current_prtn(prtn);

	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
	/*
	 * The TA memory is registered in the core pool to allow it to be
	 * used for both core and TA physical memory allocations.
	 */
	phys_mem_init(tee_mm_get_smem(prtn->ta_ram),
		      tee_mm_get_bytes(prtn->ta_ram), 0, 0);
	page_alloc_init();
	/* Initialize threads */
	thread_init_threads(CFG_NUM_THREADS);
	/* Do the preinitcalls */
	call_preinitcalls();

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_INSERT_HEAD(&prtn_list, prtn, link);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	IMSG("Added guest %d", guest_id);

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();

	return TEE_SUCCESS;

err_free_gsd:
	destroy_gsd(prtn, true /*free_only*/);
err_free_prtn:
	nex_free(prtn);
	return res;
}

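/*
 * Returns true if the partition still owns FF-A cookies or SHM slots that
 * must be reclaimed before the partition structure can be freed. Without
 * an SPMC at S-EL1 there is nothing to reclaim.
 */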
static bool
prtn_have_remaining_resources(struct guest_partition *prtn __maybe_unused)
{
#ifdef CFG_CORE_SEL1_SPMC
	int i = 0;

	if (prtn->cookie_count)
		return true;
	bit_ffs(prtn->shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT, &i);
	return i >= 0;
#else
	return false;
#endif
}

static void get_prtn(struct guest_partition *prtn)
{
	if (!refcount_inc(&prtn->refc))
		panic();
}

uint16_t virt_get_guest_id(struct guest_partition *prtn)
{
	if (!prtn)
		return 0;
	return prtn->id;
}

static struct guest_partition *find_guest_by_id_unlocked(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;

	LIST_FOREACH(prtn, &prtn_list, link)
		if (!prtn->shutting_down && prtn->id == guest_id)
			return prtn;

	return NULL;
}

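/*
 * Iterates over the live guest partitions: pass NULL to get the first
 * guest or a previously returned partition to get the next one. The
 * reference on the passed partition is released, a new reference is taken
 * on the returned one, and guests that are shutting down are skipped.
 */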
struct guest_partition *virt_next_guest(struct guest_partition *prtn)
{
	struct guest_partition *ret = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	if (prtn)
		ret = LIST_NEXT(prtn, link);
	else
		ret = LIST_FIRST(&prtn_list);

	while (ret && ret->shutting_down)
		ret = LIST_NEXT(ret, link);
	if (ret)
		get_prtn(ret);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	virt_put_guest(prtn);

	return ret;
}

struct guest_partition *virt_get_current_guest(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (prtn)
		get_prtn(prtn);
	return prtn;
}

struct guest_partition *virt_get_guest(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_guest_by_id_unlocked(guest_id);
	if (prtn)
		get_prtn(prtn);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return prtn;
}

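/*
 * Releases a reference on a guest partition. When the last reference is
 * dropped the partition resources are freed; if FF-A cookies or SHM slots
 * remain, the partition is parked on prtn_destroy_list and freed later by
 * virt_reclaim_cookie_from_destroyed_guest().
 */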
void virt_put_guest(struct guest_partition *prtn)
{
	if (prtn && refcount_dec(&prtn->refc)) {
		uint32_t exceptions = 0;
		bool do_free = true;

		assert(prtn->shutting_down);

		exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
		LIST_REMOVE(prtn, link);
		if (prtn_have_remaining_resources(prtn)) {
			LIST_INSERT_HEAD(&prtn_destroy_list, prtn, link);
			/*
			 * Delay the nex_free() until
			 * virt_reclaim_cookie_from_destroyed_guest()
			 * is done with this partition.
			 */
			do_free = false;
		}
		cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

		destroy_gsd(prtn, false /*!free_only*/);
		tee_mm_free(prtn->tee_ram);
		prtn->tee_ram = NULL;
		tee_mm_free(prtn->ta_ram);
		prtn->ta_ram = NULL;
		tee_mm_free(prtn->tables);
		prtn->tables = NULL;
		core_free_mmu_prtn(prtn->mmu_prtn);
		prtn->mmu_prtn = NULL;
		nex_free(prtn->mem_map.map);
		prtn->mem_map.map = NULL;
		if (do_free)
			nex_free(prtn);
	}
}

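/*
 * Starts tear-down of a guest partition, typically in response to
 * OPTEE_SMC_VM_DESTROYED from the hypervisor: delivers the shutdown
 * notification, marks the partition as shutting down and drops the
 * initial reference taken in virt_guest_created().
 */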
TEE_Result virt_guest_destroyed(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	IMSG("Removing guest %"PRId16, guest_id);

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);

	prtn = find_guest_by_id_unlocked(guest_id);
	if (prtn && !prtn->got_guest_destroyed)
		prtn->got_guest_destroyed = true;
	else
		prtn = NULL;

	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	if (prtn) {
		notif_deliver_atomic_event(NOTIF_EVENT_SHUTDOWN, prtn->id);

		exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
		prtn->shutting_down = true;
		cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

		virt_put_guest(prtn);
	} else {
		EMSG("Client with id %d is not found", guest_id);
	}

	return TEE_SUCCESS;
}

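/*
 * Binds the calling core to the guest identified by guest_id: takes a
 * reference, records it as the current partition and switches to its MMU
 * partition. Balanced by virt_unset_guest().
 */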
TEE_Result virt_set_guest(uint16_t guest_id)
{
	struct guest_partition *prtn = get_current_prtn();

	/* This can be true only if we return from IRQ RPC */
	if (prtn && prtn->id == guest_id)
		return TEE_SUCCESS;

	if (prtn)
		panic("Virtual guest partition is already set");

	prtn = virt_get_guest(guest_id);
	if (!prtn)
		return TEE_ERROR_ITEM_NOT_FOUND;

	set_current_prtn(prtn);
	core_mmu_set_prtn(prtn->mmu_prtn);

	return TEE_SUCCESS;
}

void virt_unset_guest(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return;

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();
	virt_put_guest(prtn);
}

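/*
 * Called on standard calls from a guest; performs lazy, one-time
 * initialization of the TEE runtime and driver initcalls for that guest,
 * serialized by the partition mutex.
 */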
void virt_on_stdcall(void)
{
	struct guest_partition *prtn = get_current_prtn();

	/* Initialize runtime on first std call */
	if (!prtn->runtime_initialized) {
		mutex_lock(&prtn->mutex);
		if (!prtn->runtime_initialized) {
			init_tee_runtime();
			call_driver_initcalls();
			prtn->runtime_initialized = true;
		}
		mutex_unlock(&prtn->mutex);
	}
}

struct memory_map *virt_get_memory_map(void)
{
	struct guest_partition *prtn;

	prtn = get_current_prtn();

	if (!prtn)
		return NULL;

	return &prtn->mem_map;
}

#ifdef CFG_CORE_SEL1_SPMC
static int find_cookie(struct guest_partition *prtn, uint64_t cookie)
{
	int i = 0;

	for (i = 0; i < prtn->cookie_count; i++)
		if (prtn->cookies[i] == cookie)
			return i;
	return -1;
}

static struct guest_partition *find_prtn_cookie(uint64_t cookie, int *idx)
{
	struct guest_partition *prtn = NULL;
	int i = 0;

	LIST_FOREACH(prtn, &prtn_list, link) {
		i = find_cookie(prtn, cookie);
		if (i >= 0) {
			if (idx)
				*idx = i;
			return prtn;
		}
	}

	return NULL;
}

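/*
 * Records an FF-A shared memory cookie as owned by the guest running on
 * the current core so it can be found and reclaimed later. Fails if the
 * cookie is already tracked or the per-guest cookie table is full.
 */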
TEE_Result virt_add_cookie_to_current_guest(uint64_t cookie)
{
	TEE_Result res = TEE_ERROR_ACCESS_DENIED;
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	if (find_prtn_cookie(cookie, NULL))
		goto out;

	prtn = current_partition[get_core_pos()];
	if (prtn->cookie_count < ARRAY_SIZE(prtn->cookies)) {
		prtn->cookies[prtn->cookie_count] = cookie;
		prtn->cookie_count++;
		res = TEE_SUCCESS;
	}
out:
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return res;
}

void virt_remove_cookie(uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_prtn_cookie(cookie, &i);
	if (prtn) {
		memmove(prtn->cookies + i, prtn->cookies + i + 1,
			sizeof(uint64_t) * (prtn->cookie_count - i - 1));
		prtn->cookie_count--;
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
}

uint16_t virt_find_guest_by_cookie(uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;
	uint16_t ret = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_prtn_cookie(cookie, NULL);
	if (prtn)
		ret = prtn->id;

	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return ret;
}

bitstr_t *virt_get_shm_bits(void)
{
	return get_current_prtn()->shm_bits;
}

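/*
 * Releases a single shared memory handle owned by a destroyed guest:
 * hypervisor-allocated cookies are removed from the cookie table, while
 * SPMC-allocated handles are decoded to an index and cleared in shm_bits.
 */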
static TEE_Result reclaim_cookie(struct guest_partition *prtn, uint64_t cookie)
{
	if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
		size_t n = 0;

		for (n = 0; n < prtn->cookie_count; n++) {
			if (prtn->cookies[n] == cookie) {
				memmove(prtn->cookies + n,
					prtn->cookies + n + 1,
					sizeof(uint64_t) *
						(prtn->cookie_count - n - 1));
				prtn->cookie_count--;
				return TEE_SUCCESS;
			}
		}
	} else {
		uint64_t mask = FFA_MEMORY_HANDLE_NON_SECURE_BIT |
				SHIFT_U64(FFA_MEMORY_HANDLE_PRTN_MASK,
					  FFA_MEMORY_HANDLE_PRTN_SHIFT);
		int64_t i = cookie & ~mask;

		if (i >= 0 && i < SPMC_CORE_SEL1_MAX_SHM_COUNT &&
		    bit_test(prtn->shm_bits, i)) {
			bit_clear(prtn->shm_bits, i);
			return TEE_SUCCESS;
		}
	}

	return TEE_ERROR_ITEM_NOT_FOUND;
}

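/*
 * Reclaims a cookie from a guest that has already been destroyed but is
 * kept on prtn_destroy_list because it still owns shared memory. Once the
 * last such resource is reclaimed the partition structure is freed.
 */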
TEE_Result virt_reclaim_cookie_from_destroyed_guest(uint16_t guest_id,
						    uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	TEE_Result res = TEE_ERROR_ITEM_NOT_FOUND;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_FOREACH(prtn, &prtn_destroy_list, link) {
		if (prtn->id == guest_id) {
			res = reclaim_cookie(prtn, cookie);
			if (prtn_have_remaining_resources(prtn))
				prtn = NULL;
			else
				LIST_REMOVE(prtn, link);
			break;
		}
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	nex_free(prtn);

	return res;
}
#endif /*CFG_CORE_SEL1_SPMC*/

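/*
 * Registers a per-guest data block of data_size bytes that is allocated
 * for every guest partition; data_destroy, if non-NULL, is called when a
 * partition is destroyed. Returns a data_id for use with
 * virt_get_guest_spec_data(). Only allowed during boot, before the first
 * exit to the normal world.
 */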
TEE_Result virt_add_guest_spec_data(unsigned int *data_id, size_t data_size,
				    void (*data_destroy)(void *data))
{
	void *p = NULL;

	/*
	 * This function only executes successfully in a single threaded
	 * environment before exiting to the normal world the first time.
	 * If add_disabled is true, it means we're not in this environment
	 * any longer.
	 */

	if (add_disabled)
		return TEE_ERROR_BAD_PARAMETERS;

	p = nex_realloc(gsd_array, sizeof(*gsd_array) * (gsd_count + 1));
	if (!p)
		return TEE_ERROR_OUT_OF_MEMORY;
	gsd_array = p;

	gsd_array[gsd_count] = (struct guest_spec_data){
		.size = data_size,
		.destroy = data_destroy,
	};
	*data_id = gsd_count + 1;
	gsd_count++;
	return TEE_SUCCESS;
}

void *virt_get_guest_spec_data(struct guest_partition *prtn,
			       unsigned int data_id)
{
	assert(data_id);
	if (!data_id || !prtn || data_id > gsd_count)
		return NULL;
	return prtn->data_array[data_id - 1];
}

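/*
 * Runs when the nexus init resources are released at the end of boot and
 * blocks any further virt_add_guest_spec_data() registrations.
 */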
static TEE_Result virt_disable_add(void)
{
	add_disabled = true;

	return TEE_SUCCESS;
}
nex_release_init_resource(virt_disable_add);