// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018, EPAM Systems. All rights reserved.
 * Copyright (c) 2023-2024, Linaro Limited
 */

#include <bitstring.h>
#include <compiler.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/mutex.h>
#include <kernel/notif.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/thread_spmc.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/page_alloc.h>
#include <mm/phys_mem.h>
#include <mm/tee_mm.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <string.h>
#include <string_ext.h>
#include <util.h>

LIST_HEAD(prtn_list_head, guest_partition);

static unsigned int prtn_list_lock __nex_data = SPINLOCK_UNLOCK;

static struct prtn_list_head prtn_list __nex_data =
	LIST_HEAD_INITIALIZER(prtn_list);
static struct prtn_list_head prtn_destroy_list __nex_data =
	LIST_HEAD_INITIALIZER(prtn_destroy_list);

/* Memory used by OP-TEE core */
struct memory_map *kmem_map __nex_bss;

struct guest_spec_data {
	size_t size;
	void (*destroy)(void *data);
};

static bool add_disabled __nex_bss;
static unsigned int gsd_count __nex_bss;
static struct guest_spec_data *gsd_array __nex_bss;

struct guest_partition {
	LIST_ENTRY(guest_partition) link;
	struct mmu_partition *mmu_prtn;
	struct memory_map mem_map;
	struct mutex mutex;
	void *tables_va;
	tee_mm_entry_t *tee_ram;
	tee_mm_entry_t *ta_ram;
	tee_mm_entry_t *tables;
	bool runtime_initialized;
	bool got_guest_destroyed;
	bool shutting_down;
	uint16_t id;
	struct refcount refc;
#ifdef CFG_CORE_SEL1_SPMC
	uint64_t cookies[SPMC_CORE_SEL1_MAX_SHM_COUNT];
	uint8_t cookie_count;
	bitstr_t bit_decl(shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT);
#endif
	void **data_array;
};

struct guest_partition *current_partition[CFG_TEE_CORE_NB_CORE] __nex_bss;

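/*
 * Return the guest partition currently active on this core. Foreign
 * interrupts are masked so the core position and the per-core pointer are
 * read without the thread being preempted in between.
 */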
static struct guest_partition *get_current_prtn(void)
{
	struct guest_partition *ret;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	ret = current_partition[get_core_pos()];

	thread_unmask_exceptions(exceptions);

	return ret;
}

uint16_t virt_get_current_guest_id(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return 0;
	return prtn->id;
}

static void set_current_prtn(struct guest_partition *prtn)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	current_partition[get_core_pos()] = prtn;

	thread_unmask_exceptions(exceptions);
}

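/*
 * Each guest gets an equal share of the TA RAM pool. The per-guest copy of
 * .data/.bss (VCORE_UNPG_RW_SZ) and the guest's translation tables are
 * subtracted from that share, and the result is rounded down to a page
 * boundary.
 */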
static size_t get_ta_ram_size(void)
{
	size_t ta_size = nex_phys_mem_get_ta_size();

	return ROUNDDOWN(ta_size / CFG_VIRT_GUEST_COUNT - VCORE_UNPG_RW_SZ -
			 core_mmu_get_total_pages_size(), SMALL_PAGE_SIZE);
}

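/*
 * Build a per-guest memory map from the core memory map (kmem_map) and
 * redirect the TEE .data/.bss region to the guest's private copy at
 * @tee_data.
 */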
static TEE_Result prepare_memory_map(struct memory_map *mem_map,
				     paddr_t tee_data)
{
	struct tee_mmap_region *map = NULL;
	vaddr_t max_va = 0;
	size_t n = 0;
	/*
	 * This function assumes that kmem_map (aka static_memory_map from
	 * core_mmu.c) is not altered while it runs. This holds because all
	 * changes to static_memory_map are done during OP-TEE
	 * initialization, while this function is called when the
	 * hypervisor creates a guest.
	 */

	/* Allocate entries for virtual guest map */
	mem_map->map = nex_calloc(kmem_map->count + 1, sizeof(*mem_map->map));
	if (!mem_map->map)
		return TEE_ERROR_OUT_OF_MEMORY;
	mem_map->count = kmem_map->count;
	mem_map->alloc_count = kmem_map->count + 1;

	memcpy(mem_map->map, kmem_map->map,
	       sizeof(*mem_map->map) * mem_map->count);

	/* Map TEE .data and .bss sections */
	for (n = 0; n < mem_map->count; n++) {
		map = mem_map->map + n;
		if (map->va == (vaddr_t)(VCORE_UNPG_RW_PA)) {
			map->type = MEM_AREA_TEE_RAM_RW;
			map->attr = core_mmu_type_to_attr(map->type);
			map->pa = tee_data;
		}
		if (map->va + map->size > max_va)
			max_va = map->va + map->size;
	}

	DMSG("New map (%08lx):", (vaddr_t)(VCORE_UNPG_RW_PA));

	for (n = 0; n < mem_map->count; n++)
		DMSG("T: %-16s rsz: %08x, pa: %08lx, va: %08lx, sz: %08lx attr: %x",
		     teecore_memtype_name(mem_map->map[n].type),
		     mem_map->map[n].region_size, mem_map->map[n].pa,
		     mem_map->map[n].va, mem_map->map[n].size,
		     mem_map->map[n].attr);
	return TEE_SUCCESS;
}

void virt_init_memory(struct memory_map *mem_map, paddr_t secmem0_base,
		      paddr_size_t secmem0_size, paddr_t secmem1_base,
		      paddr_size_t secmem1_size)
{
	size_t n = 0;

	/* Init page pool that covers all secure RAM */
	nex_phys_mem_init(secmem0_base, secmem0_size, secmem1_base,
			  secmem1_size);

	/* Carve out areas that are used by OP-TEE core */
	for (n = 0; n < mem_map->count; n++) {
		struct tee_mmap_region *map = mem_map->map + n;

		switch (map->type) {
		case MEM_AREA_TEE_RAM_RX:
		case MEM_AREA_TEE_RAM_RO:
		case MEM_AREA_NEX_RAM_RO:
		case MEM_AREA_NEX_RAM_RW:
			DMSG("Carving out area of type %d (0x%08lx-0x%08lx)",
			     map->type, map->pa, map->pa + map->size);
			if (!nex_phys_mem_alloc2(map->pa, map->size))
				panic("Can't carve out used area");
			break;
		default:
			continue;
		}
	}

	kmem_map = mem_map;
}

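/*
 * Allocate the per-guest physical memory (TEE RAM for the private
 * .data/.bss copy, TA RAM and translation tables), build the guest memory
 * map and MMU partition, then switch to the new mapping to clear .bss and
 * populate .data from the read-only original.
 */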
static TEE_Result configure_guest_prtn_mem(struct guest_partition *prtn)
{
	TEE_Result res = TEE_SUCCESS;
	paddr_t original_data_pa = 0;

	prtn->tee_ram = nex_phys_mem_core_alloc(VCORE_UNPG_RW_SZ);
	if (!prtn->tee_ram) {
		EMSG("Can't allocate memory for TEE runtime context");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TEE RAM: %08" PRIxPA, tee_mm_get_smem(prtn->tee_ram));

	prtn->ta_ram = nex_phys_mem_ta_alloc(get_ta_ram_size());
	if (!prtn->ta_ram) {
		EMSG("Can't allocate memory for TA data");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TA RAM: %08" PRIxPA, tee_mm_get_smem(prtn->ta_ram));

	prtn->tables = nex_phys_mem_core_alloc(core_mmu_get_total_pages_size());
	if (!prtn->tables) {
		EMSG("Can't allocate memory for page tables");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->tables_va = phys_to_virt(tee_mm_get_smem(prtn->tables),
				       MEM_AREA_SEC_RAM_OVERALL,
				       core_mmu_get_total_pages_size());
	assert(prtn->tables_va);

	prtn->mmu_prtn = core_alloc_mmu_prtn(prtn->tables_va);
	if (!prtn->mmu_prtn) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	res = prepare_memory_map(&prtn->mem_map,
				 tee_mm_get_smem(prtn->tee_ram));
	if (res)
		goto err;

	core_init_mmu_prtn(prtn->mmu_prtn, &prtn->mem_map);

	original_data_pa = virt_to_phys(__data_start);
	/* Switch to guest's mappings */
	core_mmu_set_prtn(prtn->mmu_prtn);

	/* clear .bss */
	memset((void *)(VCORE_UNPG_RW_PA), 0, VCORE_UNPG_RW_SZ);

	/* copy .data section from R/O original */
	memcpy(__data_start,
	       phys_to_virt(original_data_pa, MEM_AREA_SEC_RAM_OVERALL,
			    __data_end - __data_start),
	       __data_end - __data_start);

	return TEE_SUCCESS;

err:
	if (prtn->tee_ram)
		tee_mm_free(prtn->tee_ram);
	if (prtn->ta_ram)
		tee_mm_free(prtn->ta_ram);
	if (prtn->tables)
		tee_mm_free(prtn->tables);
	nex_free(prtn->mmu_prtn);
	nex_free(prtn->mem_map.map);

	return res;
}

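/*
 * Free the guest specific data blocks of @prtn. Unless @free_only is set,
 * each block's destroy() callback is invoked before the block is freed.
 */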
static void destroy_gsd(struct guest_partition *prtn, bool free_only)
{
	size_t n = 0;

	for (n = 0; n < gsd_count; n++) {
		if (!free_only && prtn->data_array[n] && gsd_array[n].destroy)
			gsd_array[n].destroy(prtn->data_array[n]);
		nex_free(prtn->data_array[n]);
	}
	nex_free(prtn->data_array);
	prtn->data_array = NULL;
}

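/*
 * Allocate one zero-initialized block per registered guest specific data
 * entry, see virt_add_guest_spec_data().
 */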
static TEE_Result alloc_gsd(struct guest_partition *prtn)
{
	unsigned int n = 0;

	if (!gsd_count)
		return TEE_SUCCESS;

	prtn->data_array = nex_calloc(gsd_count, sizeof(void *));
	if (!prtn->data_array)
		return TEE_ERROR_OUT_OF_MEMORY;

	for (n = 0; n < gsd_count; n++) {
		prtn->data_array[n] = nex_calloc(1, gsd_array[n].size);
		if (!prtn->data_array[n]) {
			destroy_gsd(prtn, true /*free_only*/);
			return TEE_ERROR_OUT_OF_MEMORY;
		}
	}

	return TEE_SUCCESS;
}
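
/*
 * Called when a new guest has been created. Sets up the guest partition,
 * initializes its heap, physical memory pool and threads, and inserts it
 * into the global partition list with an initial reference.
 */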
TEE_Result virt_guest_created(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;

	if (guest_id == HYP_CLNT_ID)
		return TEE_ERROR_BAD_PARAMETERS;

	prtn = nex_calloc(1, sizeof(*prtn));
	if (!prtn)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = alloc_gsd(prtn);
	if (res)
		goto err_free_prtn;

	prtn->id = guest_id;
	mutex_init(&prtn->mutex);
	refcount_set(&prtn->refc, 1);
	res = configure_guest_prtn_mem(prtn);
	if (res)
		goto err_free_gsd;

	set_current_prtn(prtn);

	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
	/*
	 * The TA memory is registered in the core pool to allow it to be
	 * used for both core and TA physical memory allocations.
	 */
	phys_mem_init(tee_mm_get_smem(prtn->ta_ram),
		      tee_mm_get_bytes(prtn->ta_ram), 0, 0);
	page_alloc_init();
	/* Initialize threads */
	thread_init_threads(CFG_NUM_THREADS);
	/* Do the preinitcalls */
	call_preinitcalls();

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_INSERT_HEAD(&prtn_list, prtn, link);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	IMSG("Added guest %d", guest_id);

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();

	return TEE_SUCCESS;

err_free_gsd:
	destroy_gsd(prtn, true /*free_only*/);
err_free_prtn:
	nex_free(prtn);
	return res;
}

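/*
 * Check whether the partition still owns FF-A cookies or shared memory
 * bits that must be reclaimed before it can be freed. Only possible with
 * the S-EL1 SPMC.
 */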
static bool
prtn_have_remaining_resources(struct guest_partition *prtn __maybe_unused)
{
#ifdef CFG_CORE_SEL1_SPMC
	int i = 0;

	if (prtn->cookie_count)
		return true;
	bit_ffs(prtn->shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT, &i);
	return i >= 0;
#else
	return false;
#endif
}

static void get_prtn(struct guest_partition *prtn)
{
	if (!refcount_inc(&prtn->refc))
		panic();
}

uint16_t virt_get_guest_id(struct guest_partition *prtn)
{
	if (!prtn)
		return 0;
	return prtn->id;
}

static struct guest_partition *find_guest_by_id_unlocked(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;

	LIST_FOREACH(prtn, &prtn_list, link)
		if (!prtn->shutting_down && prtn->id == guest_id)
			return prtn;

	return NULL;
}

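/*
 * Iterate over the guest partitions: return the next live (not shutting
 * down) partition after @prtn, or the first one if @prtn is NULL. A
 * reference is taken on the returned partition and the reference held on
 * @prtn is dropped.
 */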
struct guest_partition *virt_next_guest(struct guest_partition *prtn)
{
	struct guest_partition *ret = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	if (prtn)
		ret = LIST_NEXT(prtn, link);
	else
		ret = LIST_FIRST(&prtn_list);

	/* Skip partitions that are shutting down */
	while (ret && ret->shutting_down)
		ret = LIST_NEXT(ret, link);
	if (ret)
		get_prtn(ret);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	virt_put_guest(prtn);

	return ret;
}

struct guest_partition *virt_get_current_guest(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (prtn)
		get_prtn(prtn);
	return prtn;
}

struct guest_partition *virt_get_guest(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_guest_by_id_unlocked(guest_id);
	if (prtn)
		get_prtn(prtn);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return prtn;
}

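/*
 * Drop a reference to @prtn. When the last reference is released the
 * partition is unlinked and its resources are freed. If FF-A cookies or
 * shared memory bits remain, the struct itself is kept on
 * prtn_destroy_list until virt_reclaim_cookie_from_destroyed_guest() has
 * reclaimed them.
 */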
void virt_put_guest(struct guest_partition *prtn)
{
	if (prtn && refcount_dec(&prtn->refc)) {
		uint32_t exceptions = 0;
		bool do_free = true;

		assert(prtn->shutting_down);

		exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
		LIST_REMOVE(prtn, link);
		if (prtn_have_remaining_resources(prtn)) {
			LIST_INSERT_HEAD(&prtn_destroy_list, prtn, link);
			/*
			 * Delay the nex_free() until
			 * virt_reclaim_cookie_from_destroyed_guest()
			 * is done with this partition.
			 */
			do_free = false;
		}
		cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

		destroy_gsd(prtn, false /*!free_only*/);
		tee_mm_free(prtn->tee_ram);
		prtn->tee_ram = NULL;
		tee_mm_free(prtn->ta_ram);
		prtn->ta_ram = NULL;
		tee_mm_free(prtn->tables);
		prtn->tables = NULL;
		core_free_mmu_prtn(prtn->mmu_prtn);
		prtn->mmu_prtn = NULL;
		nex_free(prtn->mem_map.map);
		prtn->mem_map.map = NULL;
		if (do_free)
			nex_free(prtn);
	}
}

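/*
 * Called when a guest has been destroyed. Delivers the shutdown
 * notification, marks the partition as shutting down and drops the initial
 * reference taken in virt_guest_created().
 */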
TEE_Result virt_guest_destroyed(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	IMSG("Removing guest %"PRId16, guest_id);

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);

	prtn = find_guest_by_id_unlocked(guest_id);
	if (prtn && !prtn->got_guest_destroyed)
		prtn->got_guest_destroyed = true;
	else
		prtn = NULL;

	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	if (prtn) {
		notif_deliver_atomic_event(NOTIF_EVENT_SHUTDOWN, prtn->id);

		exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
		prtn->shutting_down = true;
		cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

		virt_put_guest(prtn);
	} else {
		EMSG("Client with id %d is not found", guest_id);
	}

	return TEE_SUCCESS;
}

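/*
 * Activate the partition of @guest_id on this core and switch to its MMU
 * mapping. Panics if a different partition is already active on this core.
 */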
TEE_Result virt_set_guest(uint16_t guest_id)
{
	struct guest_partition *prtn = get_current_prtn();

	/* This can be true only if we return from IRQ RPC */
	if (prtn && prtn->id == guest_id)
		return TEE_SUCCESS;

	if (prtn)
		panic("Virtual guest partition is already set");

	prtn = virt_get_guest(guest_id);
	if (!prtn)
		return TEE_ERROR_ITEM_NOT_FOUND;

	set_current_prtn(prtn);
	core_mmu_set_prtn(prtn->mmu_prtn);

	return TEE_SUCCESS;
}

void virt_unset_guest(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return;

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();
	virt_put_guest(prtn);
}

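/*
 * Lazily initialize the per-guest TEE runtime and run the driver initcalls
 * on the first standard call from a guest, serialized with the partition
 * mutex.
 */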
void virt_on_stdcall(void)
{
	struct guest_partition *prtn = get_current_prtn();

	/* Initialize runtime on first std call */
	if (!prtn->runtime_initialized) {
		mutex_lock(&prtn->mutex);
		if (!prtn->runtime_initialized) {
			init_tee_runtime();
			call_driver_initcalls();
			prtn->runtime_initialized = true;
		}
		mutex_unlock(&prtn->mutex);
	}
}

struct memory_map *virt_get_memory_map(void)
{
	struct guest_partition *prtn = NULL;

	prtn = get_current_prtn();

	if (!prtn)
		return NULL;

	return &prtn->mem_map;
}

#ifdef CFG_CORE_SEL1_SPMC
static int find_cookie(struct guest_partition *prtn, uint64_t cookie)
{
	int i = 0;

	for (i = 0; i < prtn->cookie_count; i++)
		if (prtn->cookies[i] == cookie)
			return i;
	return -1;
}

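/*
 * Look up the partition owning @cookie and optionally return its index in
 * the cookie array. Called with prtn_list_lock held.
 */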
static struct guest_partition *find_prtn_cookie(uint64_t cookie, int *idx)
{
	struct guest_partition *prtn = NULL;
	int i = 0;

	LIST_FOREACH(prtn, &prtn_list, link) {
		i = find_cookie(prtn, cookie);
		if (i >= 0) {
			if (idx)
				*idx = i;
			return prtn;
		}
	}

	return NULL;
}

TEE_Result virt_add_cookie_to_current_guest(uint64_t cookie)
{
	TEE_Result res = TEE_ERROR_ACCESS_DENIED;
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	if (find_prtn_cookie(cookie, NULL))
		goto out;

	prtn = current_partition[get_core_pos()];
	if (prtn->cookie_count < ARRAY_SIZE(prtn->cookies)) {
		prtn->cookies[prtn->cookie_count] = cookie;
		prtn->cookie_count++;
		res = TEE_SUCCESS;
	}
out:
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return res;
}

void virt_remove_cookie(uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_prtn_cookie(cookie, &i);
	if (prtn) {
		memmove(prtn->cookies + i, prtn->cookies + i + 1,
			sizeof(uint64_t) * (prtn->cookie_count - i - 1));
		prtn->cookie_count--;
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
}

uint16_t virt_find_guest_by_cookie(uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;
	uint16_t ret = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_prtn_cookie(cookie, NULL);
	if (prtn)
		ret = prtn->id;

	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return ret;
}

bitstr_t *virt_get_shm_bits(void)
{
	return get_current_prtn()->shm_bits;
}

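/*
 * Release the shared memory tracking for @cookie: either remove it from
 * the hypervisor-allocated cookie array or clear the corresponding bit in
 * shm_bits for SPMC-allocated handles.
 */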
static TEE_Result reclaim_cookie(struct guest_partition *prtn, uint64_t cookie)
{
	if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
		size_t n = 0;

		for (n = 0; n < prtn->cookie_count; n++) {
			if (prtn->cookies[n] == cookie) {
				memmove(prtn->cookies + n,
					prtn->cookies + n + 1,
					sizeof(uint64_t) *
					(prtn->cookie_count - n - 1));
				prtn->cookie_count--;
				return TEE_SUCCESS;
			}
		}
	} else {
		uint64_t mask = FFA_MEMORY_HANDLE_NON_SECURE_BIT |
				SHIFT_U64(FFA_MEMORY_HANDLE_PRTN_MASK,
					  FFA_MEMORY_HANDLE_PRTN_SHIFT);
		int64_t i = cookie & ~mask;

		if (i >= 0 && i < SPMC_CORE_SEL1_MAX_SHM_COUNT &&
		    bit_test(prtn->shm_bits, i)) {
			bit_clear(prtn->shm_bits, i);
			return TEE_SUCCESS;
		}
	}

	return TEE_ERROR_ITEM_NOT_FOUND;
}

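/*
 * Reclaim @cookie from a guest that has already been destroyed. Once the
 * partition on prtn_destroy_list has no remaining resources it is removed
 * from the list and freed.
 */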
TEE_Result virt_reclaim_cookie_from_destroyed_guest(uint16_t guest_id,
						    uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	TEE_Result res = TEE_ERROR_ITEM_NOT_FOUND;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_FOREACH(prtn, &prtn_destroy_list, link) {
		if (prtn->id == guest_id) {
			res = reclaim_cookie(prtn, cookie);
			if (prtn_have_remaining_resources(prtn))
				prtn = NULL;
			else
				LIST_REMOVE(prtn, link);
			break;
		}
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	nex_free(prtn);

	return res;
}
#endif /*CFG_CORE_SEL1_SPMC*/

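/*
 * Register a guest specific data entry of @data_size bytes with an
 * optional @data_destroy callback. The returned @data_id (starting at 1)
 * is used with virt_get_guest_spec_data(). Only allowed before the first
 * exit to the normal world, see virt_disable_add().
 */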
TEE_Result virt_add_guest_spec_data(unsigned int *data_id, size_t data_size,
				    void (*data_destroy)(void *data))
{
	void *p = NULL;

	/*
	 * This function only executes successfully in a single threaded
	 * environment before exiting to the normal world the first time.
	 * If add_disabled is true, it means we're not in this environment
	 * any longer.
	 */

	if (add_disabled)
		return TEE_ERROR_BAD_PARAMETERS;

	p = nex_realloc(gsd_array, sizeof(*gsd_array) * (gsd_count + 1));
	if (!p)
		return TEE_ERROR_OUT_OF_MEMORY;
	gsd_array = p;

	gsd_array[gsd_count] = (struct guest_spec_data){
		.size = data_size,
		.destroy = data_destroy,
	};
	*data_id = gsd_count + 1;
	gsd_count++;
	return TEE_SUCCESS;
}

void *virt_get_guest_spec_data(struct guest_partition *prtn,
			       unsigned int data_id)
{
	assert(data_id);
	if (!data_id || !prtn || data_id > gsd_count)
		return NULL;
	return prtn->data_array[data_id - 1];
}

static TEE_Result virt_disable_add(void)
{
	add_disabled = true;

	return TEE_SUCCESS;
}
nex_release_init_resource(virt_disable_add);