// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018, EPAM Systems. All rights reserved.
 * Copyright (c) 2023-2024, Linaro Limited
 */

#include <bitstring.h>
#include <compiler.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/mutex.h>
#include <kernel/notif.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/thread_spmc.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/phys_mem.h>
#include <mm/tee_mm.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <string.h>
#include <string_ext.h>
#include <util.h>

LIST_HEAD(prtn_list_head, guest_partition);

static unsigned int prtn_list_lock __nex_data = SPINLOCK_UNLOCK;

static struct prtn_list_head prtn_list __nex_data =
	LIST_HEAD_INITIALIZER(prtn_list);
static struct prtn_list_head prtn_destroy_list __nex_data =
	LIST_HEAD_INITIALIZER(prtn_destroy_list);

/* Memory used by OP-TEE core */
struct memory_map *kmem_map __nex_bss;

struct guest_spec_data {
	size_t size;
	void (*destroy)(void *data);
};

static bool add_disabled __nex_bss;
static unsigned int gsd_count __nex_bss;
static struct guest_spec_data *gsd_array __nex_bss;

struct guest_partition {
	LIST_ENTRY(guest_partition) link;
	struct mmu_partition *mmu_prtn;
	struct memory_map mem_map;
	struct mutex mutex;
	void *tables_va;
	tee_mm_entry_t *tee_ram;
	tee_mm_entry_t *ta_ram;
	tee_mm_entry_t *tables;
	bool runtime_initialized;
	bool got_guest_destroyed;
	bool shutting_down;
	uint16_t id;
	struct refcount refc;
#ifdef CFG_CORE_SEL1_SPMC
	uint64_t cookies[SPMC_CORE_SEL1_MAX_SHM_COUNT];
	uint8_t cookie_count;
	bitstr_t bit_decl(shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT);
#endif
	void **data_array;
};

struct guest_partition *current_partition[CFG_TEE_CORE_NB_CORE] __nex_bss;

static struct guest_partition *get_current_prtn(void)
{
	struct guest_partition *ret;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	ret = current_partition[get_core_pos()];

	thread_unmask_exceptions(exceptions);

	return ret;
}

uint16_t virt_get_current_guest_id(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return 0;
	return prtn->id;
}

static void set_current_prtn(struct guest_partition *prtn)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	current_partition[get_core_pos()] = prtn;

	thread_unmask_exceptions(exceptions);
}

static size_t get_ta_ram_size(void)
{
	size_t ta_size = nex_phys_mem_get_ta_size();

	return ROUNDDOWN(ta_size / CFG_VIRT_GUEST_COUNT - VCORE_UNPG_RW_SZ -
			 core_mmu_get_total_pages_size(), SMALL_PAGE_SIZE);
}

static TEE_Result prepare_memory_map(struct memory_map *mem_map,
				     paddr_t tee_data)
{
	struct tee_mmap_region *map = NULL;
	vaddr_t max_va = 0;
	size_t n = 0;
	/*
	 * This function assumes that at the time of operation,
	 * kmem_map (aka static_memory_map from core_mmu.c)
	 * will not be altered. This is true, because all
	 * changes to static_memory_map are done during
	 * OP-TEE initialization, while this function will
	 * be called when the hypervisor creates a guest.
	 */

	/* Allocate entries for virtual guest map */
	mem_map->map = nex_calloc(kmem_map->count + 1, sizeof(*mem_map->map));
	if (!mem_map->map)
		return TEE_ERROR_OUT_OF_MEMORY;
	mem_map->count = kmem_map->count;
	mem_map->alloc_count = kmem_map->count + 1;

	memcpy(mem_map->map, kmem_map->map,
	       sizeof(*mem_map->map) * mem_map->count);

	/* Map TEE .data and .bss sections */
	for (n = 0; n < mem_map->count; n++) {
		map = mem_map->map + n;
		if (map->va == (vaddr_t)(VCORE_UNPG_RW_PA)) {
			map->type = MEM_AREA_TEE_RAM_RW;
			map->attr = core_mmu_type_to_attr(map->type);
			map->pa = tee_data;
		}
		if (map->va + map->size > max_va)
			max_va = map->va + map->size;
	}

	DMSG("New map (%08lx):", (vaddr_t)(VCORE_UNPG_RW_PA));

	for (n = 0; n < mem_map->count; n++)
		DMSG("T: %-16s rsz: %08x, pa: %08lx, va: %08lx, sz: %08lx attr: %x",
		     teecore_memtype_name(mem_map->map[n].type),
		     mem_map->map[n].region_size, mem_map->map[n].pa,
		     mem_map->map[n].va, mem_map->map[n].size,
		     mem_map->map[n].attr);
	return TEE_SUCCESS;
}

void virt_init_memory(struct memory_map *mem_map, paddr_t secmem0_base,
		      paddr_size_t secmem0_size, paddr_t secmem1_base,
		      paddr_size_t secmem1_size)
{
	size_t n = 0;

	/* Init page pool that covers all secure RAM */
	nex_phys_mem_init(secmem0_base, secmem0_size, secmem1_base,
			  secmem1_size);

	/* Carve out areas that are used by OP-TEE core */
	for (n = 0; n < mem_map->count; n++) {
		struct tee_mmap_region *map = mem_map->map + n;

		switch (map->type) {
		case MEM_AREA_TEE_RAM_RX:
		case MEM_AREA_TEE_RAM_RO:
		case MEM_AREA_NEX_RAM_RO:
		case MEM_AREA_NEX_RAM_RW:
			DMSG("Carving out area of type %d (0x%08lx-0x%08lx)",
			     map->type, map->pa, map->pa + map->size);
			if (!nex_phys_mem_alloc2(map->pa, map->size))
				panic("Can't carve out used area");
			break;
		default:
			continue;
		}
	}

	kmem_map = mem_map;
}

static TEE_Result configure_guest_prtn_mem(struct guest_partition *prtn)
{
	TEE_Result res = TEE_SUCCESS;
	paddr_t original_data_pa = 0;

	prtn->tee_ram = nex_phys_mem_core_alloc(VCORE_UNPG_RW_SZ);
	if (!prtn->tee_ram) {
		EMSG("Can't allocate memory for TEE runtime context");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TEE RAM: %08" PRIxPA, tee_mm_get_smem(prtn->tee_ram));

	prtn->ta_ram = nex_phys_mem_ta_alloc(get_ta_ram_size());
	if (!prtn->ta_ram) {
		EMSG("Can't allocate memory for TA data");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TA RAM: %08" PRIxPA, tee_mm_get_smem(prtn->ta_ram));

	prtn->tables = nex_phys_mem_core_alloc(core_mmu_get_total_pages_size());
	if (!prtn->tables) {
		EMSG("Can't allocate memory for page tables");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->tables_va = phys_to_virt(tee_mm_get_smem(prtn->tables),
				       MEM_AREA_SEC_RAM_OVERALL,
				       core_mmu_get_total_pages_size());
	assert(prtn->tables_va);

	prtn->mmu_prtn = core_alloc_mmu_prtn(prtn->tables_va);
	if (!prtn->mmu_prtn) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	res = prepare_memory_map(&prtn->mem_map,
				 tee_mm_get_smem(prtn->tee_ram));
	if (res)
		goto err;

	core_init_mmu_prtn(prtn->mmu_prtn, &prtn->mem_map);

	original_data_pa = virt_to_phys(__data_start);
	/* Switch to guest's mappings */
	core_mmu_set_prtn(prtn->mmu_prtn);

	/* clear .bss */
	memset((void *)(VCORE_UNPG_RW_PA), 0, VCORE_UNPG_RW_SZ);

	/* copy .data section from R/O original */
	memcpy(__data_start,
	       phys_to_virt(original_data_pa, MEM_AREA_SEC_RAM_OVERALL,
			    __data_end - __data_start),
	       __data_end - __data_start);

	return TEE_SUCCESS;

err:
	if (prtn->tee_ram)
		tee_mm_free(prtn->tee_ram);
	if (prtn->ta_ram)
		tee_mm_free(prtn->ta_ram);
	if (prtn->tables)
		tee_mm_free(prtn->tables);
	nex_free(prtn->mmu_prtn);
	nex_free(prtn->mem_map.map);

	return res;
}

static void destroy_gsd(struct guest_partition *prtn, bool free_only)
{
	size_t n = 0;

	for (n = 0; n < gsd_count; n++) {
		if (!free_only && prtn->data_array[n] && gsd_array[n].destroy)
			gsd_array[n].destroy(prtn->data_array[n]);
		nex_free(prtn->data_array[n]);
	}
	nex_free(prtn->data_array);
	prtn->data_array = NULL;
}

static TEE_Result alloc_gsd(struct guest_partition *prtn)
{
	unsigned int n = 0;

	if (!gsd_count)
		return TEE_SUCCESS;

	prtn->data_array = nex_calloc(gsd_count, sizeof(void *));
	if (!prtn->data_array)
		return TEE_ERROR_OUT_OF_MEMORY;

	for (n = 0; n < gsd_count; n++) {
		prtn->data_array[n] = nex_calloc(1, gsd_array[n].size);
		if (!prtn->data_array[n]) {
			destroy_gsd(prtn, true /*free_only*/);
			return TEE_ERROR_OUT_OF_MEMORY;
		}
	}

	return TEE_SUCCESS;
}

TEE_Result virt_guest_created(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;

	prtn = nex_calloc(1, sizeof(*prtn));
	if (!prtn)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = alloc_gsd(prtn);
	if (res)
		goto err_free_prtn;

	prtn->id = guest_id;
	mutex_init(&prtn->mutex);
	refcount_set(&prtn->refc, 1);
	res = configure_guest_prtn_mem(prtn);
	if (res)
		goto err_free_gsd;

	set_current_prtn(prtn);

	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
	/* Initialize threads */
	thread_init_threads();
	/* Do the preinitcalls */
	call_preinitcalls();

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_INSERT_HEAD(&prtn_list, prtn, link);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	IMSG("Added guest %d", guest_id);

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();

	return TEE_SUCCESS;

err_free_gsd:
	destroy_gsd(prtn, true /*free_only*/);
err_free_prtn:
	nex_free(prtn);
	return res;
}

static bool
prtn_have_remaining_resources(struct guest_partition *prtn __maybe_unused)
{
#ifdef CFG_CORE_SEL1_SPMC
	int i = 0;

	if (prtn->cookie_count)
		return true;
	bit_ffs(prtn->shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT, &i);
	return i >= 0;
#else
	return false;
#endif
}

static void get_prtn(struct guest_partition *prtn)
{
	if (!refcount_inc(&prtn->refc))
		panic();
}

uint16_t virt_get_guest_id(struct guest_partition *prtn)
{
	if (!prtn)
		return 0;
	return prtn->id;
}

static struct guest_partition *find_guest_by_id_unlocked(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;

	LIST_FOREACH(prtn, &prtn_list, link)
		if (!prtn->shutting_down && prtn->id == guest_id)
			return prtn;

	return NULL;
}

struct guest_partition *virt_next_guest(struct guest_partition *prtn)
{
	struct guest_partition *ret = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	if (prtn)
		ret = LIST_NEXT(prtn, link);
	else
		ret = LIST_FIRST(&prtn_list);

	/* Skip partitions that are shutting down */
	while (ret && ret->shutting_down)
		ret = LIST_NEXT(ret, link);
	if (ret)
		get_prtn(ret);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	virt_put_guest(prtn);

	return ret;
}

struct guest_partition *virt_get_current_guest(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (prtn)
		get_prtn(prtn);
	return prtn;
}

struct guest_partition *virt_get_guest(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_guest_by_id_unlocked(guest_id);
	if (prtn)
		get_prtn(prtn);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return prtn;
}

void virt_put_guest(struct guest_partition *prtn)
{
	if (prtn && refcount_dec(&prtn->refc)) {
		uint32_t exceptions = 0;
		bool do_free = true;

		assert(prtn->shutting_down);

		exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
		LIST_REMOVE(prtn, link);
		if (prtn_have_remaining_resources(prtn)) {
			LIST_INSERT_HEAD(&prtn_destroy_list, prtn, link);
			/*
			 * Delay the nex_free() until
			 * virt_reclaim_cookie_from_destroyed_guest()
			 * is done with this partition.
			 */
			do_free = false;
		}
		cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

		destroy_gsd(prtn, false /*!free_only*/);
		tee_mm_free(prtn->tee_ram);
		prtn->tee_ram = NULL;
		tee_mm_free(prtn->ta_ram);
		prtn->ta_ram = NULL;
		tee_mm_free(prtn->tables);
		prtn->tables = NULL;
		core_free_mmu_prtn(prtn->mmu_prtn);
		prtn->mmu_prtn = NULL;
		nex_free(prtn->mem_map.map);
		prtn->mem_map.map = NULL;
		if (do_free)
			nex_free(prtn);
	}
}

TEE_Result virt_guest_destroyed(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	IMSG("Removing guest %"PRId16, guest_id);

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);

	prtn = find_guest_by_id_unlocked(guest_id);
	if (prtn && !prtn->got_guest_destroyed)
		prtn->got_guest_destroyed = true;
	else
		prtn = NULL;

	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	if (prtn) {
		notif_deliver_atomic_event(NOTIF_EVENT_SHUTDOWN, prtn->id);

		exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
		prtn->shutting_down = true;
		cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

		virt_put_guest(prtn);
	} else {
		EMSG("Client with id %d not found", guest_id);
	}

	return TEE_SUCCESS;
}

TEE_Result virt_set_guest(uint16_t guest_id)
{
	struct guest_partition *prtn = get_current_prtn();

	/* This can be true only if we return from IRQ RPC */
	if (prtn && prtn->id == guest_id)
		return TEE_SUCCESS;

	if (prtn)
		panic("Virtual guest partition is already set");

	prtn = virt_get_guest(guest_id);
	if (!prtn)
		return TEE_ERROR_ITEM_NOT_FOUND;

	set_current_prtn(prtn);
	core_mmu_set_prtn(prtn->mmu_prtn);

	return TEE_SUCCESS;
}

void virt_unset_guest(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return;

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();
	virt_put_guest(prtn);
}

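/*
 * Usage sketch (assuming a hypothetical caller with a guest_id and a
 * do_work() helper): virt_get_guest() returns the partition with a
 * reference held, so every successful lookup must be balanced with
 * virt_put_guest():
 *
 *	struct guest_partition *prtn = virt_get_guest(guest_id);
 *
 *	if (prtn) {
 *		do_work(prtn);
 *		virt_put_guest(prtn);
 *	}
 *
 * virt_next_guest() follows the same convention while iterating: it
 * releases the reference on the partition passed in and returns the
 * next live partition with a new reference held, so feeding its return
 * value back in walks the whole list.
 */
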
void virt_on_stdcall(void)
{
	struct guest_partition *prtn = get_current_prtn();

	/* Initialize runtime on first std call */
	if (!prtn->runtime_initialized) {
		mutex_lock(&prtn->mutex);
		if (!prtn->runtime_initialized) {
			init_tee_runtime();
			call_driver_initcalls();
			prtn->runtime_initialized = true;
		}
		mutex_unlock(&prtn->mutex);
	}
}

struct memory_map *virt_get_memory_map(void)
{
	struct guest_partition *prtn;

	prtn = get_current_prtn();

	if (!prtn)
		return NULL;

	return &prtn->mem_map;
}

void virt_get_ta_ram(vaddr_t *start, vaddr_t *end)
{
	struct guest_partition *prtn = get_current_prtn();

	*start = (vaddr_t)phys_to_virt(tee_mm_get_smem(prtn->ta_ram),
				       MEM_AREA_SEC_RAM_OVERALL,
				       tee_mm_get_bytes(prtn->ta_ram));
	*end = *start + tee_mm_get_bytes(prtn->ta_ram);
}

#ifdef CFG_CORE_SEL1_SPMC
static int find_cookie(struct guest_partition *prtn, uint64_t cookie)
{
	int i = 0;

	for (i = 0; i < prtn->cookie_count; i++)
		if (prtn->cookies[i] == cookie)
			return i;
	return -1;
}

static struct guest_partition *find_prtn_cookie(uint64_t cookie, int *idx)
{
	struct guest_partition *prtn = NULL;
	int i = 0;

	LIST_FOREACH(prtn, &prtn_list, link) {
		i = find_cookie(prtn, cookie);
		if (i >= 0) {
			if (idx)
				*idx = i;
			return prtn;
		}
	}

	return NULL;
}

TEE_Result virt_add_cookie_to_current_guest(uint64_t cookie)
{
	TEE_Result res = TEE_ERROR_ACCESS_DENIED;
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	if (find_prtn_cookie(cookie, NULL))
		goto out;

	prtn = current_partition[get_core_pos()];
	if (prtn->cookie_count < ARRAY_SIZE(prtn->cookies)) {
		prtn->cookies[prtn->cookie_count] = cookie;
		prtn->cookie_count++;
		res = TEE_SUCCESS;
	}
out:
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return res;
}

void virt_remove_cookie(uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_prtn_cookie(cookie, &i);
	if (prtn) {
		memmove(prtn->cookies + i, prtn->cookies + i + 1,
			sizeof(uint64_t) * (prtn->cookie_count - i - 1));
		prtn->cookie_count--;
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
}

uint16_t virt_find_guest_by_cookie(uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;
	uint16_t ret = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_prtn_cookie(cookie, NULL);
	if (prtn)
		ret = prtn->id;

	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return ret;
}

bitstr_t *virt_get_shm_bits(void)
{
	return get_current_prtn()->shm_bits;
}

static TEE_Result reclaim_cookie(struct guest_partition *prtn, uint64_t cookie)
{
	if (cookie & FFA_MEMORY_HANDLE_HYPERVISOR_BIT) {
		size_t n = 0;

		for (n = 0; n < prtn->cookie_count; n++) {
			if (prtn->cookies[n] == cookie) {
				memmove(prtn->cookies + n,
					prtn->cookies + n + 1,
					sizeof(uint64_t) *
					(prtn->cookie_count - n - 1));
				prtn->cookie_count--;
				return TEE_SUCCESS;
			}
		}
	} else {
		uint64_t mask = FFA_MEMORY_HANDLE_NON_SECURE_BIT |
				SHIFT_U64(FFA_MEMORY_HANDLE_PRTN_MASK,
					  FFA_MEMORY_HANDLE_PRTN_SHIFT);
		int64_t i = cookie & ~mask;

		if (i >= 0 && i < SPMC_CORE_SEL1_MAX_SHM_COUNT &&
		    bit_test(prtn->shm_bits, i)) {
			bit_clear(prtn->shm_bits, i);
			return TEE_SUCCESS;
		}
	}

	return TEE_ERROR_ITEM_NOT_FOUND;
}

TEE_Result virt_reclaim_cookie_from_destroyed_guest(uint16_t guest_id,
						    uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	TEE_Result res = TEE_ERROR_ITEM_NOT_FOUND;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_FOREACH(prtn, &prtn_destroy_list, link) {
		if (prtn->id == guest_id) {
			res = reclaim_cookie(prtn, cookie);
			if (prtn_have_remaining_resources(prtn))
				prtn = NULL;
			else
				LIST_REMOVE(prtn, link);
			break;
		}
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	nex_free(prtn);

	return res;
}
#endif /*CFG_CORE_SEL1_SPMC*/

TEE_Result virt_add_guest_spec_data(unsigned int *data_id, size_t data_size,
				    void (*data_destroy)(void *data))
{
	void *p = NULL;

	/*
	 * This function only executes successfully in a single threaded
	 * environment before exiting to the normal world the first time.
	 * If add_disabled is true, it means we're not in this environment
	 * any longer.
	 */

	if (add_disabled)
		return TEE_ERROR_BAD_PARAMETERS;

	p = nex_realloc(gsd_array, sizeof(*gsd_array) * (gsd_count + 1));
	if (!p)
		return TEE_ERROR_OUT_OF_MEMORY;
	gsd_array = p;

	gsd_array[gsd_count] = (struct guest_spec_data){
		.size = data_size,
		.destroy = data_destroy,
	};
	*data_id = gsd_count + 1;
	gsd_count++;
	return TEE_SUCCESS;
}

void *virt_get_guest_spec_data(struct guest_partition *prtn,
			       unsigned int data_id)
{
	assert(data_id);
	if (!data_id || !prtn || data_id > gsd_count)
		return NULL;
	return prtn->data_array[data_id - 1];
}

static TEE_Result virt_disable_add(void)
{
	add_disabled = true;

	return TEE_SUCCESS;
}
nex_release_init_resource(virt_disable_add);
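
/*
 * Usage sketch for the guest specific data API above, assuming a
 * hypothetical driver with a struct my_guest_state, a my_state_id
 * variable and a my_driver_init() early initcall. Registration must
 * happen before the first exit to the normal world (while add_disabled
 * is still false); the returned data_id is later used to look up the
 * per-guest instance:
 *
 *	struct my_guest_state {
 *		unsigned int counter;
 *	};
 *
 *	static unsigned int my_state_id;
 *
 *	static TEE_Result my_driver_init(void)
 *	{
 *		return virt_add_guest_spec_data(&my_state_id,
 *						sizeof(struct my_guest_state),
 *						NULL);
 *	}
 *
 * Later, with a reference to a guest partition held:
 *
 *	struct my_guest_state *s = virt_get_guest_spec_data(prtn,
 *							    my_state_id);
 */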