1 // SPDX-License-Identifier: BSD-2-Clause 2 /* Copyright (c) 2018, EPAM Systems. All rights reserved. */ 3 4 #include <compiler.h> 5 #include <platform_config.h> 6 #include <kernel/boot.h> 7 #include <kernel/linker.h> 8 #include <kernel/mutex.h> 9 #include <kernel/misc.h> 10 #include <kernel/panic.h> 11 #include <kernel/refcount.h> 12 #include <kernel/spinlock.h> 13 #include <kernel/virtualization.h> 14 #include <mm/core_memprot.h> 15 #include <mm/core_mmu.h> 16 #include <mm/tee_mm.h> 17 #include <platform_config.h> 18 #include <sm/optee_smc.h> 19 #include <string.h> 20 #include <util.h> 21 22 static unsigned int prtn_list_lock __nex_data = SPINLOCK_UNLOCK; 23 24 static LIST_HEAD(prtn_list_head, guest_partition) prtn_list __nex_data = 25 LIST_HEAD_INITIALIZER(prtn_list_head); 26 27 /* Free pages used for guest partitions */ 28 tee_mm_pool_t virt_mapper_pool __nex_bss; 29 30 /* Memory used by OP-TEE core */ 31 struct tee_mmap_region *kmemory_map __nex_bss; 32 33 struct guest_partition { 34 LIST_ENTRY(guest_partition) link; 35 struct mmu_partition *mmu_prtn; 36 struct tee_mmap_region *memory_map; 37 struct mutex mutex; 38 void *tables_va; 39 tee_mm_entry_t *tee_ram; 40 tee_mm_entry_t *ta_ram; 41 tee_mm_entry_t *tables; 42 bool runtime_initialized; 43 uint16_t id; 44 struct refcount refc; 45 }; 46 47 struct guest_partition *current_partition[CFG_TEE_CORE_NB_CORE] __nex_bss; 48 49 static struct guest_partition *get_current_prtn(void) 50 { 51 struct guest_partition *ret; 52 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 53 54 ret = current_partition[get_core_pos()]; 55 56 thread_unmask_exceptions(exceptions); 57 58 return ret; 59 } 60 61 static void set_current_prtn(struct guest_partition *prtn) 62 { 63 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 64 65 current_partition[get_core_pos()] = prtn; 66 67 thread_unmask_exceptions(exceptions); 68 } 69 70 static size_t get_ta_ram_size(void) 71 { 72 return ROUNDDOWN(TA_RAM_SIZE / CFG_VIRT_GUEST_COUNT - 73 VCORE_UNPG_RW_SZ - 74 core_mmu_get_total_pages_size(), SMALL_PAGE_SIZE); 75 } 76 77 static struct tee_mmap_region *prepare_memory_map(paddr_t tee_data, 78 paddr_t ta_ram) 79 { 80 int i, entries; 81 vaddr_t max_va = 0; 82 struct tee_mmap_region *map; 83 /* 84 * This function assumes that at time of operation, 85 * kmemory_map (aka static_memory_map from core_mmu.c) 86 * will not be altered. This is true, because all 87 * changes to static_memory_map are done during 88 * OP-TEE initialization, while this function will 89 * called when hypervisor creates a guest. 90 */ 91 92 /* Count number of entries in nexus memory map */ 93 for (map = kmemory_map, entries = 1; map->type != MEM_AREA_END; 94 map++, entries++) 95 ; 96 97 /* Allocate entries for virtual guest map */ 98 map = nex_calloc(entries + 1, sizeof(struct tee_mmap_region)); 99 if (!map) 100 return NULL; 101 102 memcpy(map, kmemory_map, sizeof(*map) * entries); 103 104 /* Map TEE .data and .bss sections */ 105 for (i = 0; i < entries; i++) { 106 if (map[i].va == (vaddr_t)(VCORE_UNPG_RW_PA)) { 107 map[i].type = MEM_AREA_TEE_RAM_RW; 108 map[i].attr = core_mmu_type_to_attr(map[i].type); 109 map[i].pa = tee_data; 110 } 111 if (map[i].va + map[i].size > max_va) 112 max_va = map[i].va + map[i].size; 113 } 114 115 /* Map TA_RAM */ 116 assert(map[entries - 1].type == MEM_AREA_END); 117 map[entries] = map[entries - 1]; 118 map[entries - 1].region_size = SMALL_PAGE_SIZE; 119 map[entries - 1].va = ROUNDUP(max_va, map[entries - 1].region_size); 120 map[entries - 1].va += 121 (ta_ram - map[entries - 1].va) & CORE_MMU_PGDIR_MASK; 122 map[entries - 1].pa = ta_ram; 123 map[entries - 1].size = get_ta_ram_size(); 124 map[entries - 1].type = MEM_AREA_TA_RAM; 125 map[entries - 1].attr = core_mmu_type_to_attr(map[entries - 1].type); 126 127 DMSG("New map (%08lx):", (vaddr_t)(VCORE_UNPG_RW_PA)); 128 129 for (i = 0; i < entries; i++) 130 DMSG("T: %-16s rsz: %08x, pa: %08lx, va: %08lx, sz: %08lx attr: %x", 131 teecore_memtype_name(map[i].type), 132 map[i].region_size, map[i].pa, map[i].va, 133 map[i].size, map[i].attr); 134 return map; 135 } 136 137 void virt_init_memory(struct tee_mmap_region *memory_map, paddr_t secmem0_base, 138 paddr_size_t secmem0_size, paddr_t secmem1_base, 139 paddr_size_t secmem1_size) 140 { 141 struct tee_mmap_region *map = NULL; 142 paddr_size_t size = secmem0_size; 143 paddr_t base = secmem0_base; 144 145 if (secmem1_size) { 146 assert(secmem0_base + secmem0_size <= secmem1_base); 147 size = secmem1_base + secmem1_size - base; 148 } 149 150 /* Init page pool that covers all secure RAM */ 151 if (!tee_mm_init(&virt_mapper_pool, base, size, 152 SMALL_PAGE_SHIFT, TEE_MM_POOL_NEX_MALLOC)) 153 panic("Can't create pool with free pages"); 154 DMSG("Created virtual mapper pool from %"PRIxPA" to %"PRIxPA, 155 base, base + size); 156 157 if (secmem1_size) { 158 /* Carve out an eventual gap between secmem0 and secmem1 */ 159 base = secmem0_base + secmem0_size; 160 size = secmem1_base - base; 161 if (size) { 162 DMSG("Carving out gap between secmem0 and secmem1 (0x%"PRIxPA":0x%"PRIxPASZ")", 163 base, size); 164 if (!tee_mm_alloc2(&virt_mapper_pool, base, size)) 165 panic("Can't carve out secmem gap"); 166 } 167 } 168 169 170 /* Carve out areas that are used by OP-TEE core */ 171 for (map = memory_map; map->type != MEM_AREA_END; map++) { 172 switch (map->type) { 173 case MEM_AREA_TEE_RAM_RX: 174 case MEM_AREA_TEE_RAM_RO: 175 case MEM_AREA_NEX_RAM_RO: 176 case MEM_AREA_NEX_RAM_RW: 177 DMSG("Carving out area of type %d (0x%08lx-0x%08lx)", 178 map->type, map->pa, map->pa + map->size); 179 if (!tee_mm_alloc2(&virt_mapper_pool, map->pa, 180 map->size)) 181 panic("Can't carve out used area"); 182 break; 183 default: 184 continue; 185 } 186 } 187 188 kmemory_map = memory_map; 189 } 190 191 192 static TEE_Result configure_guest_prtn_mem(struct guest_partition *prtn) 193 { 194 TEE_Result res = TEE_SUCCESS; 195 paddr_t original_data_pa = 0; 196 197 prtn->tee_ram = tee_mm_alloc(&virt_mapper_pool, VCORE_UNPG_RW_SZ); 198 if (!prtn->tee_ram) { 199 EMSG("Can't allocate memory for TEE runtime context"); 200 res = TEE_ERROR_OUT_OF_MEMORY; 201 goto err; 202 } 203 DMSG("TEE RAM: %08" PRIxPA, tee_mm_get_smem(prtn->tee_ram)); 204 205 prtn->ta_ram = tee_mm_alloc(&virt_mapper_pool, get_ta_ram_size()); 206 if (!prtn->ta_ram) { 207 EMSG("Can't allocate memory for TA data"); 208 res = TEE_ERROR_OUT_OF_MEMORY; 209 goto err; 210 } 211 DMSG("TA RAM: %08" PRIxPA, tee_mm_get_smem(prtn->ta_ram)); 212 213 prtn->tables = tee_mm_alloc(&virt_mapper_pool, 214 core_mmu_get_total_pages_size()); 215 if (!prtn->tables) { 216 EMSG("Can't allocate memory for page tables"); 217 res = TEE_ERROR_OUT_OF_MEMORY; 218 goto err; 219 } 220 221 prtn->tables_va = phys_to_virt(tee_mm_get_smem(prtn->tables), 222 MEM_AREA_SEC_RAM_OVERALL, 223 core_mmu_get_total_pages_size()); 224 assert(prtn->tables_va); 225 226 prtn->mmu_prtn = core_alloc_mmu_prtn(prtn->tables_va); 227 if (!prtn->mmu_prtn) { 228 res = TEE_ERROR_OUT_OF_MEMORY; 229 goto err; 230 } 231 232 prtn->memory_map = prepare_memory_map(tee_mm_get_smem(prtn->tee_ram), 233 tee_mm_get_smem(prtn->ta_ram)); 234 if (!prtn->memory_map) { 235 res = TEE_ERROR_OUT_OF_MEMORY; 236 goto err; 237 } 238 239 core_init_mmu_prtn(prtn->mmu_prtn, prtn->memory_map); 240 241 original_data_pa = virt_to_phys(__data_start); 242 /* Switch to guest's mappings */ 243 core_mmu_set_prtn(prtn->mmu_prtn); 244 245 /* clear .bss */ 246 memset((void *)(VCORE_UNPG_RW_PA), 0, VCORE_UNPG_RW_SZ); 247 248 /* copy .data section from R/O original */ 249 memcpy(__data_start, 250 phys_to_virt(original_data_pa, MEM_AREA_SEC_RAM_OVERALL, 251 __data_end - __data_start), 252 __data_end - __data_start); 253 254 return TEE_SUCCESS; 255 256 err: 257 if (prtn->tee_ram) 258 tee_mm_free(prtn->tee_ram); 259 if (prtn->ta_ram) 260 tee_mm_free(prtn->ta_ram); 261 if (prtn->tables) 262 tee_mm_free(prtn->tables); 263 nex_free(prtn->mmu_prtn); 264 nex_free(prtn->memory_map); 265 266 return res; 267 } 268 269 TEE_Result virt_guest_created(uint16_t guest_id) 270 { 271 struct guest_partition *prtn = NULL; 272 TEE_Result res = TEE_SUCCESS; 273 uint32_t exceptions = 0; 274 275 prtn = nex_calloc(1, sizeof(*prtn)); 276 if (!prtn) 277 return TEE_ERROR_OUT_OF_MEMORY; 278 279 prtn->id = guest_id; 280 mutex_init(&prtn->mutex); 281 refcount_set(&prtn->refc, 1); 282 res = configure_guest_prtn_mem(prtn); 283 if (res) { 284 nex_free(prtn); 285 return res; 286 } 287 288 set_current_prtn(prtn); 289 290 /* Initialize threads */ 291 thread_init_threads(); 292 /* Do the preinitcalls */ 293 call_preinitcalls(); 294 295 exceptions = cpu_spin_lock_xsave(&prtn_list_lock); 296 LIST_INSERT_HEAD(&prtn_list, prtn, link); 297 cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions); 298 299 IMSG("Added guest %d", guest_id); 300 301 set_current_prtn(NULL); 302 core_mmu_set_default_prtn(); 303 304 return TEE_SUCCESS; 305 } 306 307 TEE_Result virt_guest_destroyed(uint16_t guest_id) 308 { 309 struct guest_partition *prtn; 310 uint32_t exceptions; 311 312 IMSG("Removing guest %d", guest_id); 313 314 exceptions = cpu_spin_lock_xsave(&prtn_list_lock); 315 316 LIST_FOREACH(prtn, &prtn_list, link) { 317 if (prtn->id == guest_id) { 318 LIST_REMOVE(prtn, link); 319 break; 320 } 321 } 322 cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions); 323 324 if (prtn) { 325 if (!refcount_dec(&prtn->refc)) { 326 EMSG("Guest thread(s) is still running. refc = %d", 327 refcount_val(&prtn->refc)); 328 panic(); 329 } 330 331 tee_mm_free(prtn->tee_ram); 332 tee_mm_free(prtn->ta_ram); 333 tee_mm_free(prtn->tables); 334 core_free_mmu_prtn(prtn->mmu_prtn); 335 nex_free(prtn->memory_map); 336 nex_free(prtn); 337 } else 338 EMSG("Client with id %d is not found", guest_id); 339 340 return TEE_SUCCESS; 341 } 342 343 TEE_Result virt_set_guest(uint16_t guest_id) 344 { 345 struct guest_partition *prtn; 346 uint32_t exceptions; 347 348 prtn = get_current_prtn(); 349 350 /* This can be true only if we return from IRQ RPC */ 351 if (prtn && prtn->id == guest_id) 352 return TEE_SUCCESS; 353 354 if (prtn) 355 panic("Virtual guest partition is already set"); 356 357 exceptions = cpu_spin_lock_xsave(&prtn_list_lock); 358 LIST_FOREACH(prtn, &prtn_list, link) { 359 if (prtn->id == guest_id) { 360 set_current_prtn(prtn); 361 core_mmu_set_prtn(prtn->mmu_prtn); 362 refcount_inc(&prtn->refc); 363 cpu_spin_unlock_xrestore(&prtn_list_lock, 364 exceptions); 365 return TEE_SUCCESS; 366 } 367 } 368 cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions); 369 370 if (guest_id == HYP_CLNT_ID) 371 return TEE_SUCCESS; 372 return TEE_ERROR_ITEM_NOT_FOUND; 373 } 374 375 void virt_unset_guest(void) 376 { 377 struct guest_partition *prtn = get_current_prtn(); 378 379 if (!prtn) 380 return; 381 382 set_current_prtn(NULL); 383 core_mmu_set_default_prtn(); 384 if (refcount_dec(&prtn->refc)) 385 panic(); 386 } 387 388 void virt_on_stdcall(void) 389 { 390 struct guest_partition *prtn = get_current_prtn(); 391 392 /* Initialize runtime on first std call */ 393 if (!prtn->runtime_initialized) { 394 mutex_lock(&prtn->mutex); 395 if (!prtn->runtime_initialized) { 396 init_tee_runtime(); 397 prtn->runtime_initialized = true; 398 } 399 mutex_unlock(&prtn->mutex); 400 } 401 } 402 403 struct tee_mmap_region *virt_get_memory_map(void) 404 { 405 struct guest_partition *prtn; 406 407 prtn = get_current_prtn(); 408 409 if (!prtn) 410 return NULL; 411 412 return prtn->memory_map; 413 } 414 415 void virt_get_ta_ram(vaddr_t *start, vaddr_t *end) 416 { 417 struct guest_partition *prtn = get_current_prtn(); 418 419 *start = (vaddr_t)phys_to_virt(tee_mm_get_smem(prtn->ta_ram), 420 MEM_AREA_TA_RAM, 421 tee_mm_get_bytes(prtn->ta_ram)); 422 *end = *start + tee_mm_get_bytes(prtn->ta_ram); 423 } 424