1 // SPDX-License-Identifier: BSD-2-Clause 2 /* Copyright (c) 2018, EPAM Systems. All rights reserved. */ 3 4 #include <compiler.h> 5 #include <platform_config.h> 6 #include <kernel/boot.h> 7 #include <kernel/linker.h> 8 #include <kernel/mutex.h> 9 #include <kernel/misc.h> 10 #include <kernel/panic.h> 11 #include <kernel/refcount.h> 12 #include <kernel/spinlock.h> 13 #include <kernel/virtualization.h> 14 #include <mm/core_memprot.h> 15 #include <mm/core_mmu.h> 16 #include <mm/tee_mm.h> 17 #include <platform_config.h> 18 #include <sm/optee_smc.h> 19 #include <string.h> 20 #include <util.h> 21 22 static unsigned int prtn_list_lock __nex_data = SPINLOCK_UNLOCK; 23 24 static LIST_HEAD(prtn_list_head, guest_partition) prtn_list __nex_data = 25 LIST_HEAD_INITIALIZER(prtn_list_head); 26 27 /* Free pages used for guest partitions */ 28 tee_mm_pool_t virt_mapper_pool __nex_bss; 29 30 /* Memory used by OP-TEE core */ 31 struct tee_mmap_region *kmemory_map __nex_bss; 32 33 struct guest_partition { 34 LIST_ENTRY(guest_partition) link; 35 struct mmu_partition *mmu_prtn; 36 struct tee_mmap_region *memory_map; 37 struct mutex mutex; 38 void *tables_va; 39 tee_mm_entry_t *tee_ram; 40 tee_mm_entry_t *ta_ram; 41 tee_mm_entry_t *tables; 42 bool runtime_initialized; 43 uint16_t id; 44 struct refcount refc; 45 }; 46 47 struct guest_partition *current_partition[CFG_TEE_CORE_NB_CORE] __nex_bss; 48 49 static struct guest_partition *get_current_prtn(void) 50 { 51 struct guest_partition *ret; 52 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 53 54 ret = current_partition[get_core_pos()]; 55 56 thread_unmask_exceptions(exceptions); 57 58 return ret; 59 } 60 61 static void set_current_prtn(struct guest_partition *prtn) 62 { 63 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 64 65 current_partition[get_core_pos()] = prtn; 66 67 thread_unmask_exceptions(exceptions); 68 } 69 70 static size_t get_ta_ram_size(void) 71 { 72 return ROUNDDOWN(TA_RAM_SIZE / CFG_VIRT_GUEST_COUNT - 73 VCORE_UNPG_RW_SZ - 74 core_mmu_get_total_pages_size(), SMALL_PAGE_SIZE); 75 } 76 77 static struct tee_mmap_region *prepare_memory_map(paddr_t tee_data, 78 paddr_t ta_ram) 79 { 80 int i, entries; 81 vaddr_t max_va = 0; 82 struct tee_mmap_region *map; 83 /* 84 * This function assumes that at time of operation, 85 * kmemory_map (aka static_memory_map from core_mmu.c) 86 * will not be altered. This is true, because all 87 * changes to static_memory_map are done during 88 * OP-TEE initialization, while this function will 89 * called when hypervisor creates a guest. 90 */ 91 92 /* Count number of entries in nexus memory map */ 93 for (map = kmemory_map, entries = 1; map->type != MEM_AREA_END; 94 map++, entries++) 95 ; 96 97 /* Allocate entries for virtual guest map */ 98 map = nex_calloc(entries + 1, sizeof(struct tee_mmap_region)); 99 if (!map) 100 return NULL; 101 102 memcpy(map, kmemory_map, sizeof(*map) * entries); 103 104 /* Map TEE .data and .bss sections */ 105 for (i = 0; i < entries; i++) { 106 if (map[i].va == (vaddr_t)(VCORE_UNPG_RW_PA)) { 107 map[i].type = MEM_AREA_TEE_RAM_RW; 108 map[i].attr = core_mmu_type_to_attr(map[i].type); 109 map[i].pa = tee_data; 110 } 111 if (map[i].va + map[i].size > max_va) 112 max_va = map[i].va + map[i].size; 113 } 114 115 /* Map TA_RAM */ 116 assert(map[entries - 1].type == MEM_AREA_END); 117 map[entries] = map[entries - 1]; 118 map[entries - 1].region_size = SMALL_PAGE_SIZE; 119 map[entries - 1].va = ROUNDUP(max_va, map[entries - 1].region_size); 120 map[entries - 1].va += 121 (ta_ram - map[entries - 1].va) & CORE_MMU_PGDIR_MASK; 122 map[entries - 1].pa = ta_ram; 123 map[entries - 1].size = get_ta_ram_size(); 124 map[entries - 1].type = MEM_AREA_TA_RAM; 125 map[entries - 1].attr = core_mmu_type_to_attr(map[entries - 1].type); 126 127 DMSG("New map (%08lx):", (vaddr_t)(VCORE_UNPG_RW_PA)); 128 129 for (i = 0; i < entries; i++) 130 DMSG("T: %-16s rsz: %08x, pa: %08lx, va: %08lx, sz: %08lx attr: %x", 131 teecore_memtype_name(map[i].type), 132 map[i].region_size, map[i].pa, map[i].va, 133 map[i].size, map[i].attr); 134 return map; 135 } 136 137 void virt_init_memory(struct tee_mmap_region *memory_map) 138 { 139 struct tee_mmap_region *map; 140 141 /* Init page pool that covers all secure RAM */ 142 if (!tee_mm_init(&virt_mapper_pool, TEE_RAM_START, 143 TA_RAM_START + TA_RAM_SIZE, 144 SMALL_PAGE_SHIFT, 145 TEE_MM_POOL_NEX_MALLOC)) 146 panic("Can't create pool with free pages"); 147 DMSG("Created virtual mapper pool from %x to %x", 148 TEE_RAM_START, TA_RAM_START + TA_RAM_SIZE); 149 150 /* Carve out areas that are used by OP-TEE core */ 151 for (map = memory_map; map->type != MEM_AREA_END; map++) { 152 switch (map->type) { 153 case MEM_AREA_TEE_RAM_RX: 154 case MEM_AREA_TEE_RAM_RO: 155 case MEM_AREA_NEX_RAM_RO: 156 case MEM_AREA_NEX_RAM_RW: 157 DMSG("Carving out area of type %d (0x%08lx-0x%08lx)", 158 map->type, map->pa, map->pa + map->size); 159 if (!tee_mm_alloc2(&virt_mapper_pool, map->pa, 160 map->size)) 161 panic("Can't carve out used area"); 162 break; 163 default: 164 continue; 165 } 166 } 167 168 kmemory_map = memory_map; 169 } 170 171 172 static int configure_guest_prtn_mem(struct guest_partition *prtn) 173 { 174 int ret; 175 paddr_t original_data_pa; 176 177 prtn->tee_ram = tee_mm_alloc(&virt_mapper_pool, VCORE_UNPG_RW_SZ); 178 if (!prtn->tee_ram) { 179 EMSG("Can't allocate memory for TEE runtime context"); 180 ret = TEE_ERROR_OUT_OF_MEMORY; 181 goto err; 182 } 183 DMSG("TEE RAM: %08" PRIxPA, tee_mm_get_smem(prtn->tee_ram)); 184 185 prtn->ta_ram = tee_mm_alloc(&virt_mapper_pool, get_ta_ram_size()); 186 if (!prtn->ta_ram) { 187 EMSG("Can't allocate memory for TA data"); 188 ret = TEE_ERROR_OUT_OF_MEMORY; 189 goto err; 190 } 191 DMSG("TA RAM: %08" PRIxPA, tee_mm_get_smem(prtn->ta_ram)); 192 193 prtn->tables = tee_mm_alloc(&virt_mapper_pool, 194 core_mmu_get_total_pages_size()); 195 if (!prtn->tables) { 196 EMSG("Can't allocate memory for page tables"); 197 ret = TEE_ERROR_OUT_OF_MEMORY; 198 goto err; 199 } 200 201 prtn->tables_va = phys_to_virt(tee_mm_get_smem(prtn->tables), 202 MEM_AREA_SEC_RAM_OVERALL); 203 assert(prtn->tables_va); 204 205 prtn->mmu_prtn = core_alloc_mmu_prtn(prtn->tables_va); 206 if (!prtn->mmu_prtn) { 207 ret = TEE_ERROR_OUT_OF_MEMORY; 208 goto err; 209 } 210 211 prtn->memory_map = prepare_memory_map(tee_mm_get_smem(prtn->tee_ram), 212 tee_mm_get_smem(prtn->ta_ram)); 213 if (!prtn->memory_map) { 214 ret = TEE_ERROR_OUT_OF_MEMORY; 215 goto err; 216 } 217 218 core_init_mmu_prtn(prtn->mmu_prtn, prtn->memory_map); 219 220 original_data_pa = virt_to_phys(__data_start); 221 /* Switch to guest's mappings */ 222 core_mmu_set_prtn(prtn->mmu_prtn); 223 224 /* clear .bss */ 225 memset((void *)(VCORE_UNPG_RW_PA), 0, VCORE_UNPG_RW_SZ); 226 227 /* copy .data section from R/O original */ 228 memcpy(__data_start, 229 phys_to_virt(original_data_pa, MEM_AREA_SEC_RAM_OVERALL), 230 __data_end - __data_start); 231 232 return 0; 233 234 err: 235 if (prtn->tee_ram) 236 tee_mm_free(prtn->tee_ram); 237 if (prtn->ta_ram) 238 tee_mm_free(prtn->ta_ram); 239 if (prtn->tables) 240 tee_mm_free(prtn->tables); 241 nex_free(prtn->mmu_prtn); 242 nex_free(prtn->memory_map); 243 244 return ret; 245 } 246 247 uint32_t virt_guest_created(uint16_t guest_id) 248 { 249 struct guest_partition *prtn; 250 uint32_t exceptions; 251 252 prtn = nex_calloc(1, sizeof(*prtn)); 253 if (!prtn) 254 return OPTEE_SMC_RETURN_ENOTAVAIL; 255 256 prtn->id = guest_id; 257 mutex_init(&prtn->mutex); 258 refcount_set(&prtn->refc, 1); 259 if (configure_guest_prtn_mem(prtn)) { 260 nex_free(prtn); 261 return OPTEE_SMC_RETURN_ENOTAVAIL; 262 } 263 264 set_current_prtn(prtn); 265 266 /* Initialize threads */ 267 thread_init_threads(); 268 269 exceptions = cpu_spin_lock_xsave(&prtn_list_lock); 270 LIST_INSERT_HEAD(&prtn_list, prtn, link); 271 cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions); 272 273 IMSG("Added guest %d", guest_id); 274 275 set_current_prtn(NULL); 276 core_mmu_set_default_prtn(); 277 return OPTEE_SMC_RETURN_OK; 278 } 279 280 uint32_t virt_guest_destroyed(uint16_t guest_id) 281 { 282 struct guest_partition *prtn; 283 uint32_t exceptions; 284 285 IMSG("Removing guest %d", guest_id); 286 287 exceptions = cpu_spin_lock_xsave(&prtn_list_lock); 288 289 LIST_FOREACH(prtn, &prtn_list, link) { 290 if (prtn->id == guest_id) { 291 LIST_REMOVE(prtn, link); 292 break; 293 } 294 } 295 cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions); 296 297 if (prtn) { 298 if (!refcount_dec(&prtn->refc)) { 299 EMSG("Guest thread(s) is still running. refc = %d", 300 refcount_val(&prtn->refc)); 301 panic(); 302 } 303 304 tee_mm_free(prtn->tee_ram); 305 tee_mm_free(prtn->ta_ram); 306 tee_mm_free(prtn->tables); 307 core_free_mmu_prtn(prtn->mmu_prtn); 308 nex_free(prtn->memory_map); 309 nex_free(prtn); 310 } else 311 EMSG("Client with id %d is not found", guest_id); 312 313 return OPTEE_SMC_RETURN_OK; 314 } 315 316 bool virt_set_guest(uint16_t guest_id) 317 { 318 struct guest_partition *prtn; 319 uint32_t exceptions; 320 321 prtn = get_current_prtn(); 322 323 /* This can be true only if we return from IRQ RPC */ 324 if (prtn && prtn->id == guest_id) 325 return true; 326 327 if (prtn) 328 panic("Virtual guest partition is already set"); 329 330 exceptions = cpu_spin_lock_xsave(&prtn_list_lock); 331 LIST_FOREACH(prtn, &prtn_list, link) { 332 if (prtn->id == guest_id) { 333 set_current_prtn(prtn); 334 core_mmu_set_prtn(prtn->mmu_prtn); 335 refcount_inc(&prtn->refc); 336 cpu_spin_unlock_xrestore(&prtn_list_lock, 337 exceptions); 338 return true; 339 } 340 } 341 cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions); 342 343 return guest_id == HYP_CLNT_ID; 344 } 345 346 void virt_unset_guest(void) 347 { 348 struct guest_partition *prtn = get_current_prtn(); 349 350 if (!prtn) 351 return; 352 353 set_current_prtn(NULL); 354 core_mmu_set_default_prtn(); 355 if (refcount_dec(&prtn->refc)) 356 panic(); 357 } 358 359 void virt_on_stdcall(void) 360 { 361 struct guest_partition *prtn = get_current_prtn(); 362 363 /* Initialize runtime on first std call */ 364 if (!prtn->runtime_initialized) { 365 mutex_lock(&prtn->mutex); 366 if (!prtn->runtime_initialized) { 367 init_tee_runtime(); 368 prtn->runtime_initialized = true; 369 } 370 mutex_unlock(&prtn->mutex); 371 } 372 } 373 374 struct tee_mmap_region *virt_get_memory_map(void) 375 { 376 struct guest_partition *prtn; 377 378 prtn = get_current_prtn(); 379 380 if (!prtn) 381 return NULL; 382 383 return prtn->memory_map; 384 } 385 386 void virt_get_ta_ram(vaddr_t *start, vaddr_t *end) 387 { 388 struct guest_partition *prtn = get_current_prtn(); 389 390 *start = (vaddr_t)phys_to_virt(tee_mm_get_smem(prtn->ta_ram), 391 MEM_AREA_TA_RAM); 392 *end = *start + tee_mm_get_bytes(prtn->ta_ram); 393 } 394