// SPDX-License-Identifier: BSD-2-Clause
/*
 * core/arch/arm/kernel/virtualization.c
 *
 * Copyright (c) 2018, EPAM Systems. All rights reserved.
 * Copyright (c) 2023, Linaro Limited
 */

#include <bitstring.h>
#include <compiler.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/thread_spmc.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/tee_mm.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <string.h>
#include <util.h>

static unsigned int prtn_list_lock __nex_data = SPINLOCK_UNLOCK;

static LIST_HEAD(prtn_list_head, guest_partition) prtn_list __nex_data =
	LIST_HEAD_INITIALIZER(prtn_list_head);

/* Free pages used for guest partitions */
tee_mm_pool_t virt_mapper_pool __nex_bss;

/* Memory used by OP-TEE core */
struct tee_mmap_region *kmemory_map __nex_bss;
struct guest_partition {
	LIST_ENTRY(guest_partition) link;
	struct mmu_partition *mmu_prtn;
	struct tee_mmap_region *memory_map;
	struct mutex mutex;
	void *tables_va;
	tee_mm_entry_t *tee_ram;
	tee_mm_entry_t *ta_ram;
	tee_mm_entry_t *tables;
	bool runtime_initialized;
	uint16_t id;
	struct refcount refc;
#ifdef CFG_CORE_SEL1_SPMC
	uint64_t cookies[SPMC_CORE_SEL1_MAX_SHM_COUNT];
	uint8_t cookie_count;
	bitstr_t bit_decl(shm_bits, SPMC_CORE_SEL1_MAX_SHM_COUNT);
#endif
};

struct guest_partition *current_partition[CFG_TEE_CORE_NB_CORE] __nex_bss;

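/*
 * current_partition[] records which guest partition each CPU core is
 * currently serving. Foreign interrupts are masked around the accesses
 * below so that the get_core_pos() lookup and the per-core array access
 * cannot be preempted in between.
 */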
static struct guest_partition *get_current_prtn(void)
{
	struct guest_partition *ret;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	ret = current_partition[get_core_pos()];

	thread_unmask_exceptions(exceptions);

	return ret;
}

uint16_t virt_get_current_guest_id(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return 0;
	return prtn->id;
}

static void set_current_prtn(struct guest_partition *prtn)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	current_partition[get_core_pos()] = prtn;

	thread_unmask_exceptions(exceptions);
}

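/*
 * Each guest gets an equal share of the TA RAM range, minus room for its
 * private copy of the core read/write sections and its translation
 * tables, rounded down to a page boundary.
 */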
static size_t get_ta_ram_size(void)
{
	size_t ta_size = 0;

	core_mmu_get_ta_range(NULL, &ta_size);
	return ROUNDDOWN(ta_size / CFG_VIRT_GUEST_COUNT - VCORE_UNPG_RW_SZ -
			 core_mmu_get_total_pages_size(), SMALL_PAGE_SIZE);
}

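/*
 * Builds a guest-specific copy of the nexus memory map: the core
 * read/write (.data/.bss) area is re-pointed at the guest's private copy
 * at tee_data, and an extra MEM_AREA_TA_RAM entry backed by ta_ram is
 * appended before the terminating MEM_AREA_END entry. Returns a nexus
 * heap allocation that the caller must release with nex_free(), or NULL
 * on allocation failure.
 */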
static struct tee_mmap_region *prepare_memory_map(paddr_t tee_data,
						  paddr_t ta_ram)
{
	int i, entries;
	vaddr_t max_va = 0;
	struct tee_mmap_region *map;
	/*
	 * This function assumes that kmemory_map (aka static_memory_map
	 * from core_mmu.c) is not altered while it runs. This holds
	 * because all changes to static_memory_map are done during
	 * OP-TEE initialization, while this function is called only when
	 * the hypervisor creates a guest.
	 */

	/* Count number of entries in nexus memory map */
	for (map = kmemory_map, entries = 1; map->type != MEM_AREA_END;
	     map++, entries++)
		;

	/* Allocate entries for virtual guest map */
	map = nex_calloc(entries + 1, sizeof(struct tee_mmap_region));
	if (!map)
		return NULL;

	memcpy(map, kmemory_map, sizeof(*map) * entries);

	/* Map TEE .data and .bss sections */
	for (i = 0; i < entries; i++) {
		if (map[i].va == (vaddr_t)(VCORE_UNPG_RW_PA)) {
			map[i].type = MEM_AREA_TEE_RAM_RW;
			map[i].attr = core_mmu_type_to_attr(map[i].type);
			map[i].pa = tee_data;
		}
		if (map[i].va + map[i].size > max_va)
			max_va = map[i].va + map[i].size;
	}

	/* Map TA_RAM */
	assert(map[entries - 1].type == MEM_AREA_END);
	map[entries] = map[entries - 1];
	map[entries - 1].region_size = SMALL_PAGE_SIZE;
	map[entries - 1].va = ROUNDUP(max_va, map[entries - 1].region_size);
	map[entries - 1].va +=
		(ta_ram - map[entries - 1].va) & CORE_MMU_PGDIR_MASK;
	map[entries - 1].pa = ta_ram;
	map[entries - 1].size = get_ta_ram_size();
	map[entries - 1].type = MEM_AREA_TA_RAM;
	map[entries - 1].attr = core_mmu_type_to_attr(map[entries - 1].type);

	DMSG("New map (%08lx):", (vaddr_t)(VCORE_UNPG_RW_PA));

	for (i = 0; i < entries; i++)
		DMSG("T: %-16s rsz: %08x, pa: %08lx, va: %08lx, sz: %08lx attr: %x",
		     teecore_memtype_name(map[i].type),
		     map[i].region_size, map[i].pa, map[i].va,
		     map[i].size, map[i].attr);
	return map;
}

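/*
 * Sets up the nexus page pool covering all secure RAM and removes from
 * it everything that is not available for guests: any hole between the
 * two secure memory ranges and the areas already occupied by the OP-TEE
 * core (read-only/executable and nexus sections). The remaining pages
 * are later handed out per guest by configure_guest_prtn_mem().
 */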
void virt_init_memory(struct tee_mmap_region *memory_map, paddr_t secmem0_base,
		      paddr_size_t secmem0_size, paddr_t secmem1_base,
		      paddr_size_t secmem1_size)
{
	struct tee_mmap_region *map = NULL;
	paddr_size_t size = secmem0_size;
	paddr_t base = secmem0_base;

	if (secmem1_size) {
		assert(secmem0_base + secmem0_size <= secmem1_base);
		size = secmem1_base + secmem1_size - base;
	}

	/* Init page pool that covers all secure RAM */
	if (!tee_mm_init(&virt_mapper_pool, base, size,
			 SMALL_PAGE_SHIFT, TEE_MM_POOL_NEX_MALLOC))
		panic("Can't create pool with free pages");
	DMSG("Created virtual mapper pool from %"PRIxPA" to %"PRIxPA,
	     base, base + size);

	if (secmem1_size) {
		/* Carve out any gap between secmem0 and secmem1 */
		base = secmem0_base + secmem0_size;
		size = secmem1_base - base;
		if (size) {
			DMSG("Carving out gap between secmem0 and secmem1 (0x%"PRIxPA":0x%"PRIxPASZ")",
			     base, size);
			if (!tee_mm_alloc2(&virt_mapper_pool, base, size))
				panic("Can't carve out secmem gap");
		}
	}

	/* Carve out areas that are used by OP-TEE core */
	for (map = memory_map; map->type != MEM_AREA_END; map++) {
		switch (map->type) {
		case MEM_AREA_TEE_RAM_RX:
		case MEM_AREA_TEE_RAM_RO:
		case MEM_AREA_NEX_RAM_RO:
		case MEM_AREA_NEX_RAM_RW:
			DMSG("Carving out area of type %d (0x%08lx-0x%08lx)",
			     map->type, map->pa, map->pa + map->size);
			if (!tee_mm_alloc2(&virt_mapper_pool, map->pa,
					   map->size))
				panic("Can't carve out used area");
			break;
		default:
			continue;
		}
	}

	kmemory_map = memory_map;
}

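/*
 * Allocates and initializes all memory a new guest needs: a private copy
 * of the core .data/.bss sections, a TA RAM slice and a set of
 * translation tables, all carved from virt_mapper_pool. The new MMU
 * partition is then activated so that .bss can be cleared and .data
 * seeded from the original copy (reached through the
 * MEM_AREA_SEC_RAM_OVERALL mapping); the caller is expected to switch
 * back to the default partition when it is done.
 */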
static TEE_Result configure_guest_prtn_mem(struct guest_partition *prtn)
{
	TEE_Result res = TEE_SUCCESS;
	paddr_t original_data_pa = 0;

	prtn->tee_ram = tee_mm_alloc(&virt_mapper_pool, VCORE_UNPG_RW_SZ);
	if (!prtn->tee_ram) {
		EMSG("Can't allocate memory for TEE runtime context");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TEE RAM: %08" PRIxPA, tee_mm_get_smem(prtn->tee_ram));

	prtn->ta_ram = tee_mm_alloc(&virt_mapper_pool, get_ta_ram_size());
	if (!prtn->ta_ram) {
		EMSG("Can't allocate memory for TA data");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TA RAM: %08" PRIxPA, tee_mm_get_smem(prtn->ta_ram));

	prtn->tables = tee_mm_alloc(&virt_mapper_pool,
				    core_mmu_get_total_pages_size());
	if (!prtn->tables) {
		EMSG("Can't allocate memory for page tables");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->tables_va = phys_to_virt(tee_mm_get_smem(prtn->tables),
				       MEM_AREA_SEC_RAM_OVERALL,
				       core_mmu_get_total_pages_size());
	assert(prtn->tables_va);

	prtn->mmu_prtn = core_alloc_mmu_prtn(prtn->tables_va);
	if (!prtn->mmu_prtn) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->memory_map = prepare_memory_map(tee_mm_get_smem(prtn->tee_ram),
					      tee_mm_get_smem(prtn->ta_ram));
	if (!prtn->memory_map) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	core_init_mmu_prtn(prtn->mmu_prtn, prtn->memory_map);

	original_data_pa = virt_to_phys(__data_start);
	/* Switch to guest's mappings */
	core_mmu_set_prtn(prtn->mmu_prtn);

	/* Clear .bss */
	memset((void *)(VCORE_UNPG_RW_PA), 0, VCORE_UNPG_RW_SZ);

	/* Copy .data section from the R/O original */
	memcpy(__data_start,
	       phys_to_virt(original_data_pa, MEM_AREA_SEC_RAM_OVERALL,
			    __data_end - __data_start),
	       __data_end - __data_start);

	return TEE_SUCCESS;

err:
	if (prtn->tee_ram)
		tee_mm_free(prtn->tee_ram);
	if (prtn->ta_ram)
		tee_mm_free(prtn->ta_ram);
	if (prtn->tables)
		tee_mm_free(prtn->tables);
	nex_free(prtn->mmu_prtn);
	nex_free(prtn->memory_map);

	return res;
}

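/*
 * Called when the hypervisor reports that a new guest VM has been
 * created, typically via the OPTEE_SMC_VM_CREATED fast call. The
 * partition starts with a reference count of 1 which is only dropped by
 * virt_guest_destroyed(). Threads and preinitcalls are initialized while
 * the guest mapping is active, then the core switches back to the
 * default (nexus) partition until the guest makes its first call.
 */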
TEE_Result virt_guest_created(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;

	prtn = nex_calloc(1, sizeof(*prtn));
	if (!prtn)
		return TEE_ERROR_OUT_OF_MEMORY;

	prtn->id = guest_id;
	mutex_init(&prtn->mutex);
	refcount_set(&prtn->refc, 1);
	res = configure_guest_prtn_mem(prtn);
	if (res) {
		nex_free(prtn);
		return res;
	}

	set_current_prtn(prtn);

	/* Initialize threads */
	thread_init_threads();
	/* Do the preinitcalls */
	call_preinitcalls();

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_INSERT_HEAD(&prtn_list, prtn, link);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	IMSG("Added guest %d", guest_id);

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();

	return TEE_SUCCESS;
}

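/*
 * Called when the hypervisor reports that a guest VM has been destroyed.
 * The partition is unlinked from prtn_list and its memory returned to
 * virt_mapper_pool. At this point no core may still be executing on
 * behalf of the guest: the initial reference must be the last one, and
 * the core panics otherwise.
 */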
TEE_Result virt_guest_destroyed(uint16_t guest_id)
{
	struct guest_partition *prtn;
	uint32_t exceptions;

	IMSG("Removing guest %d", guest_id);

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);

	LIST_FOREACH(prtn, &prtn_list, link) {
		if (prtn->id == guest_id) {
			LIST_REMOVE(prtn, link);
			break;
		}
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	if (prtn) {
		if (!refcount_dec(&prtn->refc)) {
			EMSG("Guest thread(s) are still running, refc = %d",
			     refcount_val(&prtn->refc));
			panic();
		}

		tee_mm_free(prtn->tee_ram);
		tee_mm_free(prtn->ta_ram);
		tee_mm_free(prtn->tables);
		core_free_mmu_prtn(prtn->mmu_prtn);
		nex_free(prtn->memory_map);
		nex_free(prtn);
	} else {
		EMSG("Client with id %d not found", guest_id);
	}

	return TEE_SUCCESS;
}

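/*
 * Binds the calling core to the partition of the given guest and takes a
 * reference on it; virt_unset_guest() drops the reference and restores
 * the default partition. Entering with another guest's partition already
 * set is a bug, while re-entering with the same guest is allowed since
 * it happens when returning from an IRQ RPC.
 *
 * Typical usage from the dispatch path (simplified sketch, error
 * handling elided):
 *
 *	if (virt_set_guest(guest_id))
 *		return an error to the normal world;
 *	handle the request with the guest's mappings active;
 *	virt_unset_guest();
 */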
TEE_Result virt_set_guest(uint16_t guest_id)
{
	struct guest_partition *prtn;
	uint32_t exceptions;

	prtn = get_current_prtn();

	/* This can be true only if we return from IRQ RPC */
	if (prtn && prtn->id == guest_id)
		return TEE_SUCCESS;

	if (prtn)
		panic("Virtual guest partition is already set");

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_FOREACH(prtn, &prtn_list, link) {
		if (prtn->id == guest_id) {
			set_current_prtn(prtn);
			core_mmu_set_prtn(prtn->mmu_prtn);
			refcount_inc(&prtn->refc);
			cpu_spin_unlock_xrestore(&prtn_list_lock,
						 exceptions);
			return TEE_SUCCESS;
		}
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return TEE_ERROR_ITEM_NOT_FOUND;
}

void virt_unset_guest(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return;

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();
	if (refcount_dec(&prtn->refc))
		panic();
}

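/*
 * Lazily initializes the per-guest TEE runtime on the first standard
 * call from that guest. The flag is checked twice, once without the
 * mutex and once under it, so that the common case costs only a read
 * while concurrent first calls still initialize exactly once.
 */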
void virt_on_stdcall(void)
{
	struct guest_partition *prtn = get_current_prtn();

	/* Initialize runtime on first std call */
	if (!prtn->runtime_initialized) {
		mutex_lock(&prtn->mutex);
		if (!prtn->runtime_initialized) {
			init_tee_runtime();
			prtn->runtime_initialized = true;
		}
		mutex_unlock(&prtn->mutex);
	}
}

struct tee_mmap_region *virt_get_memory_map(void)
{
	struct guest_partition *prtn;

	prtn = get_current_prtn();

	if (!prtn)
		return NULL;

	return prtn->memory_map;
}

void virt_get_ta_ram(vaddr_t *start, vaddr_t *end)
{
	struct guest_partition *prtn = get_current_prtn();

	*start = (vaddr_t)phys_to_virt(tee_mm_get_smem(prtn->ta_ram),
				       MEM_AREA_TA_RAM,
				       tee_mm_get_bytes(prtn->ta_ram));
	*end = *start + tee_mm_get_bytes(prtn->ta_ram);
}

#ifdef CFG_CORE_SEL1_SPMC
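/*
 * With the S-EL1 SPMC, shared memory objects are identified by cookies.
 * The cookies registered by a guest are recorded in its partition so
 * that a cookie can be mapped back to the owning guest
 * (virt_find_guest_by_cookie()) and so that the same cookie cannot be
 * registered by two guests at once.
 */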
static int find_cookie(struct guest_partition *prtn, uint64_t cookie)
{
	int i = 0;

	for (i = 0; i < prtn->cookie_count; i++)
		if (prtn->cookies[i] == cookie)
			return i;
	return -1;
}

static struct guest_partition *find_prtn_cookie(uint64_t cookie, int *idx)
{
	struct guest_partition *prtn = NULL;
	int i = 0;

	LIST_FOREACH(prtn, &prtn_list, link) {
		i = find_cookie(prtn, cookie);
		if (i >= 0) {
			if (idx)
				*idx = i;
			return prtn;
		}
	}

	return NULL;
}

TEE_Result virt_add_cookie_to_current_guest(uint64_t cookie)
{
	TEE_Result res = TEE_ERROR_ACCESS_DENIED;
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	if (find_prtn_cookie(cookie, NULL))
		goto out;

	prtn = current_partition[get_core_pos()];
	if (prtn->cookie_count < ARRAY_SIZE(prtn->cookies)) {
		prtn->cookies[prtn->cookie_count] = cookie;
		prtn->cookie_count++;
		res = TEE_SUCCESS;
	}
out:
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return res;
}

void virt_remove_cookie(uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_prtn_cookie(cookie, &i);
	if (prtn) {
		memmove(prtn->cookies + i, prtn->cookies + i + 1,
			sizeof(uint64_t) * (prtn->cookie_count - i - 1));
		prtn->cookie_count--;
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
}

uint16_t virt_find_guest_by_cookie(uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;
	uint16_t ret = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_prtn_cookie(cookie, NULL);
	if (prtn)
		ret = prtn->id;

	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return ret;
}

bitstr_t *virt_get_shm_bits(void)
{
	return get_current_prtn()->shm_bits;
}
#endif /*CFG_CORE_SEL1_SPMC*/