xref: /optee_os/core/arch/arm/kernel/virtualization.c (revision 070d197fa568917d4b32fa2b379098715016c52d)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018, EPAM Systems. All rights reserved.
 * Copyright (c) 2023, Linaro Limited
 */

#include <assert.h>
#include <compiler.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/thread_spmc.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/tee_mm.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <string.h>
#include <util.h>

static unsigned int prtn_list_lock __nex_data = SPINLOCK_UNLOCK;

static LIST_HEAD(prtn_list_head, guest_partition) prtn_list __nex_data =
	LIST_HEAD_INITIALIZER(prtn_list_head);

/* Pool of free pages used for guest partitions */
tee_mm_pool_t virt_mapper_pool __nex_bss;

/* Nexus memory map: memory used by OP-TEE core */
struct tee_mmap_region *kmemory_map __nex_bss;

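/*
 * Per-guest partition context: the guest's MMU partition and memory map,
 * the memory allocated for its TEE RAM, TA RAM and translation tables,
 * and a reference count used to ensure no thread still runs in the guest
 * when it is destroyed.
 */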
struct guest_partition {
	LIST_ENTRY(guest_partition) link;
	struct mmu_partition *mmu_prtn;
	struct tee_mmap_region *memory_map;
	struct mutex mutex;
	void *tables_va;
	tee_mm_entry_t *tee_ram;
	tee_mm_entry_t *ta_ram;
	tee_mm_entry_t *tables;
	bool runtime_initialized;
	uint16_t id;
	struct refcount refc;
#ifdef CFG_CORE_SEL1_SPMC
	uint64_t cookies[SPMC_CORE_SEL1_MAX_SHM_COUNT];
	uint8_t cookie_count;
#endif
};

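/* Guest partition currently active on each CPU core, or NULL if none */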
struct guest_partition *current_partition[CFG_TEE_CORE_NB_CORE] __nex_bss;

static struct guest_partition *get_current_prtn(void)
{
	struct guest_partition *ret;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	ret = current_partition[get_core_pos()];

	thread_unmask_exceptions(exceptions);

	return ret;
}

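/* Return the ID of the guest active on this core, or 0 if none is set */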
uint16_t virt_get_current_guest_id(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return 0;
	return prtn->id;
}

static void set_current_prtn(struct guest_partition *prtn)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	current_partition[get_core_pos()] = prtn;

	thread_unmask_exceptions(exceptions);
}

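/*
 * TA RAM given to each guest: an equal share of the TA range, minus room
 * for the guest's writable core image (VCORE_UNPG_RW_SZ) and its
 * translation tables, rounded down to a small page boundary.
 */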
static size_t get_ta_ram_size(void)
{
	size_t ta_size = 0;

	core_mmu_get_ta_range(NULL, &ta_size);
	return ROUNDDOWN(ta_size / CFG_VIRT_GUEST_COUNT - VCORE_UNPG_RW_SZ -
			 core_mmu_get_total_pages_size(), SMALL_PAGE_SIZE);
}

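/*
 * Build a per-guest memory map: copy the nexus memory map, point the TEE
 * RW data area at the guest's own copy at @tee_data and append a
 * MEM_AREA_TA_RAM entry backed by @ta_ram.
 */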
static struct tee_mmap_region *prepare_memory_map(paddr_t tee_data,
						  paddr_t ta_ram)
{
	int i, entries;
	vaddr_t max_va = 0;
	struct tee_mmap_region *map;
	/*
	 * This function assumes that kmemory_map (aka static_memory_map
	 * from core_mmu.c) is not altered while it runs. This holds
	 * because all changes to static_memory_map are done during OP-TEE
	 * initialization, while this function is called when the
	 * hypervisor creates a guest.
	 */

	/* Count number of entries in nexus memory map */
	for (map = kmemory_map, entries = 1; map->type != MEM_AREA_END;
	     map++, entries++)
		;

	/* Allocate entries for virtual guest map */
	map = nex_calloc(entries + 1, sizeof(struct tee_mmap_region));
	if (!map)
		return NULL;

	memcpy(map, kmemory_map, sizeof(*map) * entries);

	/* Map TEE .data and .bss sections */
	for (i = 0; i < entries; i++) {
		if (map[i].va == (vaddr_t)(VCORE_UNPG_RW_PA)) {
			map[i].type = MEM_AREA_TEE_RAM_RW;
			map[i].attr = core_mmu_type_to_attr(map[i].type);
			map[i].pa = tee_data;
		}
		if (map[i].va + map[i].size > max_va)
			max_va = map[i].va + map[i].size;
	}

	/* Map TA_RAM */
	assert(map[entries - 1].type == MEM_AREA_END);
	map[entries] = map[entries - 1];
	map[entries - 1].region_size = SMALL_PAGE_SIZE;
	map[entries - 1].va = ROUNDUP(max_va, map[entries - 1].region_size);
	map[entries - 1].va +=
		(ta_ram - map[entries - 1].va) & CORE_MMU_PGDIR_MASK;
	map[entries - 1].pa = ta_ram;
	map[entries - 1].size = get_ta_ram_size();
	map[entries - 1].type = MEM_AREA_TA_RAM;
	map[entries - 1].attr = core_mmu_type_to_attr(map[entries - 1].type);

	DMSG("New map (%08lx):", (vaddr_t)(VCORE_UNPG_RW_PA));

	for (i = 0; i < entries; i++)
		DMSG("T: %-16s rsz: %08x, pa: %08lx, va: %08lx, sz: %08lx attr: %x",
		     teecore_memtype_name(map[i].type),
		     map[i].region_size, map[i].pa, map[i].va,
		     map[i].size, map[i].attr);
	return map;
}

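/*
 * Create virt_mapper_pool covering all secure RAM (secmem0 and, when
 * present, secmem1), carve out the gap between the two ranges and the
 * areas already used by the OP-TEE core, and keep a reference to the
 * nexus memory map in kmemory_map.
 */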
void virt_init_memory(struct tee_mmap_region *memory_map, paddr_t secmem0_base,
		      paddr_size_t secmem0_size, paddr_t secmem1_base,
		      paddr_size_t secmem1_size)
{
	struct tee_mmap_region *map = NULL;
	paddr_size_t size = secmem0_size;
	paddr_t base = secmem0_base;

	if (secmem1_size) {
		assert(secmem0_base + secmem0_size <= secmem1_base);
		size = secmem1_base + secmem1_size - base;
	}

	/* Init page pool that covers all secure RAM */
	if (!tee_mm_init(&virt_mapper_pool, base, size,
			 SMALL_PAGE_SHIFT, TEE_MM_POOL_NEX_MALLOC))
		panic("Can't create pool with free pages");
	DMSG("Created virtual mapper pool from %"PRIxPA" to %"PRIxPA,
	     base, base + size);

	if (secmem1_size) {
		/* Carve out a possible gap between secmem0 and secmem1 */
		base = secmem0_base + secmem0_size;
		size = secmem1_base - base;
		if (size) {
			DMSG("Carving out gap between secmem0 and secmem1 (0x%"PRIxPA":0x%"PRIxPASZ")",
			     base, size);
			if (!tee_mm_alloc2(&virt_mapper_pool, base, size))
				panic("Can't carve out secmem gap");
		}
	}

	/* Carve out areas that are used by OP-TEE core */
	for (map = memory_map; map->type != MEM_AREA_END; map++) {
		switch (map->type) {
		case MEM_AREA_TEE_RAM_RX:
		case MEM_AREA_TEE_RAM_RO:
		case MEM_AREA_NEX_RAM_RO:
		case MEM_AREA_NEX_RAM_RW:
			DMSG("Carving out area of type %d (0x%08lx-0x%08lx)",
			     map->type, map->pa, map->pa + map->size);
			if (!tee_mm_alloc2(&virt_mapper_pool, map->pa,
					   map->size))
				panic("Can't carve out used area");
			break;
		default:
			continue;
		}
	}

	kmemory_map = memory_map;
}

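/*
 * Allocate the guest's TEE RAM, TA RAM and translation tables from
 * virt_mapper_pool, build its MMU partition and memory map, then switch
 * to the new mapping to clear .bss and seed .data from the read-only
 * original.
 */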
static TEE_Result configure_guest_prtn_mem(struct guest_partition *prtn)
{
	TEE_Result res = TEE_SUCCESS;
	paddr_t original_data_pa = 0;

	prtn->tee_ram = tee_mm_alloc(&virt_mapper_pool, VCORE_UNPG_RW_SZ);
	if (!prtn->tee_ram) {
		EMSG("Can't allocate memory for TEE runtime context");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TEE RAM: %08" PRIxPA, tee_mm_get_smem(prtn->tee_ram));

	prtn->ta_ram = tee_mm_alloc(&virt_mapper_pool, get_ta_ram_size());
	if (!prtn->ta_ram) {
		EMSG("Can't allocate memory for TA data");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TA RAM: %08" PRIxPA, tee_mm_get_smem(prtn->ta_ram));

	prtn->tables = tee_mm_alloc(&virt_mapper_pool,
				    core_mmu_get_total_pages_size());
	if (!prtn->tables) {
		EMSG("Can't allocate memory for page tables");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->tables_va = phys_to_virt(tee_mm_get_smem(prtn->tables),
				       MEM_AREA_SEC_RAM_OVERALL,
				       core_mmu_get_total_pages_size());
	assert(prtn->tables_va);

	prtn->mmu_prtn = core_alloc_mmu_prtn(prtn->tables_va);
	if (!prtn->mmu_prtn) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->memory_map = prepare_memory_map(tee_mm_get_smem(prtn->tee_ram),
					      tee_mm_get_smem(prtn->ta_ram));
	if (!prtn->memory_map) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	core_init_mmu_prtn(prtn->mmu_prtn, prtn->memory_map);

	original_data_pa = virt_to_phys(__data_start);
	/* Switch to guest's mappings */
	core_mmu_set_prtn(prtn->mmu_prtn);

	/* Clear .bss */
	memset((void *)(VCORE_UNPG_RW_PA), 0, VCORE_UNPG_RW_SZ);

	/* Copy .data section from the R/O original */
	memcpy(__data_start,
	       phys_to_virt(original_data_pa, MEM_AREA_SEC_RAM_OVERALL,
			    __data_end - __data_start),
	       __data_end - __data_start);

	return TEE_SUCCESS;

err:
	if (prtn->tee_ram)
		tee_mm_free(prtn->tee_ram);
	if (prtn->ta_ram)
		tee_mm_free(prtn->ta_ram);
	if (prtn->tables)
		tee_mm_free(prtn->tables);
	nex_free(prtn->mmu_prtn);
	nex_free(prtn->memory_map);

	return res;
}

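/*
 * Set up a partition for a newly created guest: configure its memory,
 * run per-guest initialization (threads and preinitcalls) with the guest
 * mappings active, add it to prtn_list and switch back to the default
 * partition.
 */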
TEE_Result virt_guest_created(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;

	prtn = nex_calloc(1, sizeof(*prtn));
	if (!prtn)
		return TEE_ERROR_OUT_OF_MEMORY;

	prtn->id = guest_id;
	mutex_init(&prtn->mutex);
	refcount_set(&prtn->refc, 1);
	res = configure_guest_prtn_mem(prtn);
	if (res) {
		nex_free(prtn);
		return res;
	}

	set_current_prtn(prtn);

	/* Initialize threads */
	thread_init_threads();
	/* Do the preinitcalls */
	call_preinitcalls();

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_INSERT_HEAD(&prtn_list, prtn, link);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	IMSG("Added guest %d", guest_id);

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();

	return TEE_SUCCESS;
}

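/*
 * Tear down the partition of a destroyed guest: unlink it from prtn_list
 * and free its memory. Panics if a thread still holds a reference to the
 * guest.
 */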
TEE_Result virt_guest_destroyed(uint16_t guest_id)
{
	struct guest_partition *prtn;
	uint32_t exceptions;

	IMSG("Removing guest %d", guest_id);

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);

	LIST_FOREACH(prtn, &prtn_list, link) {
		if (prtn->id == guest_id) {
			LIST_REMOVE(prtn, link);
			break;
		}
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	if (prtn) {
		if (!refcount_dec(&prtn->refc)) {
			EMSG("Guest thread(s) still running, refc = %d",
			     refcount_val(&prtn->refc));
			panic();
		}

		tee_mm_free(prtn->tee_ram);
		tee_mm_free(prtn->ta_ram);
		tee_mm_free(prtn->tables);
		core_free_mmu_prtn(prtn->mmu_prtn);
		nex_free(prtn->memory_map);
		nex_free(prtn);
	} else {
		EMSG("Guest with id %d not found", guest_id);
	}

	return TEE_SUCCESS;
}

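/*
 * Activate the partition of @guest_id on this core: switch to its MMU
 * partition and take a reference on it.
 */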
TEE_Result virt_set_guest(uint16_t guest_id)
{
	struct guest_partition *prtn;
	uint32_t exceptions;

	prtn = get_current_prtn();

	/* This can be true only if we return from IRQ RPC */
	if (prtn && prtn->id == guest_id)
		return TEE_SUCCESS;

	if (prtn)
		panic("Virtual guest partition is already set");

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_FOREACH(prtn, &prtn_list, link) {
		if (prtn->id == guest_id) {
			set_current_prtn(prtn);
			core_mmu_set_prtn(prtn->mmu_prtn);
			refcount_inc(&prtn->refc);
			cpu_spin_unlock_xrestore(&prtn_list_lock,
						 exceptions);
			return TEE_SUCCESS;
		}
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return TEE_ERROR_ITEM_NOT_FOUND;
}

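/*
 * Deactivate the current guest on this core: switch back to the default
 * partition and drop the reference taken in virt_set_guest().
 */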
void virt_unset_guest(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return;

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();
	if (refcount_dec(&prtn->refc))
		panic();
}

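/* Initialize the guest's TEE runtime on its first standard call */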
void virt_on_stdcall(void)
{
	struct guest_partition *prtn = get_current_prtn();

	/* Initialize runtime on first std call */
	if (!prtn->runtime_initialized) {
		mutex_lock(&prtn->mutex);
		if (!prtn->runtime_initialized) {
			init_tee_runtime();
			prtn->runtime_initialized = true;
		}
		mutex_unlock(&prtn->mutex);
	}
}

struct tee_mmap_region *virt_get_memory_map(void)
{
	struct guest_partition *prtn;

	prtn = get_current_prtn();

	if (!prtn)
		return NULL;

	return prtn->memory_map;
}

void virt_get_ta_ram(vaddr_t *start, vaddr_t *end)
{
	struct guest_partition *prtn = get_current_prtn();

	*start = (vaddr_t)phys_to_virt(tee_mm_get_smem(prtn->ta_ram),
				       MEM_AREA_TA_RAM,
				       tee_mm_get_bytes(prtn->ta_ram));
	*end = *start + tee_mm_get_bytes(prtn->ta_ram);
}

#ifdef CFG_CORE_SEL1_SPMC
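/*
 * With the SEL1 SPMC, shared memory cookies are recorded in the owning
 * guest's partition. find_cookie() and find_prtn_cookie() walk these
 * records and are called with prtn_list_lock held.
 */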
static int find_cookie(struct guest_partition *prtn, uint64_t cookie)
{
	int i = 0;

	for (i = 0; i < prtn->cookie_count; i++)
		if (prtn->cookies[i] == cookie)
			return i;
	return -1;
}

static struct guest_partition *find_prtn_cookie(uint64_t cookie, int *idx)
{
	struct guest_partition *prtn = NULL;
	int i = 0;

	LIST_FOREACH(prtn, &prtn_list, link) {
		i = find_cookie(prtn, cookie);
		if (i >= 0) {
			if (idx)
				*idx = i;
			return prtn;
		}
	}

	return NULL;
}

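/*
 * Record @cookie for the guest running on this core. Fails if the cookie
 * is already registered by any guest or if the guest's cookie table is
 * full.
 */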
TEE_Result virt_add_cookie_to_current_guest(uint64_t cookie)
{
	TEE_Result res = TEE_ERROR_ACCESS_DENIED;
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	if (find_prtn_cookie(cookie, NULL))
		goto out;

	prtn = current_partition[get_core_pos()];
	if (prtn->cookie_count < ARRAY_SIZE(prtn->cookies)) {
		prtn->cookies[prtn->cookie_count] = cookie;
		prtn->cookie_count++;
		res = TEE_SUCCESS;
	}
out:
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return res;
}

void virt_remove_cookie(uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_prtn_cookie(cookie, &i);
	if (prtn) {
		memmove(prtn->cookies + i, prtn->cookies + i + 1,
			sizeof(uint64_t) * (prtn->cookie_count - i - 1));
		prtn->cookie_count--;
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
}

uint16_t virt_find_guest_by_cookie(uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;
	uint16_t ret = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_prtn_cookie(cookie, NULL);
	if (prtn)
		ret = prtn->id;

	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return ret;
}
#endif /*CFG_CORE_SEL1_SPMC*/