xref: /optee_os/core/arch/arm/kernel/virtualization.c (revision 209c34dc03563af70f1e406f304008495dae7a5e)
// SPDX-License-Identifier: BSD-2-Clause
/* Copyright (c) 2018, EPAM Systems. All rights reserved. */

#include <compiler.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/tee_mm.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <string.h>
#include <util.h>

static unsigned int prtn_list_lock __nex_data = SPINLOCK_UNLOCK;

static LIST_HEAD(prtn_list_head, guest_partition) prtn_list __nex_data =
	LIST_HEAD_INITIALIZER(prtn_list_head);

/* Free pages used for guest partitions */
tee_mm_pool_t virt_mapper_pool __nex_bss;

/* Memory map used by OP-TEE core (the nexus memory map) */
struct tee_mmap_region *kmemory_map __nex_bss;

struct guest_partition {
	LIST_ENTRY(guest_partition) link;
	struct mmu_partition *mmu_prtn;
	struct tee_mmap_region *memory_map;
	struct mutex mutex;
	void *tables_va;
	tee_mm_entry_t *tee_ram;
	tee_mm_entry_t *ta_ram;
	tee_mm_entry_t *tables;
	bool runtime_initialized;
	uint16_t id;
	struct refcount refc;
#ifdef CFG_CORE_SEL1_SPMC
	uint64_t cookies[64];
	uint8_t cookie_count;
#endif
};

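/*
 * Guest partition currently active on each CPU core, or NULL when no
 * guest is selected and the default (nexus) mappings are in use.
 */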
struct guest_partition *current_partition[CFG_TEE_CORE_NB_CORE] __nex_bss;

static struct guest_partition *get_current_prtn(void)
{
	struct guest_partition *ret;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	ret = current_partition[get_core_pos()];

	thread_unmask_exceptions(exceptions);

	return ret;
}

uint16_t virt_get_current_guest_id(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return 0;
	return prtn->id;
}

static void set_current_prtn(struct guest_partition *prtn)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	current_partition[get_core_pos()] = prtn;

	thread_unmask_exceptions(exceptions);
}

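/*
 * Each guest gets an equal share of the TA RAM range, less the space
 * needed for its private copy of .data/.bss and its translation tables,
 * rounded down to a small-page boundary.
 */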
static size_t get_ta_ram_size(void)
{
	size_t ta_size = 0;

	core_mmu_get_ta_range(NULL, &ta_size);
	return ROUNDDOWN(ta_size / CFG_VIRT_GUEST_COUNT - VCORE_UNPG_RW_SZ -
			 core_mmu_get_total_pages_size(), SMALL_PAGE_SIZE);
}

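/*
 * Builds the memory map for a new guest partition: the nexus map is
 * copied, the TEE RW (.data/.bss) region is redirected to the guest's
 * private copy at tee_data, and a MEM_AREA_TA_RAM entry backed by ta_ram
 * is appended.
 */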
static struct tee_mmap_region *prepare_memory_map(paddr_t tee_data,
						  paddr_t ta_ram)
{
	int i, entries;
	vaddr_t max_va = 0;
	struct tee_mmap_region *map;
	/*
	 * This function assumes that kmemory_map (aka static_memory_map
	 * from core_mmu.c) is not altered while it runs. This holds
	 * because all changes to static_memory_map are done during
	 * OP-TEE initialization, while this function is called when the
	 * hypervisor creates a guest.
	 */

	/* Count the number of entries in the nexus memory map */
	for (map = kmemory_map, entries = 1; map->type != MEM_AREA_END;
	     map++, entries++)
		;

	/* Allocate entries for the virtual guest map */
	map = nex_calloc(entries + 1, sizeof(struct tee_mmap_region));
	if (!map)
		return NULL;

	memcpy(map, kmemory_map, sizeof(*map) * entries);

	/* Map TEE .data and .bss sections */
	for (i = 0; i < entries; i++) {
		if (map[i].va == (vaddr_t)(VCORE_UNPG_RW_PA)) {
			map[i].type = MEM_AREA_TEE_RAM_RW;
			map[i].attr = core_mmu_type_to_attr(map[i].type);
			map[i].pa = tee_data;
		}
		if (map[i].va + map[i].size > max_va)
			max_va = map[i].va + map[i].size;
	}

	/* Map TA_RAM */
	assert(map[entries - 1].type == MEM_AREA_END);
	map[entries] = map[entries - 1];
	map[entries - 1].region_size = SMALL_PAGE_SIZE;
	map[entries - 1].va = ROUNDUP(max_va, map[entries - 1].region_size);
	map[entries - 1].va +=
		(ta_ram - map[entries - 1].va) & CORE_MMU_PGDIR_MASK;
	map[entries - 1].pa = ta_ram;
	map[entries - 1].size = get_ta_ram_size();
	map[entries - 1].type = MEM_AREA_TA_RAM;
	map[entries - 1].attr = core_mmu_type_to_attr(map[entries - 1].type);

	DMSG("New map (%08lx):", (vaddr_t)(VCORE_UNPG_RW_PA));

	for (i = 0; i < entries; i++)
		DMSG("T: %-16s rsz: %08x, pa: %08lx, va: %08lx, sz: %08lx attr: %x",
		     teecore_memtype_name(map[i].type),
		     map[i].region_size, map[i].pa, map[i].va,
		     map[i].size, map[i].attr);
	return map;
}

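/*
 * Sets up memory management for virtualization: creates virt_mapper_pool
 * covering the secure RAM range(s), carves out the gap between the two
 * ranges (if any) and the areas already used by the OP-TEE core, and
 * records the nexus memory map for later use.
 */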
void virt_init_memory(struct tee_mmap_region *memory_map, paddr_t secmem0_base,
		      paddr_size_t secmem0_size, paddr_t secmem1_base,
		      paddr_size_t secmem1_size)
{
	struct tee_mmap_region *map = NULL;
	paddr_size_t size = secmem0_size;
	paddr_t base = secmem0_base;

	if (secmem1_size) {
		assert(secmem0_base + secmem0_size <= secmem1_base);
		size = secmem1_base + secmem1_size - base;
	}

	/* Init page pool that covers all secure RAM */
	if (!tee_mm_init(&virt_mapper_pool, base, size,
			 SMALL_PAGE_SHIFT, TEE_MM_POOL_NEX_MALLOC))
		panic("Can't create pool with free pages");
	DMSG("Created virtual mapper pool from %"PRIxPA" to %"PRIxPA,
	     base, base + size);

	if (secmem1_size) {
		/* Carve out any gap between secmem0 and secmem1 */
		base = secmem0_base + secmem0_size;
		size = secmem1_base - base;
		if (size) {
			DMSG("Carving out gap between secmem0 and secmem1 (0x%"PRIxPA":0x%"PRIxPASZ")",
			     base, size);
			if (!tee_mm_alloc2(&virt_mapper_pool, base, size))
				panic("Can't carve out secmem gap");
		}
	}

	/* Carve out areas that are used by OP-TEE core */
	for (map = memory_map; map->type != MEM_AREA_END; map++) {
		switch (map->type) {
		case MEM_AREA_TEE_RAM_RX:
		case MEM_AREA_TEE_RAM_RO:
		case MEM_AREA_NEX_RAM_RO:
		case MEM_AREA_NEX_RAM_RW:
			DMSG("Carving out area of type %d (0x%08lx-0x%08lx)",
			     map->type, map->pa, map->pa + map->size);
			if (!tee_mm_alloc2(&virt_mapper_pool, map->pa,
					   map->size))
				panic("Can't carve out used area");
			break;
		default:
			continue;
		}
	}

	kmemory_map = memory_map;
}

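/*
 * Allocates the per-guest memory (a private copy of .data/.bss, a TA RAM
 * share and translation tables) from virt_mapper_pool, builds the guest
 * memory map and MMU partition, and finally switches to the new mappings
 * to clear .bss and copy .data from the original image.
 */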
static TEE_Result configure_guest_prtn_mem(struct guest_partition *prtn)
{
	TEE_Result res = TEE_SUCCESS;
	paddr_t original_data_pa = 0;

	prtn->tee_ram = tee_mm_alloc(&virt_mapper_pool, VCORE_UNPG_RW_SZ);
	if (!prtn->tee_ram) {
		EMSG("Can't allocate memory for TEE runtime context");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TEE RAM: %08" PRIxPA, tee_mm_get_smem(prtn->tee_ram));

	prtn->ta_ram = tee_mm_alloc(&virt_mapper_pool, get_ta_ram_size());
	if (!prtn->ta_ram) {
		EMSG("Can't allocate memory for TA data");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TA RAM: %08" PRIxPA, tee_mm_get_smem(prtn->ta_ram));

	prtn->tables = tee_mm_alloc(&virt_mapper_pool,
				    core_mmu_get_total_pages_size());
	if (!prtn->tables) {
		EMSG("Can't allocate memory for page tables");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->tables_va = phys_to_virt(tee_mm_get_smem(prtn->tables),
				       MEM_AREA_SEC_RAM_OVERALL,
				       core_mmu_get_total_pages_size());
	assert(prtn->tables_va);

	prtn->mmu_prtn = core_alloc_mmu_prtn(prtn->tables_va);
	if (!prtn->mmu_prtn) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->memory_map = prepare_memory_map(tee_mm_get_smem(prtn->tee_ram),
					      tee_mm_get_smem(prtn->ta_ram));
	if (!prtn->memory_map) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	core_init_mmu_prtn(prtn->mmu_prtn, prtn->memory_map);

	original_data_pa = virt_to_phys(__data_start);
	/* Switch to guest's mappings */
	core_mmu_set_prtn(prtn->mmu_prtn);

	/* clear .bss */
	memset((void *)(VCORE_UNPG_RW_PA), 0, VCORE_UNPG_RW_SZ);

	/* copy .data section from R/O original */
	memcpy(__data_start,
	       phys_to_virt(original_data_pa, MEM_AREA_SEC_RAM_OVERALL,
			    __data_end - __data_start),
	       __data_end - __data_start);

	return TEE_SUCCESS;

err:
	if (prtn->tee_ram)
		tee_mm_free(prtn->tee_ram);
	if (prtn->ta_ram)
		tee_mm_free(prtn->ta_ram);
	if (prtn->tables)
		tee_mm_free(prtn->tables);
	nex_free(prtn->mmu_prtn);
	nex_free(prtn->memory_map);

	return res;
}

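/*
 * Called when the hypervisor has created a new guest with the given
 * guest_id: sets up the guest partition (memory, MMU tables, threads,
 * preinitcalls) and adds it to the global partition list.
 */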
TEE_Result virt_guest_created(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;

	prtn = nex_calloc(1, sizeof(*prtn));
	if (!prtn)
		return TEE_ERROR_OUT_OF_MEMORY;

	prtn->id = guest_id;
	mutex_init(&prtn->mutex);
	refcount_set(&prtn->refc, 1);
	res = configure_guest_prtn_mem(prtn);
	if (res) {
		nex_free(prtn);
		return res;
	}

	set_current_prtn(prtn);

	/* Initialize threads */
	thread_init_threads();
	/* Do the preinitcalls */
	call_preinitcalls();

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_INSERT_HEAD(&prtn_list, prtn, link);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	IMSG("Added guest %d", guest_id);

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();

	return TEE_SUCCESS;
}

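/*
 * Called when the hypervisor has destroyed the guest with the given
 * guest_id: removes the partition from the list and releases its
 * resources, panicking if guest threads still hold references.
 */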
TEE_Result virt_guest_destroyed(uint16_t guest_id)
{
	struct guest_partition *prtn;
	uint32_t exceptions;

	IMSG("Removing guest %d", guest_id);

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);

	LIST_FOREACH(prtn, &prtn_list, link) {
		if (prtn->id == guest_id) {
			LIST_REMOVE(prtn, link);
			break;
		}
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	if (prtn) {
		if (!refcount_dec(&prtn->refc)) {
			EMSG("Guest thread(s) are still running. refc = %d",
			     refcount_val(&prtn->refc));
			panic();
		}

		tee_mm_free(prtn->tee_ram);
		tee_mm_free(prtn->ta_ram);
		tee_mm_free(prtn->tables);
		core_free_mmu_prtn(prtn->mmu_prtn);
		nex_free(prtn->memory_map);
		nex_free(prtn);
	} else {
		EMSG("Guest with id %d not found", guest_id);
	}

	return TEE_SUCCESS;
}

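/*
 * Selects the guest with the given guest_id on the current CPU core:
 * activates its MMU partition and takes a reference on it. Returns
 * TEE_ERROR_ITEM_NOT_FOUND if no such guest exists.
 */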
TEE_Result virt_set_guest(uint16_t guest_id)
{
	struct guest_partition *prtn;
	uint32_t exceptions;

	prtn = get_current_prtn();

	/* This can be true only if we return from IRQ RPC */
	if (prtn && prtn->id == guest_id)
		return TEE_SUCCESS;

	if (prtn)
		panic("Virtual guest partition is already set");

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_FOREACH(prtn, &prtn_list, link) {
		if (prtn->id == guest_id) {
			set_current_prtn(prtn);
			core_mmu_set_prtn(prtn->mmu_prtn);
			refcount_inc(&prtn->refc);
			cpu_spin_unlock_xrestore(&prtn_list_lock,
						 exceptions);
			return TEE_SUCCESS;
		}
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return TEE_ERROR_ITEM_NOT_FOUND;
}

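/*
 * Deselects the current guest on this CPU core, restores the default
 * (nexus) mappings and drops the reference taken by virt_set_guest().
 */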
void virt_unset_guest(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return;

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();
	if (refcount_dec(&prtn->refc))
		panic();
}

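/*
 * Called on every standard SMC: performs the one-time TEE runtime
 * initialization for the current guest, serialized by the partition
 * mutex and double-checked so the common path avoids taking the mutex.
 */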
void virt_on_stdcall(void)
{
	struct guest_partition *prtn = get_current_prtn();

	/* Initialize runtime on first std call */
	if (!prtn->runtime_initialized) {
		mutex_lock(&prtn->mutex);
		if (!prtn->runtime_initialized) {
			init_tee_runtime();
			prtn->runtime_initialized = true;
		}
		mutex_unlock(&prtn->mutex);
	}
}

struct tee_mmap_region *virt_get_memory_map(void)
{
	struct guest_partition *prtn;

	prtn = get_current_prtn();

	if (!prtn)
		return NULL;

	return prtn->memory_map;
}

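/*
 * Returns the virtual address range of the current guest's TA RAM
 * allocation.
 */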
void virt_get_ta_ram(vaddr_t *start, vaddr_t *end)
{
	struct guest_partition *prtn = get_current_prtn();

	*start = (vaddr_t)phys_to_virt(tee_mm_get_smem(prtn->ta_ram),
				       MEM_AREA_TA_RAM,
				       tee_mm_get_bytes(prtn->ta_ram));
	*end = *start + tee_mm_get_bytes(prtn->ta_ram);
}

#ifdef CFG_CORE_SEL1_SPMC
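/*
 * Each guest partition tracks the cookies assigned to it so that the
 * owning guest can be looked up later and the cookie released when it is
 * no longer needed. Lookups and updates are protected by prtn_list_lock.
 */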
static int find_cookie(struct guest_partition *prtn, uint64_t cookie)
{
	int i = 0;

	for (i = 0; i < prtn->cookie_count; i++)
		if (prtn->cookies[i] == cookie)
			return i;
	return -1;
}

static struct guest_partition *find_prtn_cookie(uint64_t cookie, int *idx)
{
	struct guest_partition *prtn = NULL;
	int i = 0;

	LIST_FOREACH(prtn, &prtn_list, link) {
		i = find_cookie(prtn, cookie);
		if (i >= 0) {
			if (idx)
				*idx = i;
			return prtn;
		}
	}

	return NULL;
}

TEE_Result virt_add_cookie_to_current_guest(uint64_t cookie)
{
	TEE_Result res = TEE_ERROR_ACCESS_DENIED;
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	if (find_prtn_cookie(cookie, NULL))
		goto out;

	prtn = current_partition[get_core_pos()];
	if (prtn->cookie_count < ARRAY_SIZE(prtn->cookies)) {
		prtn->cookies[prtn->cookie_count] = cookie;
		prtn->cookie_count++;
		res = TEE_SUCCESS;
	}
out:
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return res;
}

void virt_remove_cookie(uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_prtn_cookie(cookie, &i);
	if (prtn) {
		memmove(prtn->cookies + i, prtn->cookies + i + 1,
			sizeof(uint64_t) * (prtn->cookie_count - i - 1));
		prtn->cookie_count--;
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);
}

uint16_t virt_find_guest_by_cookie(uint64_t cookie)
{
	struct guest_partition *prtn = NULL;
	uint32_t exceptions = 0;
	uint16_t ret = 0;

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	prtn = find_prtn_cookie(cookie, NULL);
	if (prtn)
		ret = prtn->id;

	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return ret;
}
#endif /*CFG_CORE_SEL1_SPMC*/