xref: /optee_os/core/arch/arm/kernel/virtualization.c (revision 79321a890df5eaaad7c0d0a8776656a8d6db3839)
// SPDX-License-Identifier: BSD-2-Clause
/* Copyright (c) 2018, EPAM Systems. All rights reserved. */

#include <compiler.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/tee_mm.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <string.h>
#include <util.h>

static unsigned int prtn_list_lock __nex_data = SPINLOCK_UNLOCK;

static LIST_HEAD(prtn_list_head, guest_partition) prtn_list __nex_data =
	LIST_HEAD_INITIALIZER(prtn_list_head);

/* Free pages used for guest partitions */
tee_mm_pool_t virt_mapper_pool __nex_bss;

/* Memory used by OP-TEE core */
struct tee_mmap_region *kmemory_map __nex_bss;

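/* Per-guest state tracked by the nexus */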
struct guest_partition {
	LIST_ENTRY(guest_partition) link;
	struct mmu_partition *mmu_prtn;
	struct tee_mmap_region *memory_map;
	struct mutex mutex;
	void *tables_va;
	tee_mm_entry_t *tee_ram;
	tee_mm_entry_t *ta_ram;
	tee_mm_entry_t *tables;
	bool runtime_initialized;
	uint16_t id;
	struct refcount refc;
};

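/* Guest partition currently active on each core, NULL when none is set */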
static struct guest_partition *current_partition[CFG_TEE_CORE_NB_CORE] __nex_bss;

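/* Return the guest partition currently active on this core, if any */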
static struct guest_partition *get_current_prtn(void)
{
	struct guest_partition *ret;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	ret = current_partition[get_core_pos()];

	thread_unmask_exceptions(exceptions);

	return ret;
}

uint16_t virt_get_current_guest_id(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return 0;
	return prtn->id;
}

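/* Make @prtn the partition active on this core */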
static void set_current_prtn(struct guest_partition *prtn)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	current_partition[get_core_pos()] = prtn;

	thread_unmask_exceptions(exceptions);
}

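/*
 * Per-guest TA RAM share: the total TA range divided by the number of
 * guests, minus room for the guest's writable TEE RAM copy and its
 * translation tables, rounded down to a page boundary.
 */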
static size_t get_ta_ram_size(void)
{
	size_t ta_size = 0;

	core_mmu_get_ta_range(NULL, &ta_size);
	return ROUNDDOWN(ta_size / CFG_VIRT_GUEST_COUNT - VCORE_UNPG_RW_SZ -
			 core_mmu_get_total_pages_size(), SMALL_PAGE_SIZE);
}

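/*
 * Build a per-guest copy of the nexus memory map: the TEE .data/.bss
 * entry is redirected to @tee_data and a MEM_AREA_TA_RAM entry backed by
 * @ta_ram is appended.
 */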
static struct tee_mmap_region *prepare_memory_map(paddr_t tee_data,
						  paddr_t ta_ram)
{
	int i, entries;
	vaddr_t max_va = 0;
	struct tee_mmap_region *map;
	/*
	 * This function assumes that kmemory_map (aka static_memory_map
	 * from core_mmu.c) is not altered while it runs. This holds because
	 * all changes to static_memory_map are done during OP-TEE
	 * initialization, while this function is called when the hypervisor
	 * creates a guest.
	 */

	/* Count number of entries in nexus memory map */
	for (map = kmemory_map, entries = 1; map->type != MEM_AREA_END;
	     map++, entries++)
		;

	/* Allocate entries for virtual guest map */
	map = nex_calloc(entries + 1, sizeof(struct tee_mmap_region));
	if (!map)
		return NULL;

	memcpy(map, kmemory_map, sizeof(*map) * entries);

	/* Map TEE .data and .bss sections */
	for (i = 0; i < entries; i++) {
		if (map[i].va == (vaddr_t)(VCORE_UNPG_RW_PA)) {
			map[i].type = MEM_AREA_TEE_RAM_RW;
			map[i].attr = core_mmu_type_to_attr(map[i].type);
			map[i].pa = tee_data;
		}
		if (map[i].va + map[i].size > max_va)
			max_va = map[i].va + map[i].size;
	}

	/* Map TA_RAM */
	assert(map[entries - 1].type == MEM_AREA_END);
	map[entries] = map[entries - 1];
	map[entries - 1].region_size = SMALL_PAGE_SIZE;
	map[entries - 1].va = ROUNDUP(max_va, map[entries - 1].region_size);
	map[entries - 1].va +=
		(ta_ram - map[entries - 1].va) & CORE_MMU_PGDIR_MASK;
	map[entries - 1].pa = ta_ram;
	map[entries - 1].size = get_ta_ram_size();
	map[entries - 1].type = MEM_AREA_TA_RAM;
	map[entries - 1].attr = core_mmu_type_to_attr(map[entries - 1].type);

	DMSG("New map (%08lx):", (vaddr_t)(VCORE_UNPG_RW_PA));

	for (i = 0; i < entries; i++)
		DMSG("T: %-16s rsz: %08x, pa: %08lx, va: %08lx, sz: %08lx attr: %x",
		     teecore_memtype_name(map[i].type),
		     map[i].region_size, map[i].pa, map[i].va,
		     map[i].size, map[i].attr);
	return map;
}

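/*
 * Set up the nexus page pool that covers all secure RAM and carve out
 * the ranges already used by OP-TEE core, leaving the rest for guest
 * partitions.
 */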
void virt_init_memory(struct tee_mmap_region *memory_map, paddr_t secmem0_base,
		      paddr_size_t secmem0_size, paddr_t secmem1_base,
		      paddr_size_t secmem1_size)
{
	struct tee_mmap_region *map = NULL;
	paddr_size_t size = secmem0_size;
	paddr_t base = secmem0_base;

	if (secmem1_size) {
		assert(secmem0_base + secmem0_size <= secmem1_base);
		size = secmem1_base + secmem1_size - base;
	}

	/* Init page pool that covers all secure RAM */
	if (!tee_mm_init(&virt_mapper_pool, base, size,
			 SMALL_PAGE_SHIFT, TEE_MM_POOL_NEX_MALLOC))
		panic("Can't create pool with free pages");
	DMSG("Created virtual mapper pool from %"PRIxPA" to %"PRIxPA,
	     base, base + size);

	if (secmem1_size) {
		/* Carve out an eventual gap between secmem0 and secmem1 */
		base = secmem0_base + secmem0_size;
		size = secmem1_base - base;
		if (size) {
			DMSG("Carving out gap between secmem0 and secmem1 (0x%"PRIxPA":0x%"PRIxPASZ")",
			     base, size);
			if (!tee_mm_alloc2(&virt_mapper_pool, base, size))
				panic("Can't carve out secmem gap");
		}
	}

	/* Carve out areas that are used by OP-TEE core */
	for (map = memory_map; map->type != MEM_AREA_END; map++) {
		switch (map->type) {
		case MEM_AREA_TEE_RAM_RX:
		case MEM_AREA_TEE_RAM_RO:
		case MEM_AREA_NEX_RAM_RO:
		case MEM_AREA_NEX_RAM_RW:
			DMSG("Carving out area of type %d (0x%08lx-0x%08lx)",
			     map->type, map->pa, map->pa + map->size);
			if (!tee_mm_alloc2(&virt_mapper_pool, map->pa,
					   map->size))
				panic("Can't carve out used area");
			break;
		default:
			continue;
		}
	}

	kmemory_map = memory_map;
}

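/*
 * Allocate TEE RAM, TA RAM and translation tables for a new guest, build
 * its memory map and MMU partition, then switch to the new mapping to
 * clear .bss and seed .data from the original image.
 */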
static TEE_Result configure_guest_prtn_mem(struct guest_partition *prtn)
{
	TEE_Result res = TEE_SUCCESS;
	paddr_t original_data_pa = 0;

	prtn->tee_ram = tee_mm_alloc(&virt_mapper_pool, VCORE_UNPG_RW_SZ);
	if (!prtn->tee_ram) {
		EMSG("Can't allocate memory for TEE runtime context");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TEE RAM: %08" PRIxPA, tee_mm_get_smem(prtn->tee_ram));

	prtn->ta_ram = tee_mm_alloc(&virt_mapper_pool, get_ta_ram_size());
	if (!prtn->ta_ram) {
		EMSG("Can't allocate memory for TA data");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TA RAM: %08" PRIxPA, tee_mm_get_smem(prtn->ta_ram));

	prtn->tables = tee_mm_alloc(&virt_mapper_pool,
				   core_mmu_get_total_pages_size());
	if (!prtn->tables) {
		EMSG("Can't allocate memory for page tables");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->tables_va = phys_to_virt(tee_mm_get_smem(prtn->tables),
				      MEM_AREA_SEC_RAM_OVERALL,
				      core_mmu_get_total_pages_size());
	assert(prtn->tables_va);

	prtn->mmu_prtn = core_alloc_mmu_prtn(prtn->tables_va);
	if (!prtn->mmu_prtn) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->memory_map = prepare_memory_map(tee_mm_get_smem(prtn->tee_ram),
					     tee_mm_get_smem(prtn->ta_ram));
	if (!prtn->memory_map) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	core_init_mmu_prtn(prtn->mmu_prtn, prtn->memory_map);

	original_data_pa = virt_to_phys(__data_start);
	/* Switch to guest's mappings */
	core_mmu_set_prtn(prtn->mmu_prtn);

	/* Clear .bss */
	memset((void *)(VCORE_UNPG_RW_PA), 0, VCORE_UNPG_RW_SZ);

	/* Copy .data section from R/O original */
	memcpy(__data_start,
	       phys_to_virt(original_data_pa, MEM_AREA_SEC_RAM_OVERALL,
			    __data_end - __data_start),
	       __data_end - __data_start);

	return TEE_SUCCESS;

err:
	if (prtn->tee_ram)
		tee_mm_free(prtn->tee_ram);
	if (prtn->ta_ram)
		tee_mm_free(prtn->ta_ram);
	if (prtn->tables)
		tee_mm_free(prtn->tables);
	nex_free(prtn->mmu_prtn);
	nex_free(prtn->memory_map);

	return res;
}

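/*
 * Called when the hypervisor reports that a new guest was created:
 * allocate and set up a partition for @guest_id, initialize its threads
 * and preinitcalls, and add it to the partition list.
 */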
TEE_Result virt_guest_created(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;

	prtn = nex_calloc(1, sizeof(*prtn));
	if (!prtn)
		return TEE_ERROR_OUT_OF_MEMORY;

	prtn->id = guest_id;
	mutex_init(&prtn->mutex);
	refcount_set(&prtn->refc, 1);
	res = configure_guest_prtn_mem(prtn);
	if (res) {
		nex_free(prtn);
		return res;
	}

	set_current_prtn(prtn);

	/* Initialize threads */
	thread_init_threads();
	/* Do the preinitcalls */
	call_preinitcalls();

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_INSERT_HEAD(&prtn_list, prtn, link);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	IMSG("Added guest %d", guest_id);

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();

	return TEE_SUCCESS;
}

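/*
 * Called when the hypervisor reports that a guest was destroyed: remove
 * the partition for @guest_id from the list and free its resources.
 */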
TEE_Result virt_guest_destroyed(uint16_t guest_id)
{
	struct guest_partition *prtn;
	uint32_t exceptions;

	IMSG("Removing guest %d", guest_id);

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);

	LIST_FOREACH(prtn, &prtn_list, link) {
		if (prtn->id == guest_id) {
			LIST_REMOVE(prtn, link);
			break;
		}
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	if (prtn) {
		if (!refcount_dec(&prtn->refc)) {
			EMSG("Guest thread(s) are still running. refc = %d",
			     refcount_val(&prtn->refc));
			panic();
		}

		tee_mm_free(prtn->tee_ram);
		tee_mm_free(prtn->ta_ram);
		tee_mm_free(prtn->tables);
		core_free_mmu_prtn(prtn->mmu_prtn);
		nex_free(prtn->memory_map);
		nex_free(prtn);
	} else {
		EMSG("Client with id %d was not found", guest_id);
	}

	return TEE_SUCCESS;
}

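/*
 * Switch this core to the partition owning @guest_id and take a
 * reference on it. HYP_CLNT_ID is accepted without a partition.
 */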
TEE_Result virt_set_guest(uint16_t guest_id)
{
	struct guest_partition *prtn;
	uint32_t exceptions;

	prtn = get_current_prtn();

	/* This can be true only if we return from IRQ RPC */
	if (prtn && prtn->id == guest_id)
		return TEE_SUCCESS;

	if (prtn)
		panic("Virtual guest partition is already set");

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_FOREACH(prtn, &prtn_list, link) {
		if (prtn->id == guest_id) {
			set_current_prtn(prtn);
			core_mmu_set_prtn(prtn->mmu_prtn);
			refcount_inc(&prtn->refc);
			cpu_spin_unlock_xrestore(&prtn_list_lock,
						 exceptions);
			return TEE_SUCCESS;
		}
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	if (guest_id == HYP_CLNT_ID)
		return TEE_SUCCESS;
	return TEE_ERROR_ITEM_NOT_FOUND;
}

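/*
 * Leave the current partition: restore the default mapping and drop the
 * reference taken by virt_set_guest().
 */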
void virt_unset_guest(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return;

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();
	if (refcount_dec(&prtn->refc))
		panic();
}

void virt_on_stdcall(void)
{
	struct guest_partition *prtn = get_current_prtn();

	/* Initialize runtime on first std call */
	if (!prtn->runtime_initialized) {
		mutex_lock(&prtn->mutex);
		if (!prtn->runtime_initialized) {
			init_tee_runtime();
			prtn->runtime_initialized = true;
		}
		mutex_unlock(&prtn->mutex);
	}
}

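/* Return the memory map of the partition active on this core, or NULL */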
struct tee_mmap_region *virt_get_memory_map(void)
{
	struct guest_partition *prtn;

	prtn = get_current_prtn();

	if (!prtn)
		return NULL;

	return prtn->memory_map;
}

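/* Return the virtual address range of the current guest's TA RAM */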
void virt_get_ta_ram(vaddr_t *start, vaddr_t *end)
{
	struct guest_partition *prtn = get_current_prtn();

	*start = (vaddr_t)phys_to_virt(tee_mm_get_smem(prtn->ta_ram),
				       MEM_AREA_TA_RAM,
				       tee_mm_get_bytes(prtn->ta_ram));
	*end = *start + tee_mm_get_bytes(prtn->ta_ram);
}
435