xref: /optee_os/core/arch/arm/kernel/virtualization.c (revision 54e4b08c64e896ccef670841011b6526ac7f515e)
// SPDX-License-Identifier: BSD-2-Clause
/* Copyright (c) 2018, EPAM Systems. All rights reserved. */

#include <assert.h>
#include <compiler.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/tee_mm.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <string.h>
#include <util.h>

static unsigned int prtn_list_lock __nex_data = SPINLOCK_UNLOCK;

static LIST_HEAD(prtn_list_head, guest_partition) prtn_list __nex_data =
	LIST_HEAD_INITIALIZER(prtn_list_head);

/* Free pages used for guest partitions */
tee_mm_pool_t virt_mapper_pool __nex_bss;

/* Memory map used by OP-TEE core */
struct tee_mmap_region *kmemory_map __nex_bss;

struct guest_partition {
	LIST_ENTRY(guest_partition) link;
	struct mmu_partition *mmu_prtn;
	struct tee_mmap_region *memory_map;
	struct mutex mutex;
	void *tables_va;
	tee_mm_entry_t *tee_ram;
	tee_mm_entry_t *ta_ram;
	tee_mm_entry_t *tables;
	bool runtime_initialized;
	uint16_t id;
	struct refcount refc;
};

static struct guest_partition *current_partition[CFG_TEE_CORE_NB_CORE]
	__nex_bss;

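/*
 * Per-core pointer to the currently active guest partition. Foreign
 * interrupts are masked while the entry is read or updated so that the
 * core position lookup and the array access are not split by a foreign
 * interrupt.
 */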
static struct guest_partition *get_current_prtn(void)
{
	struct guest_partition *ret;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	ret = current_partition[get_core_pos()];

	thread_unmask_exceptions(exceptions);

	return ret;
}

static void set_current_prtn(struct guest_partition *prtn)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	current_partition[get_core_pos()] = prtn;

	thread_unmask_exceptions(exceptions);
}

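/*
 * TA RAM available to a single guest: an equal share of the total TA
 * range, minus the guest's private copy of OP-TEE .data/.bss and its
 * translation tables, rounded down to a small-page boundary.
 */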
static size_t get_ta_ram_size(void)
{
	size_t ta_size = 0;

	core_mmu_get_ta_range(NULL, &ta_size);
	return ROUNDDOWN(ta_size / CFG_VIRT_GUEST_COUNT - VCORE_UNPG_RW_SZ -
			 core_mmu_get_total_pages_size(), SMALL_PAGE_SIZE);
}

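/*
 * Build a per-guest copy of the nexus memory map: the TEE .data/.bss
 * region is redirected to the guest's private pages at @tee_data and a
 * MEM_AREA_TA_RAM entry backed by @ta_ram is appended before the
 * terminating MEM_AREA_END entry.
 */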
static struct tee_mmap_region *prepare_memory_map(paddr_t tee_data,
						  paddr_t ta_ram)
{
	int i, entries;
	vaddr_t max_va = 0;
	struct tee_mmap_region *map;
	/*
	 * This function assumes that kmemory_map (aka static_memory_map
	 * from core_mmu.c) is not altered while it runs. This holds
	 * because all changes to static_memory_map are done during
	 * OP-TEE initialization, while this function is called when the
	 * hypervisor creates a guest.
	 */

	/* Count number of entries in nexus memory map */
	for (map = kmemory_map, entries = 1; map->type != MEM_AREA_END;
	     map++, entries++)
		;

	/* Allocate entries for virtual guest map */
	map = nex_calloc(entries + 1, sizeof(struct tee_mmap_region));
	if (!map)
		return NULL;

	memcpy(map, kmemory_map, sizeof(*map) * entries);

	/* Map TEE .data and .bss sections */
	for (i = 0; i < entries; i++) {
		if (map[i].va == (vaddr_t)(VCORE_UNPG_RW_PA)) {
			map[i].type = MEM_AREA_TEE_RAM_RW;
			map[i].attr = core_mmu_type_to_attr(map[i].type);
			map[i].pa = tee_data;
		}
		if (map[i].va + map[i].size > max_va)
			max_va = map[i].va + map[i].size;
	}

	/* Map TA_RAM */
	assert(map[entries - 1].type == MEM_AREA_END);
	map[entries] = map[entries - 1];
	map[entries - 1].region_size = SMALL_PAGE_SIZE;
	map[entries - 1].va = ROUNDUP(max_va, map[entries - 1].region_size);
	map[entries - 1].va +=
		(ta_ram - map[entries - 1].va) & CORE_MMU_PGDIR_MASK;
	map[entries - 1].pa = ta_ram;
	map[entries - 1].size = get_ta_ram_size();
	map[entries - 1].type = MEM_AREA_TA_RAM;
	map[entries - 1].attr = core_mmu_type_to_attr(map[entries - 1].type);

	DMSG("New map (%08lx):", (vaddr_t)(VCORE_UNPG_RW_PA));

	for (i = 0; i < entries; i++)
		DMSG("T: %-16s rsz: %08x, pa: %08lx, va: %08lx, sz: %08lx attr: %x",
		     teecore_memtype_name(map[i].type),
		     map[i].region_size, map[i].pa, map[i].va,
		     map[i].size, map[i].attr);
	return map;
}

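/*
 * Initialize the virtualization memory pool covering all secure RAM
 * (secmem0 plus an optional secmem1). The gap between the two ranges
 * and the areas already used by the OP-TEE core are carved out so that
 * only free pages remain available for guest partitions.
 */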
void virt_init_memory(struct tee_mmap_region *memory_map, paddr_t secmem0_base,
		      paddr_size_t secmem0_size, paddr_t secmem1_base,
		      paddr_size_t secmem1_size)
{
	struct tee_mmap_region *map = NULL;
	paddr_size_t size = secmem0_size;
	paddr_t base = secmem0_base;

	if (secmem1_size) {
		assert(secmem0_base + secmem0_size <= secmem1_base);
		size = secmem1_base + secmem1_size - base;
	}

	/* Init page pool that covers all secure RAM */
	if (!tee_mm_init(&virt_mapper_pool, base, size,
			 SMALL_PAGE_SHIFT, TEE_MM_POOL_NEX_MALLOC))
		panic("Can't create pool with free pages");
	DMSG("Created virtual mapper pool from %"PRIxPA" to %"PRIxPA,
	     base, base + size);

	if (secmem1_size) {
		/* Carve out an eventual gap between secmem0 and secmem1 */
		base = secmem0_base + secmem0_size;
		size = secmem1_base - base;
		if (size) {
			DMSG("Carving out gap between secmem0 and secmem1 (0x%"PRIxPA":0x%"PRIxPASZ")",
			     base, size);
			if (!tee_mm_alloc2(&virt_mapper_pool, base, size))
				panic("Can't carve out secmem gap");
		}
	}

	/* Carve out areas that are used by OP-TEE core */
	for (map = memory_map; map->type != MEM_AREA_END; map++) {
		switch (map->type) {
		case MEM_AREA_TEE_RAM_RX:
		case MEM_AREA_TEE_RAM_RO:
		case MEM_AREA_NEX_RAM_RO:
		case MEM_AREA_NEX_RAM_RW:
			DMSG("Carving out area of type %d (0x%08lx-0x%08lx)",
			     map->type, map->pa, map->pa + map->size);
			if (!tee_mm_alloc2(&virt_mapper_pool, map->pa,
					   map->size))
				panic("Can't carve out used area");
			break;
		default:
			continue;
		}
	}

	kmemory_map = memory_map;
}

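/*
 * Allocate the per-guest memory (private TEE RAM for .data/.bss, TA RAM
 * and translation tables), create the guest's MMU partition and switch
 * to it, then reinitialize .bss and .data under the new mappings.
 */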
static TEE_Result configure_guest_prtn_mem(struct guest_partition *prtn)
{
	TEE_Result res = TEE_SUCCESS;
	paddr_t original_data_pa = 0;

	prtn->tee_ram = tee_mm_alloc(&virt_mapper_pool, VCORE_UNPG_RW_SZ);
	if (!prtn->tee_ram) {
		EMSG("Can't allocate memory for TEE runtime context");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TEE RAM: %08" PRIxPA, tee_mm_get_smem(prtn->tee_ram));

	prtn->ta_ram = tee_mm_alloc(&virt_mapper_pool, get_ta_ram_size());
	if (!prtn->ta_ram) {
		EMSG("Can't allocate memory for TA data");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TA RAM: %08" PRIxPA, tee_mm_get_smem(prtn->ta_ram));

	prtn->tables = tee_mm_alloc(&virt_mapper_pool,
				    core_mmu_get_total_pages_size());
	if (!prtn->tables) {
		EMSG("Can't allocate memory for page tables");
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->tables_va = phys_to_virt(tee_mm_get_smem(prtn->tables),
				       MEM_AREA_SEC_RAM_OVERALL,
				       core_mmu_get_total_pages_size());
	assert(prtn->tables_va);

	prtn->mmu_prtn = core_alloc_mmu_prtn(prtn->tables_va);
	if (!prtn->mmu_prtn) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->memory_map = prepare_memory_map(tee_mm_get_smem(prtn->tee_ram),
					      tee_mm_get_smem(prtn->ta_ram));
	if (!prtn->memory_map) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	core_init_mmu_prtn(prtn->mmu_prtn, prtn->memory_map);

	original_data_pa = virt_to_phys(__data_start);
	/* Switch to guest's mappings */
	core_mmu_set_prtn(prtn->mmu_prtn);

	/* clear .bss */
	memset((void *)(VCORE_UNPG_RW_PA), 0, VCORE_UNPG_RW_SZ);

	/* copy .data section from R/O original */
	memcpy(__data_start,
	       phys_to_virt(original_data_pa, MEM_AREA_SEC_RAM_OVERALL,
			    __data_end - __data_start),
	       __data_end - __data_start);

	return TEE_SUCCESS;

err:
	if (prtn->tee_ram)
		tee_mm_free(prtn->tee_ram);
	if (prtn->ta_ram)
		tee_mm_free(prtn->ta_ram);
	if (prtn->tables)
		tee_mm_free(prtn->tables);
	nex_free(prtn->mmu_prtn);
	nex_free(prtn->memory_map);

	return res;
}

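/*
 * Called when the hypervisor reports that a new guest with @guest_id
 * was created. Sets up the guest partition, runs per-guest thread and
 * preinitcall initialization under the guest's mappings and adds the
 * partition to the global list.
 */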
TEE_Result virt_guest_created(uint16_t guest_id)
{
	struct guest_partition *prtn = NULL;
	TEE_Result res = TEE_SUCCESS;
	uint32_t exceptions = 0;

	prtn = nex_calloc(1, sizeof(*prtn));
	if (!prtn)
		return TEE_ERROR_OUT_OF_MEMORY;

	prtn->id = guest_id;
	mutex_init(&prtn->mutex);
	refcount_set(&prtn->refc, 1);
	res = configure_guest_prtn_mem(prtn);
	if (res) {
		nex_free(prtn);
		return res;
	}

	set_current_prtn(prtn);

	/* Initialize threads */
	thread_init_threads();
	/* Do the preinitcalls */
	call_preinitcalls();

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_INSERT_HEAD(&prtn_list, prtn, link);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	IMSG("Added guest %d", guest_id);

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();

	return TEE_SUCCESS;
}

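/*
 * Called when the hypervisor reports that the guest with @guest_id was
 * destroyed. Removes the partition from the global list and releases
 * its resources; panics if guest threads still hold references.
 */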
TEE_Result virt_guest_destroyed(uint16_t guest_id)
{
	struct guest_partition *prtn;
	uint32_t exceptions;

	IMSG("Removing guest %d", guest_id);

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);

	LIST_FOREACH(prtn, &prtn_list, link) {
		if (prtn->id == guest_id) {
			LIST_REMOVE(prtn, link);
			break;
		}
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	if (prtn) {
		if (!refcount_dec(&prtn->refc)) {
			EMSG("Guest thread(s) are still running. refc = %d",
			     refcount_val(&prtn->refc));
			panic();
		}

		tee_mm_free(prtn->tee_ram);
		tee_mm_free(prtn->ta_ram);
		tee_mm_free(prtn->tables);
		core_free_mmu_prtn(prtn->mmu_prtn);
		nex_free(prtn->memory_map);
		nex_free(prtn);
	} else {
		EMSG("Guest with id %d not found", guest_id);
	}

	return TEE_SUCCESS;
}

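/*
 * Activate the partition of @guest_id on the current core and take a
 * reference on it. Unknown ids yield TEE_ERROR_ITEM_NOT_FOUND, except
 * HYP_CLNT_ID which denotes the hypervisor itself and has no partition.
 */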
TEE_Result virt_set_guest(uint16_t guest_id)
{
	struct guest_partition *prtn;
	uint32_t exceptions;

	prtn = get_current_prtn();

	/* This can be true only if we return from IRQ RPC */
	if (prtn && prtn->id == guest_id)
		return TEE_SUCCESS;

	if (prtn)
		panic("Virtual guest partition is already set");

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_FOREACH(prtn, &prtn_list, link) {
		if (prtn->id == guest_id) {
			set_current_prtn(prtn);
			core_mmu_set_prtn(prtn->mmu_prtn);
			refcount_inc(&prtn->refc);
			cpu_spin_unlock_xrestore(&prtn_list_lock,
						 exceptions);
			return TEE_SUCCESS;
		}
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	if (guest_id == HYP_CLNT_ID)
		return TEE_SUCCESS;
	return TEE_ERROR_ITEM_NOT_FOUND;
}

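/*
 * Leave the currently active guest partition: restore the default
 * (nexus) mappings and drop the reference taken by virt_set_guest().
 */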
void virt_unset_guest(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return;

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();
	if (refcount_dec(&prtn->refc))
		panic();
}

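/*
 * Hook invoked on every standard call: performs one-time initialization
 * of the guest's TEE runtime, guarded by the partition mutex against
 * concurrent first calls.
 */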
void virt_on_stdcall(void)
{
	struct guest_partition *prtn = get_current_prtn();

	/* Initialize runtime on first std call */
	if (!prtn->runtime_initialized) {
		mutex_lock(&prtn->mutex);
		if (!prtn->runtime_initialized) {
			init_tee_runtime();
			prtn->runtime_initialized = true;
		}
		mutex_unlock(&prtn->mutex);
	}
}

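/* Return the memory map of the currently active guest partition, if any. */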
struct tee_mmap_region *virt_get_memory_map(void)
{
	struct guest_partition *prtn;

	prtn = get_current_prtn();

	if (!prtn)
		return NULL;

	return prtn->memory_map;
}

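/*
 * Return the virtual start and end addresses of the current guest's
 * TA RAM allocation.
 */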
void virt_get_ta_ram(vaddr_t *start, vaddr_t *end)
{
	struct guest_partition *prtn = get_current_prtn();

	*start = (vaddr_t)phys_to_virt(tee_mm_get_smem(prtn->ta_ram),
				       MEM_AREA_TA_RAM,
				       tee_mm_get_bytes(prtn->ta_ram));
	*end = *start + tee_mm_get_bytes(prtn->ta_ram);
}