xref: /optee_os/core/arch/arm/kernel/virtualization.c (revision 77bdbf67c42209142ef43129e01113d29d9c62f6)
// SPDX-License-Identifier: BSD-2-Clause
/* Copyright (c) 2018, EPAM Systems. All rights reserved. */

#include <compiler.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/tee_mm.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <string.h>
#include <util.h>

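/* Protects prtn_list against concurrent insertion and removal */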
static unsigned int prtn_list_lock __nex_data = SPINLOCK_UNLOCK;

static LIST_HEAD(prtn_list_head, guest_partition) prtn_list __nex_data =
	LIST_HEAD_INITIALIZER(prtn_list_head);

/* Free pages used for guest partitions */
tee_mm_pool_t virt_mapper_pool __nex_bss;

/* Memory used by OP-TEE core */
struct tee_mmap_region *kmemory_map __nex_bss;

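/*
 * Per-guest partition context. Each guest gets its own copy of the
 * writable parts of the core image (tee_ram), its own TA RAM share,
 * its own translation tables and its own memory map, all tracked here.
 */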
struct guest_partition {
	LIST_ENTRY(guest_partition) link;
	struct mmu_partition *mmu_prtn;
	struct tee_mmap_region *memory_map;
	struct mutex mutex;
	void *tables_va;
	tee_mm_entry_t *tee_ram;
	tee_mm_entry_t *ta_ram;
	tee_mm_entry_t *tables;
	bool runtime_initialized;
	uint16_t id;
	struct refcount refc;
};

static struct guest_partition *current_partition[CFG_TEE_CORE_NB_CORE]
	__nex_bss;

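/*
 * Foreign interrupts are masked while accessing current_partition[] so
 * that the thread cannot be preempted and migrated to another core
 * between reading get_core_pos() and using the resulting slot.
 */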
static struct guest_partition *get_current_prtn(void)
{
	struct guest_partition *ret;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	ret = current_partition[get_core_pos()];

	thread_unmask_exceptions(exceptions);

	return ret;
}

static void set_current_prtn(struct guest_partition *prtn)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	current_partition[get_core_pos()] = prtn;

	thread_unmask_exceptions(exceptions);
}

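/*
 * TA RAM is divided evenly between the guests. Each share also has to
 * hold the guest's private copy of the writable core image and its
 * translation tables, so that space is subtracted before rounding down
 * to a whole number of small pages.
 */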
static size_t get_ta_ram_size(void)
{
	return ROUNDDOWN(TA_RAM_SIZE / CFG_VIRT_GUEST_COUNT -
			 VCORE_UNPG_RW_SZ -
			 core_mmu_get_total_pages_size(), SMALL_PAGE_SIZE);
}

static struct tee_mmap_region *prepare_memory_map(paddr_t tee_data,
						  paddr_t ta_ram)
{
	int i, entries;
	vaddr_t max_va = 0;
	struct tee_mmap_region *map;
	/*
	 * This function assumes that kmemory_map (aka static_memory_map
	 * from core_mmu.c) is not altered while it runs. This holds
	 * because all changes to static_memory_map are done during
	 * OP-TEE initialization, while this function is called when the
	 * hypervisor creates a guest.
	 */

	/* Count number of entries in nexus memory map */
	for (map = kmemory_map, entries = 1; map->type != MEM_AREA_END;
	     map++, entries++)
		;

	/* Allocate entries for virtual guest map */
	map = nex_calloc(entries + 1, sizeof(struct tee_mmap_region));
	if (!map)
		return NULL;

	memcpy(map, kmemory_map, sizeof(*map) * entries);

	/* Map TEE .data and .bss sections */
	for (i = 0; i < entries; i++) {
		if (map[i].va == (vaddr_t)(VCORE_UNPG_RW_PA)) {
			map[i].type = MEM_AREA_TEE_RAM_RW;
			map[i].attr = core_mmu_type_to_attr(map[i].type);
			map[i].pa = tee_data;
		}
		if (map[i].va + map[i].size > max_va)
			max_va = map[i].va + map[i].size;
	}

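	/*
	 * The new TA_RAM entry is created just before the MEM_AREA_END
	 * terminator: the terminator is copied one slot up and its old
	 * slot is reused. The VA is placed above all existing mappings,
	 * then nudged so that it is congruent with the PA modulo the
	 * page-directory size, letting VA and PA share the same offset
	 * within a translation table.
	 */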
	/* Map TA_RAM */
	assert(map[entries - 1].type == MEM_AREA_END);
	map[entries] = map[entries - 1];
	map[entries - 1].region_size = SMALL_PAGE_SIZE;
	map[entries - 1].va = ROUNDUP(max_va, map[entries - 1].region_size);
	map[entries - 1].va +=
		(ta_ram - map[entries - 1].va) & CORE_MMU_PGDIR_MASK;
	map[entries - 1].pa = ta_ram;
	map[entries - 1].size = get_ta_ram_size();
	map[entries - 1].type = MEM_AREA_TA_RAM;
	map[entries - 1].attr = core_mmu_type_to_attr(map[entries - 1].type);

	DMSG("New map (%08lx):", (vaddr_t)(VCORE_UNPG_RW_PA));

	for (i = 0; i < entries; i++)
		DMSG("T: %-16s rsz: %08x, pa: %08lx, va: %08lx, sz: %08lx attr: %x",
		     teecore_memtype_name(map[i].type),
		     map[i].region_size, map[i].pa, map[i].va,
		     map[i].size, map[i].attr);
	return map;
}

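/*
 * Called during nexus initialization with the core's memory map. Sets
 * up the page pool that all per-guest allocations are served from and
 * records the map for later use by prepare_memory_map().
 */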
void virt_init_memory(struct tee_mmap_region *memory_map)
{
	struct tee_mmap_region *map;

	/* Init page pool that covers all secure RAM */
	if (!tee_mm_init(&virt_mapper_pool, TEE_RAM_START,
			 TA_RAM_START + TA_RAM_SIZE,
			 SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NEX_MALLOC))
		panic("Can't create pool with free pages");
	DMSG("Created virtual mapper pool from %x to %x",
	     TEE_RAM_START, TA_RAM_START + TA_RAM_SIZE);

	/* Carve out areas that are used by OP-TEE core */
	for (map = memory_map; map->type != MEM_AREA_END; map++) {
		switch (map->type) {
		case MEM_AREA_TEE_RAM_RX:
		case MEM_AREA_TEE_RAM_RO:
		case MEM_AREA_NEX_RAM_RW:
			DMSG("Carving out area of type %d (0x%08lx-0x%08lx)",
			     map->type, map->pa, map->pa + map->size);
			if (!tee_mm_alloc2(&virt_mapper_pool, map->pa,
					   map->size))
				panic("Can't carve out used area");
			break;
		default:
			continue;
		}
	}

	kmemory_map = memory_map;
}

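/*
 * Allocates the guest's private copy of the writable core image, its
 * TA RAM share and its translation tables, then builds the guest's
 * memory map and MMU partition and initializes the fresh .data/.bss.
 */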
static TEE_Result configure_guest_prtn_mem(struct guest_partition *prtn)
{
	TEE_Result ret;
	paddr_t original_data_pa;

	prtn->tee_ram = tee_mm_alloc(&virt_mapper_pool, VCORE_UNPG_RW_SZ);
	if (!prtn->tee_ram) {
		EMSG("Can't allocate memory for TEE runtime context");
		ret = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TEE RAM: %08" PRIxPA, tee_mm_get_smem(prtn->tee_ram));

	prtn->ta_ram = tee_mm_alloc(&virt_mapper_pool, get_ta_ram_size());
	if (!prtn->ta_ram) {
		EMSG("Can't allocate memory for TA data");
		ret = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TA RAM: %08" PRIxPA, tee_mm_get_smem(prtn->ta_ram));

	prtn->tables = tee_mm_alloc(&virt_mapper_pool,
				    core_mmu_get_total_pages_size());
	if (!prtn->tables) {
		EMSG("Can't allocate memory for page tables");
		ret = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->tables_va = phys_to_virt(tee_mm_get_smem(prtn->tables),
				       MEM_AREA_SEC_RAM_OVERALL);
	assert(prtn->tables_va);

	prtn->mmu_prtn = core_alloc_mmu_prtn(prtn->tables_va);
	if (!prtn->mmu_prtn) {
		ret = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->memory_map = prepare_memory_map(tee_mm_get_smem(prtn->tee_ram),
					      tee_mm_get_smem(prtn->ta_ram));
	if (!prtn->memory_map) {
		ret = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	core_init_mmu_prtn(prtn->mmu_prtn, prtn->memory_map);

	original_data_pa = virt_to_phys(__data_start);
	/* Switch to guest's mappings */
	core_mmu_set_prtn(prtn->mmu_prtn);
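
	/*
	 * With the guest's MMU partition active, VCORE_UNPG_RW_PA and
	 * __data_start now resolve to the guest's private pages, so the
	 * memset()/memcpy() below initialize the guest's own .bss and
	 * .data. The original .data content is read back through the
	 * physical address saved above.
	 */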
	/* Clear .bss */
	memset((void *)(VCORE_UNPG_RW_PA), 0, VCORE_UNPG_RW_SZ);

	/* Copy .data section from the read-only original */
	memcpy(__data_start,
	       phys_to_virt(original_data_pa, MEM_AREA_SEC_RAM_OVERALL),
	       __data_end - __data_start);

	return TEE_SUCCESS;

err:
	if (prtn->tee_ram)
		tee_mm_free(prtn->tee_ram);
	if (prtn->ta_ram)
		tee_mm_free(prtn->ta_ram);
	if (prtn->tables)
		tee_mm_free(prtn->tables);
	nex_free(prtn->mmu_prtn);
	nex_free(prtn->memory_map);

	return ret;
}

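/*
 * Called when the hypervisor signals that a new guest was created (the
 * OPTEE_SMC_VM_CREATED call). Sets up the guest's partition and adds
 * it to the global list.
 */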
uint32_t virt_guest_created(uint16_t guest_id)
{
	struct guest_partition *prtn;
	uint32_t exceptions;

	prtn = nex_calloc(1, sizeof(*prtn));
	if (!prtn)
		return OPTEE_SMC_RETURN_ENOTAVAIL;

	prtn->id = guest_id;
	mutex_init(&prtn->mutex);
	refcount_set(&prtn->refc, 1);
	if (configure_guest_prtn_mem(prtn)) {
		nex_free(prtn);
		return OPTEE_SMC_RETURN_ENOTAVAIL;
	}

	set_current_prtn(prtn);

	/* Initialize threads */
	thread_init_threads();

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_INSERT_HEAD(&prtn_list, prtn, link);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	IMSG("Added guest %d", guest_id);

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();
	return OPTEE_SMC_RETURN_OK;
}

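/*
 * Called when the hypervisor signals that a guest was destroyed (the
 * OPTEE_SMC_VM_DESTROYED call). The guest must have no threads left
 * inside OP-TEE: the only remaining reference must be the one taken at
 * creation time.
 */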
uint32_t virt_guest_destroyed(uint16_t guest_id)
{
	struct guest_partition *prtn;
	uint32_t exceptions;

	IMSG("Removing guest %d", guest_id);

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);

	LIST_FOREACH(prtn, &prtn_list, link) {
		if (prtn->id == guest_id) {
			LIST_REMOVE(prtn, link);
			break;
		}
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	if (prtn) {
		if (!refcount_dec(&prtn->refc)) {
			EMSG("Guest threads are still running. refc = %u",
			     refcount_val(&prtn->refc));
			panic();
		}

		tee_mm_free(prtn->tee_ram);
		tee_mm_free(prtn->ta_ram);
		tee_mm_free(prtn->tables);
		core_free_mmu_prtn(prtn->mmu_prtn);
		nex_free(prtn->memory_map);
		nex_free(prtn);
	} else {
		EMSG("Guest with id %d not found", guest_id);
	}

	return OPTEE_SMC_RETURN_OK;
}

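/*
 * Switches the calling core to the partition of the given guest:
 * activates its mappings and takes a reference that is dropped by
 * virt_unset_guest(). Returns false if the guest is unknown, except
 * for HYP_CLNT_ID, which identifies the hypervisor client itself and
 * needs no guest partition.
 */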
bool virt_set_guest(uint16_t guest_id)
{
	struct guest_partition *prtn;
	uint32_t exceptions;

	prtn = get_current_prtn();

	/* This can be true only if we return from IRQ RPC */
	if (prtn && prtn->id == guest_id)
		return true;

	if (prtn)
		panic("Virtual guest partition is already set");

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_FOREACH(prtn, &prtn_list, link) {
		if (prtn->id == guest_id) {
			set_current_prtn(prtn);
			core_mmu_set_prtn(prtn->mmu_prtn);
			refcount_inc(&prtn->refc);
			cpu_spin_unlock_xrestore(&prtn_list_lock,
						 exceptions);
			return true;
		}
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return guest_id == HYP_CLNT_ID;
}

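/*
 * Counterpart of virt_set_guest(): detaches the calling core from the
 * current partition and drops the reference taken there. The refcount
 * must not reach zero here, since the reference taken at creation time
 * is only released in virt_guest_destroyed().
 */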
void virt_unset_guest(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return;

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();
	if (refcount_dec(&prtn->refc))
		panic();
}

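/*
 * The TEE runtime of a guest is initialized lazily, on the guest's
 * first standard call. The double check of runtime_initialized keeps
 * the fast path lock-free once initialization is done.
 */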
void virt_on_stdcall(void)
{
	struct guest_partition *prtn = get_current_prtn();

	/* Initialize runtime on first std call */
	if (!prtn->runtime_initialized) {
		mutex_lock(&prtn->mutex);
		if (!prtn->runtime_initialized) {
			init_tee_runtime();
			prtn->runtime_initialized = true;
		}
		mutex_unlock(&prtn->mutex);
	}
}

struct tee_mmap_region *virt_get_memory_map(void)
{
	struct guest_partition *prtn;

	prtn = get_current_prtn();

	if (!prtn)
		return NULL;

	return prtn->memory_map;
}

void virt_get_ta_ram(vaddr_t *start, vaddr_t *end)
{
	struct guest_partition *prtn = get_current_prtn();

	*start = (vaddr_t)phys_to_virt(tee_mm_get_smem(prtn->ta_ram),
				       MEM_AREA_TA_RAM);
	*end = *start + tee_mm_get_bytes(prtn->ta_ram);
}
393