xref: /optee_os/core/arch/arm/kernel/virtualization.c (revision 4af447d4084e293800d4e463d65003c016b91f29)
// SPDX-License-Identifier: BSD-2-Clause
/* Copyright (c) 2018, EPAM Systems. All rights reserved. */

#include <assert.h>
#include <compiler.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/tee_mm.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <string.h>
#include <util.h>

static unsigned int prtn_list_lock __nex_data = SPINLOCK_UNLOCK;

static LIST_HEAD(prtn_list_head, guest_partition) prtn_list __nex_data =
	LIST_HEAD_INITIALIZER(prtn_list_head);

/* Free pages used for guest partitions */
tee_mm_pool_t virt_mapper_pool __nex_bss;

/* Memory map used by OP-TEE core */
struct tee_mmap_region *kmemory_map __nex_bss;

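/* Per-guest state; allocated from the nexus heap with nex_calloc() */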
struct guest_partition {
	LIST_ENTRY(guest_partition) link;
	struct mmu_partition *mmu_prtn;
	struct tee_mmap_region *memory_map;
	struct mutex mutex;
	void *tables_va;
	tee_mm_entry_t *tee_ram;
	tee_mm_entry_t *ta_ram;
	tee_mm_entry_t *tables;
	bool runtime_initialized;
	uint16_t id;
	struct refcount refc;
};

static struct guest_partition *current_partition[CFG_TEE_CORE_NB_CORE]
	__nex_bss;

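/*
 * Return the guest partition currently active on this core. Foreign
 * interrupts are masked while the per-core slot is read so that the
 * access cannot be interleaved with a partition switch on this core.
 */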
static struct guest_partition *get_current_prtn(void)
{
	struct guest_partition *ret;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	ret = current_partition[get_core_pos()];

	thread_unmask_exceptions(exceptions);

	return ret;
}

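/* Bind @prtn to this core; see get_current_prtn() for the masking rationale */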
static void set_current_prtn(struct guest_partition *prtn)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	current_partition[get_core_pos()] = prtn;

	thread_unmask_exceptions(exceptions);
}

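/*
 * Each guest gets an equal share of TA RAM, minus the space consumed by
 * the guest's private copy of the core's writable image (.data and .bss)
 * and by its page tables, rounded down to a page boundary.
 *
 * Illustrative arithmetic with assumed values: TA_RAM_SIZE = 32 MiB,
 * CFG_VIRT_GUEST_COUNT = 2, VCORE_UNPG_RW_SZ = 1 MiB and 1 MiB of page
 * tables would yield ROUNDDOWN(16 MiB - 1 MiB - 1 MiB, 4 KiB) = 14 MiB
 * per guest.
 */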
static size_t get_ta_ram_size(void)
{
	return ROUNDDOWN(TA_RAM_SIZE / CFG_VIRT_GUEST_COUNT -
			 VCORE_UNPG_RW_SZ -
			 core_mmu_get_total_pages_size(), SMALL_PAGE_SIZE);
}

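/*
 * Build a per-guest copy of the nexus memory map: the core's RW section
 * is redirected to the guest's private pages at @tee_data, and a
 * MEM_AREA_TA_RAM entry backed by @ta_ram is appended before the
 * terminating MEM_AREA_END entry.
 */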
static struct tee_mmap_region *prepare_memory_map(paddr_t tee_data,
						  paddr_t ta_ram)
{
	int i, entries;
	vaddr_t max_va = 0;
	struct tee_mmap_region *map;
	/*
	 * This function assumes that at the time of operation,
	 * kmemory_map (aka static_memory_map from core_mmu.c)
	 * will not be altered. This is true because all
	 * changes to static_memory_map are done during
	 * OP-TEE initialization, while this function is
	 * called when the hypervisor creates a guest.
	 */

	/* Count number of entries in nexus memory map */
	for (map = kmemory_map, entries = 1; map->type != MEM_AREA_END;
	     map++, entries++)
		;

	/* Allocate entries for virtual guest map */
	map = nex_calloc(entries + 1, sizeof(struct tee_mmap_region));
	if (!map)
		return NULL;

	memcpy(map, kmemory_map, sizeof(*map) * entries);

	/* Map TEE .data and .bss sections */
	for (i = 0; i < entries; i++) {
		if (map[i].va == (vaddr_t)(VCORE_UNPG_RW_PA)) {
			map[i].type = MEM_AREA_TEE_RAM_RW;
			map[i].attr = core_mmu_type_to_attr(map[i].type);
			map[i].pa = tee_data;
		}
		if (map[i].va + map[i].size > max_va)
			max_va = map[i].va + map[i].size;
	}

	/* Map TA_RAM */
	assert(map[entries - 1].type == MEM_AREA_END);
	map[entries] = map[entries - 1];
	map[entries - 1].region_size = SMALL_PAGE_SIZE;
	map[entries - 1].va = ROUNDUP(max_va, map[entries - 1].region_size);
	map[entries - 1].va +=
		(ta_ram - map[entries - 1].va) & CORE_MMU_PGDIR_MASK;
	map[entries - 1].pa = ta_ram;
	map[entries - 1].size = get_ta_ram_size();
	map[entries - 1].type = MEM_AREA_TA_RAM;
	map[entries - 1].attr = core_mmu_type_to_attr(map[entries - 1].type);

	DMSG("New map (%08lx):", (vaddr_t)(VCORE_UNPG_RW_PA));

	for (i = 0; i < entries; i++)
		DMSG("T: %-16s rsz: %08x, pa: %08lx, va: %08lx, sz: %08lx attr: %x",
		     teecore_memtype_name(map[i].type),
		     map[i].region_size, map[i].pa, map[i].va,
		     map[i].size, map[i].attr);
	return map;
}

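/*
 * Called once at boot: set up the page pool spanning all secure RAM,
 * carve out the areas already used by the core itself and remember the
 * nexus memory map for later per-guest cloning.
 */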
void virt_init_memory(struct tee_mmap_region *memory_map)
{
	struct tee_mmap_region *map;

	/* Init page pool that covers all secure RAM */
	if (!tee_mm_init(&virt_mapper_pool, TEE_RAM_START,
			 TA_RAM_START + TA_RAM_SIZE,
			 SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NEX_MALLOC))
		panic("Can't create pool with free pages");
	DMSG("Created virtual mapper pool from %x to %x",
	     TEE_RAM_START, TA_RAM_START + TA_RAM_SIZE);

	/* Carve out areas that are used by OP-TEE core */
	for (map = memory_map; map->type != MEM_AREA_END; map++) {
		switch (map->type) {
		case MEM_AREA_TEE_RAM_RX:
		case MEM_AREA_TEE_RAM_RO:
		case MEM_AREA_NEX_RAM_RO:
		case MEM_AREA_NEX_RAM_RW:
			DMSG("Carving out area of type %d (0x%08lx-0x%08lx)",
			     map->type, map->pa, map->pa + map->size);
			if (!tee_mm_alloc2(&virt_mapper_pool, map->pa,
					   map->size))
				panic("Can't carve out used area");
			break;
		default:
			continue;
		}
	}

	kmemory_map = memory_map;
}

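/*
 * Allocate the guest's private TEE RAM (RW image), TA RAM and page
 * tables, build its MMU partition and memory map, then switch to the
 * new mappings so the fresh RW image gets pristine .data/.bss contents.
 */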
static TEE_Result configure_guest_prtn_mem(struct guest_partition *prtn)
{
	TEE_Result ret = TEE_SUCCESS;
	paddr_t original_data_pa;

	prtn->tee_ram = tee_mm_alloc(&virt_mapper_pool, VCORE_UNPG_RW_SZ);
	if (!prtn->tee_ram) {
		EMSG("Can't allocate memory for TEE runtime context");
		ret = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TEE RAM: %08" PRIxPA, tee_mm_get_smem(prtn->tee_ram));

	prtn->ta_ram = tee_mm_alloc(&virt_mapper_pool, get_ta_ram_size());
	if (!prtn->ta_ram) {
		EMSG("Can't allocate memory for TA data");
		ret = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	DMSG("TA RAM: %08" PRIxPA, tee_mm_get_smem(prtn->ta_ram));

	prtn->tables = tee_mm_alloc(&virt_mapper_pool,
				    core_mmu_get_total_pages_size());
	if (!prtn->tables) {
		EMSG("Can't allocate memory for page tables");
		ret = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->tables_va = phys_to_virt(tee_mm_get_smem(prtn->tables),
				       MEM_AREA_SEC_RAM_OVERALL,
				       core_mmu_get_total_pages_size());
	assert(prtn->tables_va);

	prtn->mmu_prtn = core_alloc_mmu_prtn(prtn->tables_va);
	if (!prtn->mmu_prtn) {
		ret = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	prtn->memory_map = prepare_memory_map(tee_mm_get_smem(prtn->tee_ram),
					      tee_mm_get_smem(prtn->ta_ram));
	if (!prtn->memory_map) {
		ret = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	core_init_mmu_prtn(prtn->mmu_prtn, prtn->memory_map);

	original_data_pa = virt_to_phys(__data_start);
	/* Switch to guest's mappings */
	core_mmu_set_prtn(prtn->mmu_prtn);

	/* Clear .bss */
	memset((void *)(VCORE_UNPG_RW_PA), 0, VCORE_UNPG_RW_SZ);

	/* Copy .data section from the read-only original */
	memcpy(__data_start,
	       phys_to_virt(original_data_pa, MEM_AREA_SEC_RAM_OVERALL,
			    __data_end - __data_start),
	       __data_end - __data_start);

	return TEE_SUCCESS;

err:
	if (prtn->tee_ram)
		tee_mm_free(prtn->tee_ram);
	if (prtn->ta_ram)
		tee_mm_free(prtn->ta_ram);
	if (prtn->tables)
		tee_mm_free(prtn->tables);
	nex_free(prtn->mmu_prtn);
	nex_free(prtn->memory_map);

	return ret;
}

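/*
 * Create a partition when the hypervisor reports a new guest (typically
 * via the OPTEE_SMC_VM_CREATED call). The partition starts with one
 * base reference, which is dropped by virt_guest_destroyed().
 */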
uint32_t virt_guest_created(uint16_t guest_id)
{
	struct guest_partition *prtn;
	uint32_t exceptions;

	prtn = nex_calloc(1, sizeof(*prtn));
	if (!prtn)
		return OPTEE_SMC_RETURN_ENOTAVAIL;

	prtn->id = guest_id;
	mutex_init(&prtn->mutex);
	refcount_set(&prtn->refc, 1);
	if (configure_guest_prtn_mem(prtn)) {
		nex_free(prtn);
		return OPTEE_SMC_RETURN_ENOTAVAIL;
	}

	set_current_prtn(prtn);

	/* Initialize threads */
	thread_init_threads();

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_INSERT_HEAD(&prtn_list, prtn, link);
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	IMSG("Added guest %d", guest_id);

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();
	return OPTEE_SMC_RETURN_OK;
}

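/*
 * Tear down the partition of a destroyed guest. The base reference
 * taken in virt_guest_created() must be the last one; if any guest
 * thread still holds a reference, this is a fatal error.
 */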
uint32_t virt_guest_destroyed(uint16_t guest_id)
{
	struct guest_partition *prtn;
	uint32_t exceptions;

	IMSG("Removing guest %d", guest_id);

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);

	LIST_FOREACH(prtn, &prtn_list, link) {
		if (prtn->id == guest_id) {
			LIST_REMOVE(prtn, link);
			break;
		}
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	if (prtn) {
		if (!refcount_dec(&prtn->refc)) {
			EMSG("Guest thread(s) are still running, refc = %u",
			     refcount_val(&prtn->refc));
			panic();
		}

		tee_mm_free(prtn->tee_ram);
		tee_mm_free(prtn->ta_ram);
		tee_mm_free(prtn->tables);
		core_free_mmu_prtn(prtn->mmu_prtn);
		nex_free(prtn->memory_map);
		nex_free(prtn);
	} else {
		EMSG("Guest with id %d not found", guest_id);
	}

	return OPTEE_SMC_RETURN_OK;
}

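/*
 * Activate the partition of @guest_id on this core and take a
 * reference to it. Returns false for an unknown guest, except for
 * HYP_CLNT_ID: calls from the hypervisor itself run without a
 * partition. Expected caller pattern (a sketch; the real dispatcher
 * lives in the SMC entry code):
 *
 *	if (!virt_set_guest(client_id))
 *		return OPTEE_SMC_RETURN_ENOTAVAIL;
 *	...handle the call...
 *	virt_unset_guest();
 */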
bool virt_set_guest(uint16_t guest_id)
{
	struct guest_partition *prtn;
	uint32_t exceptions;

	prtn = get_current_prtn();

	/* This can be true only if we return from IRQ RPC */
	if (prtn && prtn->id == guest_id)
		return true;

	if (prtn)
		panic("Virtual guest partition is already set");

	exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
	LIST_FOREACH(prtn, &prtn_list, link) {
		if (prtn->id == guest_id) {
			set_current_prtn(prtn);
			core_mmu_set_prtn(prtn->mmu_prtn);
			refcount_inc(&prtn->refc);
			cpu_spin_unlock_xrestore(&prtn_list_lock,
						 exceptions);
			return true;
		}
	}
	cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

	return guest_id == HYP_CLNT_ID;
}

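/*
 * Deactivate the current partition on this core and drop the reference
 * taken by virt_set_guest(). The count must not reach zero here: the
 * base reference is released in virt_guest_destroyed().
 */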
void virt_unset_guest(void)
{
	struct guest_partition *prtn = get_current_prtn();

	if (!prtn)
		return;

	set_current_prtn(NULL);
	core_mmu_set_default_prtn();
	if (refcount_dec(&prtn->refc))
		panic();
}

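/*
 * Lazy one-time TEE runtime initialization for the calling guest, done
 * with the double-checked pattern: the unlocked flag test is the fast
 * path, and the flag is re-checked under the mutex before
 * init_tee_runtime() runs.
 */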
void virt_on_stdcall(void)
{
	struct guest_partition *prtn = get_current_prtn();

	/* Initialize runtime on first std call */
	if (!prtn->runtime_initialized) {
		mutex_lock(&prtn->mutex);
		if (!prtn->runtime_initialized) {
			init_tee_runtime();
			prtn->runtime_initialized = true;
		}
		mutex_unlock(&prtn->mutex);
	}
}

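/* Return the memory map of the currently active guest, if any */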
struct tee_mmap_region *virt_get_memory_map(void)
{
	struct guest_partition *prtn;

	prtn = get_current_prtn();

	if (!prtn)
		return NULL;

	return prtn->memory_map;
}

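/* Return the virtual address range of the current guest's TA RAM */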
void virt_get_ta_ram(vaddr_t *start, vaddr_t *end)
{
	struct guest_partition *prtn = get_current_prtn();

	*start = (vaddr_t)phys_to_virt(tee_mm_get_smem(prtn->ta_ram),
				       MEM_AREA_TA_RAM,
				       tee_mm_get_bytes(prtn->ta_ram));
	*end = *start + tee_mm_get_bytes(prtn->ta_ram);
}
397