xref: /optee_os/core/kernel/ldelf_syscalls.c (revision cbe7e1b87977e7dff91e859f5ff0c839b9387c4a)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2018-2019, Linaro Limited
4  * Copyright (c) 2020, Arm Limited
5  */
6 
7 #include <assert.h>
8 #include <kernel/ldelf_syscalls.h>
9 #include <kernel/user_mode_ctx.h>
10 #include <ldelf.h>
11 #include <mm/file.h>
12 #include <mm/fobj.h>
13 #include <mm/mobj.h>
14 #include <mm/vm.h>
15 #include <pta_system.h>
16 #include <stdlib.h>
17 #include <string.h>
18 #include <trace.h>
19 #include <util.h>
20 
/*
 * State of one open TA binary, returned to ldelf as an opaque handle.
 * The store is read as a sequential stream: offs_bytes tracks how far
 * into the binary we have read so far and only moves forward.
 */
struct bin_handle {
	const struct ts_store_ops *op;	/* Store backend owning @h */
	struct ts_store_handle *h;	/* Open handle in that store */
	struct file *f;			/* File keyed by the binary's tag */
	size_t offs_bytes;		/* Current read offset in the stream */
	size_t size_bytes;		/* Total size of the binary */
};
28 
29 void ta_bin_close(void *ptr)
30 {
31 	struct bin_handle *binh = ptr;
32 
33 	if (binh) {
34 		if (binh->op && binh->h)
35 			binh->op->close(binh->h);
36 		file_put(binh->f);
37 	}
38 	free(binh);
39 }
40 
41 TEE_Result ldelf_open_ta_binary(struct system_ctx *ctx, uint32_t param_types,
42 				TEE_Param params[TEE_NUM_PARAMS])
43 {
44 	TEE_Result res = TEE_SUCCESS;
45 	struct bin_handle *binh = NULL;
46 	int h = 0;
47 	TEE_UUID *uuid = NULL;
48 	uint8_t tag[FILE_TAG_SIZE] = { 0 };
49 	unsigned int tag_len = sizeof(tag);
50 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
51 					  TEE_PARAM_TYPE_VALUE_OUTPUT,
52 					  TEE_PARAM_TYPE_NONE,
53 					  TEE_PARAM_TYPE_NONE);
54 
55 	if (exp_pt != param_types)
56 		return TEE_ERROR_BAD_PARAMETERS;
57 	if (params[0].memref.size != sizeof(*uuid))
58 		return TEE_ERROR_BAD_PARAMETERS;
59 
60 	uuid = params[0].memref.buffer;
61 
62 	binh = calloc(1, sizeof(*binh));
63 	if (!binh)
64 		return TEE_ERROR_OUT_OF_MEMORY;
65 
66 	SCATTERED_ARRAY_FOREACH(binh->op, ta_stores, struct ts_store_ops) {
67 		DMSG("Lookup user TA ELF %pUl (%s)",
68 		     (void *)uuid, binh->op->description);
69 
70 		res = binh->op->open(uuid, &binh->h);
71 		DMSG("res=0x%x", res);
72 		if (res != TEE_ERROR_ITEM_NOT_FOUND &&
73 		    res != TEE_ERROR_STORAGE_NOT_AVAILABLE)
74 			break;
75 	}
76 	if (res)
77 		goto err;
78 
79 	res = binh->op->get_size(binh->h, &binh->size_bytes);
80 	if (res)
81 		goto err;
82 	res = binh->op->get_tag(binh->h, tag, &tag_len);
83 	if (res)
84 		goto err;
85 	binh->f = file_get_by_tag(tag, tag_len);
86 	if (!binh->f)
87 		goto err_oom;
88 
89 	h = handle_get(&ctx->db, binh);
90 	if (h < 0)
91 		goto err_oom;
92 	params[0].value.a = h;
93 
94 	return TEE_SUCCESS;
95 err_oom:
96 	res = TEE_ERROR_OUT_OF_MEMORY;
97 err:
98 	ta_bin_close(binh);
99 	return res;
100 }
101 
102 TEE_Result ldelf_close_ta_binary(struct system_ctx *ctx, uint32_t param_types,
103 				 TEE_Param params[TEE_NUM_PARAMS])
104 {
105 	TEE_Result res = TEE_SUCCESS;
106 	struct bin_handle *binh = NULL;
107 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
108 					  TEE_PARAM_TYPE_NONE,
109 					  TEE_PARAM_TYPE_NONE,
110 					  TEE_PARAM_TYPE_NONE);
111 
112 	if (exp_pt != param_types)
113 		return TEE_ERROR_BAD_PARAMETERS;
114 
115 	if (params[0].value.b)
116 		return TEE_ERROR_BAD_PARAMETERS;
117 
118 	binh = handle_put(&ctx->db, params[0].value.a);
119 	if (!binh)
120 		return TEE_ERROR_BAD_PARAMETERS;
121 
122 	if (binh->offs_bytes < binh->size_bytes)
123 		res = binh->op->read(binh->h, NULL,
124 				     binh->size_bytes - binh->offs_bytes);
125 
126 	ta_bin_close(binh);
127 
128 	return res;
129 }
130 
131 static TEE_Result binh_copy_to(struct bin_handle *binh, vaddr_t va,
132 			       size_t offs_bytes, size_t num_bytes)
133 {
134 	TEE_Result res = TEE_SUCCESS;
135 	size_t next_offs = 0;
136 
137 	if (offs_bytes < binh->offs_bytes)
138 		return TEE_ERROR_BAD_STATE;
139 
140 	if (ADD_OVERFLOW(offs_bytes, num_bytes, &next_offs))
141 		return TEE_ERROR_BAD_PARAMETERS;
142 
143 	if (offs_bytes > binh->offs_bytes) {
144 		res = binh->op->read(binh->h, NULL,
145 				     offs_bytes - binh->offs_bytes);
146 		if (res)
147 			return res;
148 		binh->offs_bytes = offs_bytes;
149 	}
150 
151 	if (next_offs > binh->size_bytes) {
152 		size_t rb = binh->size_bytes - binh->offs_bytes;
153 
154 		res = binh->op->read(binh->h, (void *)va, rb);
155 		if (res)
156 			return res;
157 		memset((uint8_t *)va + rb, 0, num_bytes - rb);
158 		binh->offs_bytes = binh->size_bytes;
159 	} else {
160 		res = binh->op->read(binh->h, (void *)va, num_bytes);
161 		if (res)
162 			return res;
163 		binh->offs_bytes = next_offs;
164 	}
165 
166 	return TEE_SUCCESS;
167 }
168 
/*
 * Map a page-aligned range of an open TA binary into the user mode
 * context @uctx.
 *
 * params[0].value.a	binary handle from ldelf_open_ta_binary()
 * params[0].value.b	PTA_SYSTEM_MAP_FLAG_* flags
 * params[1].value.a/b	offset into the binary / number of bytes
 * params[2].value.a/b	in: requested VA hint, out: resulting VA
 * params[3].value.a/b	extra unmapped padding before / after the range
 *
 * Read-only ranges are backed by a per-file "slice" so that a second
 * mapping of the same range (flagged SHAREABLE) reuses the same
 * physical pages instead of reading the binary again.
 */
TEE_Result ldelf_map_ta_binary(struct system_ctx *ctx,
			       struct user_mode_ctx *uctx,
			       uint32_t param_types,
			       TEE_Param params[TEE_NUM_PARAMS])
{
	const uint32_t accept_flags = PTA_SYSTEM_MAP_FLAG_SHAREABLE |
				      PTA_SYSTEM_MAP_FLAG_WRITEABLE |
				      PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_VALUE_INOUT,
					  TEE_PARAM_TYPE_VALUE_INPUT);
	struct bin_handle *binh = NULL;
	uint32_t num_rounded_bytes = 0;
	TEE_Result res = TEE_SUCCESS;
	struct file_slice *fs = NULL;
	bool file_is_locked = false;
	struct mobj *mobj = NULL;
	uint32_t offs_bytes = 0;
	uint32_t offs_pages = 0;
	uint32_t num_bytes = 0;
	uint32_t pad_begin = 0;
	uint32_t pad_end = 0;
	size_t num_pages = 0;
	uint32_t flags = 0;
	uint32_t prot = 0;
	vaddr_t va = 0;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_lookup(&ctx->db, params[0].value.a);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;
	flags = params[0].value.b;
	offs_bytes = params[1].value.a;
	num_bytes = params[1].value.b;
	va = reg_pair_to_64(params[2].value.a, params[2].value.b);
	pad_begin = params[3].value.a;
	pad_end = params[3].value.b;

	/* Reject any flag bits outside the accepted set */
	if ((flags & accept_flags) != flags)
		return TEE_ERROR_BAD_PARAMETERS;

	/* Writeable mappings are private: they can't be shared ... */
	if ((flags & PTA_SYSTEM_MAP_FLAG_SHAREABLE) &&
	    (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	/* ... and W^X: they can't be executable either */
	if ((flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE) &&
	    (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	/* The offset into the binary must be page aligned */
	if (offs_bytes & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	prot = TEE_MATTR_UR | TEE_MATTR_PR;
	if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)
		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
	if (flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE)
		prot |= TEE_MATTR_UX;

	offs_pages = offs_bytes >> SMALL_PAGE_SHIFT;
	if (ROUNDUP_OVERFLOW(num_bytes, SMALL_PAGE_SIZE, &num_rounded_bytes))
		return TEE_ERROR_BAD_PARAMETERS;
	num_pages = num_rounded_bytes / SMALL_PAGE_SIZE;

	if (!file_trylock(binh->f)) {
		/*
		 * Before we can block on the file lock we must make all
		 * our page tables available for reclaiming in order to
		 * avoid a dead-lock with the other thread (which already
		 * is holding the file lock) mapping lots of memory below.
		 */
		vm_set_ctx(NULL);
		file_lock(binh->f);
		vm_set_ctx(uctx->ts_ctx);
	}
	file_is_locked = true;
	fs = file_find_slice(binh->f, offs_pages);
	if (fs) {
		/* If there's registered slice it has to match */
		if (fs->page_offset != offs_pages ||
		    num_pages > fs->fobj->num_pages) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		/* If there's a slice we must be mapping shareable */
		if (!(flags & PTA_SYSTEM_MAP_FLAG_SHAREABLE)) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		/* Reuse the already populated pages of the slice */
		mobj = mobj_with_fobj_alloc(fs->fobj, binh->f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		res = vm_map_pad(uctx, &va, num_rounded_bytes,
				 prot, VM_FLAG_READONLY,
				 mobj, 0, pad_begin, pad_end, 0);
		mobj_put(mobj);
		if (res)
			goto err;
	} else {
		struct fobj *f = fobj_ta_mem_alloc(num_pages);
		struct file *file = NULL;
		uint32_t vm_flags = 0;

		if (!f) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		/*
		 * Only read-only mappings are associated with the file
		 * so they can later be found and shared via a slice.
		 */
		if (!(flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)) {
			file = binh->f;
			vm_flags |= VM_FLAG_READONLY;
		}

		mobj = mobj_with_fobj_alloc(f, file);
		fobj_put(f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		/*
		 * Map writeable first so the binary contents can be
		 * copied in; the final protection is applied below.
		 */
		res = vm_map_pad(uctx, &va, num_rounded_bytes,
				 TEE_MATTR_PRW, vm_flags, mobj, 0,
				 pad_begin, pad_end, 0);
		mobj_put(mobj);
		if (res)
			goto err;
		res = binh_copy_to(binh, va, offs_bytes, num_bytes);
		if (res)
			goto err_unmap_va;
		res = vm_set_prot(uctx, va, num_rounded_bytes,
				  prot);
		if (res)
			goto err_unmap_va;

		/*
		 * The context currently is active set it again to update
		 * the mapping.
		 */
		vm_set_ctx(uctx->ts_ctx);

		if (!(flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)) {
			res = file_add_slice(binh->f, f, offs_pages);
			if (res)
				goto err_unmap_va;
		}
	}

	file_unlock(binh->f);

	reg_pair_from_64(va, &params[2].value.a, &params[2].value.b);
	return TEE_SUCCESS;

err_unmap_va:
	if (vm_unmap(uctx, va, num_rounded_bytes))
		panic();

	/*
	 * The context currently is active set it again to update
	 * the mapping.
	 */
	vm_set_ctx(uctx->ts_ctx);

err:
	if (file_is_locked)
		file_unlock(binh->f);

	return res;
}
341 
342 TEE_Result ldelf_copy_from_ta_binary(struct system_ctx *ctx,
343 				     uint32_t param_types,
344 				     TEE_Param params[TEE_NUM_PARAMS])
345 {
346 	struct bin_handle *binh = NULL;
347 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
348 					  TEE_PARAM_TYPE_MEMREF_OUTPUT,
349 					  TEE_PARAM_TYPE_NONE,
350 					  TEE_PARAM_TYPE_NONE);
351 
352 	if (exp_pt != param_types)
353 		return TEE_ERROR_BAD_PARAMETERS;
354 
355 	binh = handle_lookup(&ctx->db, params[0].value.a);
356 	if (!binh)
357 		return TEE_ERROR_BAD_PARAMETERS;
358 
359 	return binh_copy_to(binh, (vaddr_t)params[1].memref.buffer,
360 			    params[0].value.b, params[1].memref.size);
361 }
362 
363 TEE_Result ldelf_set_prot(struct user_mode_ctx *uctx, uint32_t param_types,
364 			  TEE_Param params[TEE_NUM_PARAMS])
365 {
366 	const uint32_t accept_flags = PTA_SYSTEM_MAP_FLAG_WRITEABLE |
367 				      PTA_SYSTEM_MAP_FLAG_EXECUTABLE;
368 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
369 					  TEE_PARAM_TYPE_VALUE_INPUT,
370 					  TEE_PARAM_TYPE_NONE,
371 					  TEE_PARAM_TYPE_NONE);
372 	uint32_t prot = TEE_MATTR_UR | TEE_MATTR_PR;
373 	TEE_Result res = TEE_SUCCESS;
374 	uint32_t vm_flags = 0;
375 	uint32_t flags = 0;
376 	vaddr_t end_va = 0;
377 	vaddr_t va = 0;
378 	size_t sz = 0;
379 
380 	if (exp_pt != param_types)
381 		return TEE_ERROR_BAD_PARAMETERS;
382 
383 	flags = params[0].value.b;
384 
385 	if ((flags & accept_flags) != flags)
386 		return TEE_ERROR_BAD_PARAMETERS;
387 	if (flags & PTA_SYSTEM_MAP_FLAG_WRITEABLE)
388 		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
389 	if (flags & PTA_SYSTEM_MAP_FLAG_EXECUTABLE)
390 		prot |= TEE_MATTR_UX;
391 
392 	va = reg_pair_to_64(params[1].value.a, params[1].value.b);
393 	sz = ROUNDUP(params[0].value.a, SMALL_PAGE_SIZE);
394 
395 	/*
396 	 * The vm_get_flags() and vm_set_prot() are supposed to detect or
397 	 * handle overflow directly or indirectly. However, this function
398 	 * an API function so an extra guard here is in order. If nothing
399 	 * else to make it easier to review the code.
400 	 */
401 	if (ADD_OVERFLOW(va, sz, &end_va))
402 		return TEE_ERROR_BAD_PARAMETERS;
403 
404 	res = vm_get_flags(uctx, va, sz, &vm_flags);
405 	if (res)
406 		return res;
407 	if (vm_flags & VM_FLAG_PERMANENT)
408 		return TEE_ERROR_ACCESS_DENIED;
409 
410 	/*
411 	 * If the segment is a mapping of a part of a file (vm_flags &
412 	 * VM_FLAG_READONLY) it cannot be made writeable as all mapped
413 	 * files are mapped read-only.
414 	 */
415 	if ((vm_flags & VM_FLAG_READONLY) &&
416 	    (prot & (TEE_MATTR_UW | TEE_MATTR_PW)))
417 		return TEE_ERROR_ACCESS_DENIED;
418 
419 	return vm_set_prot(uctx, va, sz, prot);
420 }
421 
422 TEE_Result ldelf_remap(struct user_mode_ctx *uctx, uint32_t param_types,
423 		       TEE_Param params[TEE_NUM_PARAMS])
424 {
425 	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_VALUE_INPUT,
426 					  TEE_PARAM_TYPE_VALUE_INPUT,
427 					  TEE_PARAM_TYPE_VALUE_INOUT,
428 					  TEE_PARAM_TYPE_VALUE_INPUT);
429 	TEE_Result res = TEE_SUCCESS;
430 	uint32_t num_bytes = 0;
431 	uint32_t pad_begin = 0;
432 	uint32_t vm_flags = 0;
433 	uint32_t pad_end = 0;
434 	vaddr_t old_va = 0;
435 	vaddr_t new_va = 0;
436 
437 	if (exp_pt != param_types)
438 		return TEE_ERROR_BAD_PARAMETERS;
439 
440 	num_bytes = params[0].value.a;
441 	old_va = reg_pair_to_64(params[1].value.a, params[1].value.b);
442 	new_va = reg_pair_to_64(params[2].value.a, params[2].value.b);
443 	pad_begin = params[3].value.a;
444 	pad_end = params[3].value.b;
445 
446 	res = vm_get_flags(uctx, old_va, num_bytes, &vm_flags);
447 	if (res)
448 		return res;
449 	if (vm_flags & VM_FLAG_PERMANENT)
450 		return TEE_ERROR_ACCESS_DENIED;
451 
452 	res = vm_remap(uctx, &new_va, old_va, num_bytes, pad_begin,
453 		       pad_end);
454 	if (!res)
455 		reg_pair_from_64(new_va, &params[2].value.a,
456 				 &params[2].value.b);
457 
458 	return res;
459 }
460