xref: /optee_os/core/kernel/ldelf_syscalls.c (revision 9fc2442cc66c279cb962c90c4375746fc9b28bb9)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2018-2019, Linaro Limited
4  * Copyright (c) 2020, Arm Limited
5  */
6 
7 #include <assert.h>
8 #include <crypto/crypto.h>
9 #include <kernel/ldelf_syscalls.h>
10 #include <kernel/user_mode_ctx.h>
11 #include <ldelf.h>
12 #include <mm/file.h>
13 #include <mm/fobj.h>
14 #include <mm/mobj.h>
15 #include <mm/vm.h>
16 #include <stdlib.h>
17 #include <string.h>
18 #include <trace.h>
19 #include <util.h>
20 
/*
 * State for one open TA binary, tracked per ldelf "open_bin" handle.
 *
 * @op:         operations of the TA store the binary was found in
 * @h:          store-specific handle returned by @op->open()
 * @f:          file object keyed by the binary's tag, used to share
 *              read-only pages between contexts mapping the same binary
 * @offs_bytes: current sequential read offset into the binary
 * @size_bytes: total binary size as reported by @op->get_size()
 */
struct bin_handle {
	const struct ts_store_ops *op;
	struct ts_store_handle *h;
	struct file *f;
	size_t offs_bytes;
	size_t size_bytes;
};
28 
29 TEE_Result ldelf_syscall_map_zi(vaddr_t *va, size_t num_bytes, size_t pad_begin,
30 				size_t pad_end, unsigned long flags)
31 {
32 	TEE_Result res = TEE_SUCCESS;
33 	struct ts_session *sess = ts_get_current_session();
34 	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
35 	struct fobj *f = NULL;
36 	struct mobj *mobj = NULL;
37 	uint32_t prot = TEE_MATTR_URW | TEE_MATTR_PRW;
38 	uint32_t vm_flags = 0;
39 
40 	if (flags & ~LDELF_MAP_FLAG_SHAREABLE)
41 		return TEE_ERROR_BAD_PARAMETERS;
42 
43 	if (flags & LDELF_MAP_FLAG_SHAREABLE)
44 		vm_flags |= VM_FLAG_SHAREABLE;
45 
46 	f = fobj_ta_mem_alloc(ROUNDUP_DIV(num_bytes, SMALL_PAGE_SIZE));
47 	if (!f)
48 		return TEE_ERROR_OUT_OF_MEMORY;
49 	mobj = mobj_with_fobj_alloc(f, NULL);
50 	fobj_put(f);
51 	if (!mobj)
52 		return TEE_ERROR_OUT_OF_MEMORY;
53 	res = vm_map_pad(uctx, va, num_bytes, prot, vm_flags,
54 			 mobj, 0, pad_begin, pad_end, 0);
55 	mobj_put(mobj);
56 
57 	return res;
58 }
59 
60 TEE_Result ldelf_syscall_unmap(vaddr_t va, size_t num_bytes)
61 {
62 	TEE_Result res = TEE_SUCCESS;
63 	struct ts_session *sess = ts_get_current_session();
64 	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
65 	size_t sz = ROUNDUP(num_bytes, SMALL_PAGE_SIZE);
66 	uint32_t vm_flags = 0;
67 	vaddr_t end_va = 0;
68 
69 	/*
70 	 * The vm_get_flags() and vm_unmap() are supposed to detect or handle
71 	 * overflow directly or indirectly. However, since this function is an
72 	 * API function it's worth having an extra guard here. If nothing else,
73 	 * to increase code clarity.
74 	 */
75 	if (ADD_OVERFLOW(va, sz, &end_va))
76 		return TEE_ERROR_BAD_PARAMETERS;
77 
78 	res = vm_get_flags(uctx, va, sz, &vm_flags);
79 	if (res)
80 		return res;
81 	if (vm_flags & VM_FLAG_PERMANENT)
82 		return TEE_ERROR_ACCESS_DENIED;
83 
84 	return vm_unmap(uctx, va, sz);
85 }
86 
87 static void bin_close(void *ptr)
88 {
89 	struct bin_handle *binh = ptr;
90 
91 	if (binh) {
92 		if (binh->op && binh->h)
93 			binh->op->close(binh->h);
94 		file_put(binh->f);
95 	}
96 	free(binh);
97 }
98 
99 TEE_Result ldelf_syscall_open_bin(const TEE_UUID *uuid, size_t uuid_size,
100 				  uint32_t *handle)
101 {
102 	TEE_Result res = TEE_SUCCESS;
103 	struct ts_session *sess = ts_get_current_session();
104 	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
105 	struct system_ctx *sys_ctx = sess->user_ctx;
106 	struct bin_handle *binh = NULL;
107 	uint8_t tag[FILE_TAG_SIZE] = { 0 };
108 	unsigned int tag_len = sizeof(tag);
109 	int h = 0;
110 
111 	res = vm_check_access_rights(uctx,
112 				     TEE_MEMORY_ACCESS_READ |
113 				     TEE_MEMORY_ACCESS_ANY_OWNER,
114 				     (uaddr_t)uuid, sizeof(TEE_UUID));
115 	if (res)
116 		return res;
117 
118 	res = vm_check_access_rights(uctx,
119 				     TEE_MEMORY_ACCESS_WRITE |
120 				     TEE_MEMORY_ACCESS_ANY_OWNER,
121 				     (uaddr_t)handle, sizeof(uint32_t));
122 	if (res)
123 		return res;
124 
125 	if (uuid_size != sizeof(*uuid))
126 		return TEE_ERROR_BAD_PARAMETERS;
127 
128 	if (!sys_ctx) {
129 		sys_ctx = calloc(1, sizeof(*sys_ctx));
130 		if (!sys_ctx)
131 			return TEE_ERROR_OUT_OF_MEMORY;
132 		sess->user_ctx = sys_ctx;
133 	}
134 
135 	binh = calloc(1, sizeof(*binh));
136 	if (!binh)
137 		return TEE_ERROR_OUT_OF_MEMORY;
138 
139 	SCATTERED_ARRAY_FOREACH(binh->op, ta_stores, struct ts_store_ops) {
140 		DMSG("Lookup user TA ELF %pUl (%s)",
141 		     (void *)uuid, binh->op->description);
142 
143 		res = binh->op->open(uuid, &binh->h);
144 		DMSG("res=%#"PRIx32, res);
145 		if (res != TEE_ERROR_ITEM_NOT_FOUND &&
146 		    res != TEE_ERROR_STORAGE_NOT_AVAILABLE)
147 			break;
148 	}
149 	if (res)
150 		goto err;
151 
152 	res = binh->op->get_size(binh->h, &binh->size_bytes);
153 	if (res)
154 		goto err;
155 	res = binh->op->get_tag(binh->h, tag, &tag_len);
156 	if (res)
157 		goto err;
158 	binh->f = file_get_by_tag(tag, tag_len);
159 	if (!binh->f)
160 		goto err_oom;
161 
162 	h = handle_get(&sys_ctx->db, binh);
163 	if (h < 0)
164 		goto err_oom;
165 	*handle = h;
166 
167 	return TEE_SUCCESS;
168 
169 err_oom:
170 	res = TEE_ERROR_OUT_OF_MEMORY;
171 err:
172 	bin_close(binh);
173 	return res;
174 }
175 
176 TEE_Result ldelf_syscall_close_bin(unsigned long handle)
177 {
178 	TEE_Result res = TEE_SUCCESS;
179 	struct ts_session *sess = ts_get_current_session();
180 	struct system_ctx *sys_ctx = sess->user_ctx;
181 	struct bin_handle *binh = NULL;
182 
183 	if (!sys_ctx)
184 		return TEE_ERROR_BAD_PARAMETERS;
185 
186 	binh = handle_put(&sys_ctx->db, handle);
187 	if (!binh)
188 		return TEE_ERROR_BAD_PARAMETERS;
189 
190 	if (binh->offs_bytes < binh->size_bytes)
191 		res = binh->op->read(binh->h, NULL,
192 				     binh->size_bytes - binh->offs_bytes);
193 
194 	bin_close(binh);
195 	if (handle_db_is_empty(&sys_ctx->db)) {
196 		handle_db_destroy(&sys_ctx->db, bin_close);
197 		free(sys_ctx);
198 		sess->user_ctx = NULL;
199 	}
200 
201 	return res;
202 }
203 
204 static TEE_Result binh_copy_to(struct bin_handle *binh, vaddr_t va,
205 			       size_t offs_bytes, size_t num_bytes)
206 {
207 	TEE_Result res = TEE_SUCCESS;
208 	size_t next_offs = 0;
209 
210 	if (offs_bytes < binh->offs_bytes)
211 		return TEE_ERROR_BAD_STATE;
212 
213 	if (ADD_OVERFLOW(offs_bytes, num_bytes, &next_offs))
214 		return TEE_ERROR_BAD_PARAMETERS;
215 
216 	if (offs_bytes > binh->offs_bytes) {
217 		res = binh->op->read(binh->h, NULL,
218 				     offs_bytes - binh->offs_bytes);
219 		if (res)
220 			return res;
221 		binh->offs_bytes = offs_bytes;
222 	}
223 
224 	if (next_offs > binh->size_bytes) {
225 		size_t rb = binh->size_bytes - binh->offs_bytes;
226 
227 		res = binh->op->read(binh->h, (void *)va, rb);
228 		if (res)
229 			return res;
230 		memset((uint8_t *)va + rb, 0, num_bytes - rb);
231 		binh->offs_bytes = binh->size_bytes;
232 	} else {
233 		res = binh->op->read(binh->h, (void *)va, num_bytes);
234 		if (res)
235 			return res;
236 		binh->offs_bytes = next_offs;
237 	}
238 
239 	return TEE_SUCCESS;
240 }
241 
/*
 * Map (part of) an open TA binary into the current user mode context.
 *
 * If a matching read-only slice of the binary has already been registered
 * on the backing file (by another context), that slice's physical pages are
 * shared. Otherwise fresh pages are allocated, the content is copied in
 * from the store, and — for read-only mappings — the new slice is
 * registered on the file so later mappings can share it.
 *
 * @va:         in/out: hint/resulting virtual address of the mapping
 * @num_bytes:  size of the mapping, rounded up to whole pages
 * @handle:     handle from ldelf_syscall_open_bin()
 * @offs_bytes: page-aligned offset into the binary
 * @pad_begin:  extra unmapped guard space requested before the mapping
 * @pad_end:    extra unmapped guard space requested after the mapping
 * @flags:      LDELF_MAP_FLAG_{SHAREABLE,WRITEABLE,EXECUTABLE}; writeable
 *              is mutually exclusive with both shareable and executable
 */
TEE_Result ldelf_syscall_map_bin(vaddr_t *va, size_t num_bytes,
				 unsigned long handle, size_t offs_bytes,
				 size_t pad_begin, size_t pad_end,
				 unsigned long flags)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	struct system_ctx *sys_ctx = sess->user_ctx;
	struct bin_handle *binh = NULL;
	uint32_t num_rounded_bytes = 0;
	struct file_slice *fs = NULL;
	bool file_is_locked = false;
	struct mobj *mobj = NULL;
	uint32_t offs_pages = 0;
	size_t num_pages = 0;
	uint32_t prot = 0;
	const uint32_t accept_flags = LDELF_MAP_FLAG_SHAREABLE |
				      LDELF_MAP_FLAG_WRITEABLE |
				      LDELF_MAP_FLAG_EXECUTABLE;

	if (!sys_ctx)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_lookup(&sys_ctx->db, handle);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;

	/* Reject unknown flags and forbidden flag combinations (W^X etc.) */
	if ((flags & accept_flags) != flags)
		return TEE_ERROR_BAD_PARAMETERS;

	if ((flags & LDELF_MAP_FLAG_SHAREABLE) &&
	    (flags & LDELF_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	if ((flags & LDELF_MAP_FLAG_EXECUTABLE) &&
	    (flags & LDELF_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	/* File slices are tracked per page, so the offset must be aligned */
	if (offs_bytes & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	prot = TEE_MATTR_UR | TEE_MATTR_PR;
	if (flags & LDELF_MAP_FLAG_WRITEABLE)
		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
	if (flags & LDELF_MAP_FLAG_EXECUTABLE)
		prot |= TEE_MATTR_UX;

	offs_pages = offs_bytes >> SMALL_PAGE_SHIFT;
	if (ROUNDUP_OVERFLOW(num_bytes, SMALL_PAGE_SIZE, &num_rounded_bytes))
		return TEE_ERROR_BAD_PARAMETERS;
	num_pages = num_rounded_bytes / SMALL_PAGE_SIZE;

	if (!file_trylock(binh->f)) {
		/*
		 * Before we can block on the file lock we must make all
		 * our page tables available for reclaiming in order to
		 * avoid a dead-lock with the other thread (which already
		 * is holding the file lock) mapping lots of memory below.
		 */
		vm_set_ctx(NULL);
		file_lock(binh->f);
		vm_set_ctx(uctx->ts_ctx);
	}
	file_is_locked = true;
	fs = file_find_slice(binh->f, offs_pages);
	if (fs) {
		/* If there's registered slice it has to match */
		if (fs->page_offset != offs_pages ||
		    num_pages > fs->fobj->num_pages) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		/* If there's a slice we must be mapping shareable */
		if (!(flags & LDELF_MAP_FLAG_SHAREABLE)) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		/* Share the already-populated pages of the existing slice */
		mobj = mobj_with_fobj_alloc(fs->fobj, binh->f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		res = vm_map_pad(uctx, va, num_rounded_bytes,
				 prot, VM_FLAG_READONLY,
				 mobj, 0, pad_begin, pad_end, 0);
		mobj_put(mobj);
		if (res)
			goto err;
	} else {
		struct fobj *f = fobj_ta_mem_alloc(num_pages);
		struct file *file = NULL;
		uint32_t vm_flags = 0;

		if (!f) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		/*
		 * Only read-only mappings are associated with the file:
		 * writeable mappings are private to this context.
		 */
		if (!(flags & LDELF_MAP_FLAG_WRITEABLE)) {
			file = binh->f;
			vm_flags |= VM_FLAG_READONLY;
		}

		mobj = mobj_with_fobj_alloc(f, file);
		fobj_put(f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		/*
		 * Map kernel-writeable first so the content can be copied
		 * in; the final protection is applied below.
		 */
		res = vm_map_pad(uctx, va, num_rounded_bytes,
				 TEE_MATTR_PRW, vm_flags, mobj, 0,
				 pad_begin, pad_end, 0);
		mobj_put(mobj);
		if (res)
			goto err;
		res = binh_copy_to(binh, *va, offs_bytes, num_bytes);
		if (res)
			goto err_unmap_va;
		res = vm_set_prot(uctx, *va, num_rounded_bytes,
				  prot);
		if (res)
			goto err_unmap_va;

		/*
		 * The context currently is active set it again to update
		 * the mapping.
		 */
		vm_set_ctx(uctx->ts_ctx);

		if (!(flags & LDELF_MAP_FLAG_WRITEABLE)) {
			res = file_add_slice(binh->f, f, offs_pages);
			if (res)
				goto err_unmap_va;
		}
	}

	file_unlock(binh->f);

	return TEE_SUCCESS;

err_unmap_va:
	if (vm_unmap(uctx, *va, num_rounded_bytes))
		panic();

	/*
	 * The context currently is active set it again to update
	 * the mapping.
	 */
	vm_set_ctx(uctx->ts_ctx);

err:
	if (file_is_locked)
		file_unlock(binh->f);

	return res;
}
400 
401 TEE_Result ldelf_syscall_copy_from_bin(void *dst, size_t offs, size_t num_bytes,
402 				       unsigned long handle)
403 {
404 	TEE_Result res = TEE_SUCCESS;
405 	struct ts_session *sess = ts_get_current_session();
406 	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
407 	struct system_ctx *sys_ctx = sess->user_ctx;
408 	struct bin_handle *binh = NULL;
409 
410 	res = vm_check_access_rights(uctx,
411 				     TEE_MEMORY_ACCESS_WRITE |
412 				     TEE_MEMORY_ACCESS_ANY_OWNER,
413 				     (uaddr_t)dst, num_bytes);
414 	if (res)
415 		return res;
416 
417 	if (!sys_ctx)
418 		return TEE_ERROR_BAD_PARAMETERS;
419 
420 	binh = handle_lookup(&sys_ctx->db, handle);
421 	if (!binh)
422 		return TEE_ERROR_BAD_PARAMETERS;
423 
424 	return binh_copy_to(binh, (vaddr_t)dst, offs, num_bytes);
425 }
426 
427 TEE_Result ldelf_syscall_set_prot(unsigned long va, size_t num_bytes,
428 				  unsigned long flags)
429 {
430 	TEE_Result res = TEE_SUCCESS;
431 	struct ts_session *sess = ts_get_current_session();
432 	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
433 	size_t sz = ROUNDUP(num_bytes, SMALL_PAGE_SIZE);
434 	uint32_t prot = TEE_MATTR_UR | TEE_MATTR_PR;
435 	uint32_t vm_flags = 0;
436 	vaddr_t end_va = 0;
437 	const uint32_t accept_flags = LDELF_MAP_FLAG_WRITEABLE |
438 				      LDELF_MAP_FLAG_EXECUTABLE;
439 
440 	if ((flags & accept_flags) != flags)
441 		return TEE_ERROR_BAD_PARAMETERS;
442 	if (flags & LDELF_MAP_FLAG_WRITEABLE)
443 		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
444 	if (flags & LDELF_MAP_FLAG_EXECUTABLE)
445 		prot |= TEE_MATTR_UX;
446 
447 	/*
448 	 * The vm_get_flags() and vm_unmap() are supposed to detect or handle
449 	 * overflow directly or indirectly. However, since this function is an
450 	 * API function it's worth having an extra guard here. If nothing else,
451 	 * to increase code clarity.
452 	 */
453 	if (ADD_OVERFLOW(va, sz, &end_va))
454 		return TEE_ERROR_BAD_PARAMETERS;
455 
456 	res = vm_get_flags(uctx, va, sz, &vm_flags);
457 	if (res)
458 		return res;
459 	if (vm_flags & VM_FLAG_PERMANENT)
460 		return TEE_ERROR_ACCESS_DENIED;
461 
462 	/*
463 	 * If the segment is a mapping of a part of a file (vm_flags &
464 	 * VM_FLAG_READONLY) it cannot be made writeable as all mapped
465 	 * files are mapped read-only.
466 	 */
467 	if ((vm_flags & VM_FLAG_READONLY) &&
468 	    (prot & (TEE_MATTR_UW | TEE_MATTR_PW)))
469 		return TEE_ERROR_ACCESS_DENIED;
470 
471 	return vm_set_prot(uctx, va, sz, prot);
472 }
473 
474 TEE_Result ldelf_syscall_remap(unsigned long old_va, vaddr_t *new_va,
475 			       size_t num_bytes, size_t pad_begin,
476 			       size_t pad_end)
477 {
478 	TEE_Result res = TEE_SUCCESS;
479 	struct ts_session *sess = ts_get_current_session();
480 	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
481 	uint32_t vm_flags = 0;
482 
483 	res = vm_get_flags(uctx, old_va, num_bytes, &vm_flags);
484 	if (res)
485 		return res;
486 	if (vm_flags & VM_FLAG_PERMANENT)
487 		return TEE_ERROR_ACCESS_DENIED;
488 
489 	res = vm_remap(uctx, new_va, old_va, num_bytes, pad_begin, pad_end);
490 
491 	return res;
492 }
493 
494 TEE_Result ldelf_syscall_gen_rnd_num(void *buf, size_t num_bytes)
495 {
496 	TEE_Result res = TEE_SUCCESS;
497 	struct ts_session *sess = ts_get_current_session();
498 	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
499 
500 	res = vm_check_access_rights(uctx,
501 				     TEE_MEMORY_ACCESS_WRITE |
502 				     TEE_MEMORY_ACCESS_ANY_OWNER,
503 				     (uaddr_t)buf, num_bytes);
504 	if (res)
505 		return res;
506 
507 	return crypto_rng_read(buf, num_bytes);
508 }
509 
510 /*
511  * Should be called after returning from ldelf. If user_ctx is not NULL means
512  * that ldelf crashed or otherwise didn't complete properly. This function will
513  * close the remaining handles and free the context structs allocated by ldelf.
514  */
515 void ldelf_sess_cleanup(struct ts_session *sess)
516 {
517 	struct system_ctx *sys_ctx = sess->user_ctx;
518 
519 	if (sys_ctx) {
520 		handle_db_destroy(&sys_ctx->db, bin_close);
521 		free(sys_ctx);
522 		sess->user_ctx = NULL;
523 	}
524 }
525