// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018-2019, Linaro Limited
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <assert.h>
#include <crypto/crypto.h>
#include <kernel/ldelf_syscalls.h>
#include <kernel/user_mode_ctx.h>
#include <ldelf.h>
#include <mm/file.h>
#include <mm/fobj.h>
#include <mm/mobj.h>
#include <mm/vm.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <util.h>

/* State of an open TA/SP binary, kept in the per-session handle database */
struct bin_handle {
	const struct ts_store_ops *op;	/* Store backend the binary was found in */
	struct ts_store_handle *h;	/* Backend handle for the open binary */
	struct file *f;			/* File object, looked up by binary tag */
	size_t offs_bytes;		/* Current (sequential) read offset */
	size_t size_bytes;		/* Total size of the binary */
};

/* Maps zero-initialized (anonymous) memory into the calling context */
TEE_Result ldelf_syscall_map_zi(vaddr_t *va, size_t num_bytes, size_t pad_begin,
				size_t pad_end, unsigned long flags)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	struct fobj *f = NULL;
	struct mobj *mobj = NULL;
	uint32_t prot = TEE_MATTR_URW | TEE_MATTR_PRW;
	uint32_t vm_flags = 0;

	if (flags & ~LDELF_MAP_FLAG_SHAREABLE)
		return TEE_ERROR_BAD_PARAMETERS;

	if (flags & LDELF_MAP_FLAG_SHAREABLE)
		vm_flags |= VM_FLAG_SHAREABLE;

	f = fobj_ta_mem_alloc(ROUNDUP_DIV(num_bytes, SMALL_PAGE_SIZE));
	if (!f)
		return TEE_ERROR_OUT_OF_MEMORY;
	mobj = mobj_with_fobj_alloc(f, NULL);
	fobj_put(f);
	if (!mobj)
		return TEE_ERROR_OUT_OF_MEMORY;
	res = vm_map_pad(uctx, va, num_bytes, prot, vm_flags,
			 mobj, 0, pad_begin, pad_end, 0);
	mobj_put(mobj);

	return res;
}

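/*
 * Unmaps a region previously mapped with ldelf_syscall_map_zi() or
 * ldelf_syscall_map_bin(). Regions carrying VM_FLAG_PERMANENT are refused.
 */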
TEE_Result ldelf_syscall_unmap(vaddr_t va, size_t num_bytes)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	size_t sz = ROUNDUP(num_bytes, SMALL_PAGE_SIZE);
	uint32_t vm_flags = 0;
	vaddr_t end_va = 0;

	/*
	 * vm_get_flags() and vm_unmap() are supposed to detect or handle
	 * overflow directly or indirectly. However, since this is an API
	 * function it's worth having an extra guard here, if nothing else
	 * to increase code clarity.
	 */
	if (ADD_OVERFLOW(va, sz, &end_va))
		return TEE_ERROR_BAD_PARAMETERS;

	res = vm_get_flags(uctx, va, sz, &vm_flags);
	if (res)
		return res;
	if (vm_flags & VM_FLAG_PERMANENT)
		return TEE_ERROR_ACCESS_DENIED;

	return vm_unmap(uctx, va, sz);
}

/* Releases a struct bin_handle and the store/file resources it holds */
static void bin_close(void *ptr)
{
	struct bin_handle *binh = ptr;

	if (binh) {
		if (binh->op && binh->h)
			binh->op->close(binh->h);
		file_put(binh->f);
	}
	free(binh);
}

/*
 * Looks up a TA/SP binary by UUID in the registered stores and on success
 * returns a handle to it in *handle.
 */
TEE_Result ldelf_syscall_open_bin(const TEE_UUID *uuid, size_t uuid_size,
				  uint32_t *handle)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	struct system_ctx *sys_ctx = sess->user_ctx;
	struct bin_handle *binh = NULL;
	uint8_t tag[FILE_TAG_SIZE] = { 0 };
	unsigned int tag_len = sizeof(tag);
	int h = 0;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_READ |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)uuid, sizeof(TEE_UUID));
	if (res)
		return res;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)handle, sizeof(uint32_t));
	if (res)
		return res;

	if (uuid_size != sizeof(*uuid))
		return TEE_ERROR_BAD_PARAMETERS;

	if (!sys_ctx) {
		sys_ctx = calloc(1, sizeof(*sys_ctx));
		if (!sys_ctx)
			return TEE_ERROR_OUT_OF_MEMORY;
		sess->user_ctx = sys_ctx;
	}

	binh = calloc(1, sizeof(*binh));
	if (!binh)
		return TEE_ERROR_OUT_OF_MEMORY;

	if (is_user_ta_ctx(sess->ctx) || is_stmm_ctx(sess->ctx)) {
		SCATTERED_ARRAY_FOREACH(binh->op, ta_stores,
					struct ts_store_ops) {
			DMSG("Lookup user TA ELF %pUl (%s)",
			     (void *)uuid, binh->op->description);

			res = binh->op->open(uuid, &binh->h);
			DMSG("res=%#"PRIx32, res);
			if (res != TEE_ERROR_ITEM_NOT_FOUND &&
			    res != TEE_ERROR_STORAGE_NOT_AVAILABLE)
				break;
		}
	} else if (is_sp_ctx(sess->ctx)) {
		SCATTERED_ARRAY_FOREACH(binh->op, sp_stores,
					struct ts_store_ops) {
			DMSG("Lookup user SP ELF %pUl (%s)",
			     (void *)uuid, binh->op->description);

			res = binh->op->open(uuid, &binh->h);
			DMSG("res=%#"PRIx32, res);
			if (res != TEE_ERROR_ITEM_NOT_FOUND &&
			    res != TEE_ERROR_STORAGE_NOT_AVAILABLE)
				break;
		}
	} else {
		res = TEE_ERROR_ITEM_NOT_FOUND;
	}

	if (res)
		goto err;

	res = binh->op->get_size(binh->h, &binh->size_bytes);
	if (res)
		goto err;
	res = binh->op->get_tag(binh->h, tag, &tag_len);
	if (res)
		goto err;
	binh->f = file_get_by_tag(tag, tag_len);
	if (!binh->f)
		goto err_oom;

	h = handle_get(&sys_ctx->db, binh);
	if (h < 0)
		goto err_oom;
	*handle = h;

	return TEE_SUCCESS;

err_oom:
	res = TEE_ERROR_OUT_OF_MEMORY;
err:
	bin_close(binh);
	return res;
}

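/*
 * Closes a binary handle. If the binary hasn't been read to the end the
 * remainder is read and discarded first (a store backend may verify the
 * binary while it is being read, so the drain read lets such verification
 * run over the whole binary before close).
 */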
TEE_Result ldelf_syscall_close_bin(unsigned long handle)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct system_ctx *sys_ctx = sess->user_ctx;
	struct bin_handle *binh = NULL;

	if (!sys_ctx)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_put(&sys_ctx->db, handle);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;

	if (binh->offs_bytes < binh->size_bytes)
		res = binh->op->read(binh->h, NULL,
				     binh->size_bytes - binh->offs_bytes);

	bin_close(binh);
	if (handle_db_is_empty(&sys_ctx->db)) {
		handle_db_destroy(&sys_ctx->db, bin_close);
		free(sys_ctx);
		sess->user_ctx = NULL;
	}

	return res;
}

/*
 * Copies @num_bytes at @offs_bytes in the binary to @va. Reads are
 * sequential only: an offset before the current read position is an
 * error, a gap before it is skipped, and anything beyond the end of the
 * binary is zero-filled.
 */
static TEE_Result binh_copy_to(struct bin_handle *binh, vaddr_t va,
			       size_t offs_bytes, size_t num_bytes)
{
	TEE_Result res = TEE_SUCCESS;
	size_t next_offs = 0;

	if (offs_bytes < binh->offs_bytes)
		return TEE_ERROR_BAD_STATE;

	if (ADD_OVERFLOW(offs_bytes, num_bytes, &next_offs))
		return TEE_ERROR_BAD_PARAMETERS;

	if (offs_bytes > binh->offs_bytes) {
		/* Skip forward by reading into the bit bucket */
		res = binh->op->read(binh->h, NULL,
				     offs_bytes - binh->offs_bytes);
		if (res)
			return res;
		binh->offs_bytes = offs_bytes;
	}

	if (next_offs > binh->size_bytes) {
		size_t rb = binh->size_bytes - binh->offs_bytes;

		res = binh->op->read(binh->h, (void *)va, rb);
		if (res)
			return res;
		memset((uint8_t *)va + rb, 0, num_bytes - rb);
		binh->offs_bytes = binh->size_bytes;
	} else {
		res = binh->op->read(binh->h, (void *)va, num_bytes);
		if (res)
			return res;
		binh->offs_bytes = next_offs;
	}

	return TEE_SUCCESS;
}

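/*
 * Maps a page-aligned part of an open binary at *va. Read-only mappings
 * are registered as file slices so that other contexts mapping the same
 * binary can share the backing pages.
 *
 * Sketch of the expected call sequence from ldelf, assuming the sys_*()
 * wrapper names used on the ldelf side; treat the exact signatures as an
 * assumption and see ldelf.h for the authoritative ABI:
 *
 *	uint32_t handle = 0;
 *	vaddr_t va = 0;
 *
 *	sys_open_ta_bin(&uuid, &handle);
 *	sys_map_ta_bin(&va, num_bytes, flags, handle, offs, 0, 0);
 *	sys_close_ta_bin(handle);
 */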
TEE_Result ldelf_syscall_map_bin(vaddr_t *va, size_t num_bytes,
				 unsigned long handle, size_t offs_bytes,
				 size_t pad_begin, size_t pad_end,
				 unsigned long flags)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	struct system_ctx *sys_ctx = sess->user_ctx;
	struct bin_handle *binh = NULL;
	uint32_t num_rounded_bytes = 0;
	struct file_slice *fs = NULL;
	bool file_is_locked = false;
	struct mobj *mobj = NULL;
	uint32_t offs_pages = 0;
	size_t num_pages = 0;
	uint32_t prot = 0;
	const uint32_t accept_flags = LDELF_MAP_FLAG_SHAREABLE |
				      LDELF_MAP_FLAG_WRITEABLE |
				      LDELF_MAP_FLAG_EXECUTABLE;

	if (!sys_ctx)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_lookup(&sys_ctx->db, handle);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;

	if ((flags & accept_flags) != flags)
		return TEE_ERROR_BAD_PARAMETERS;

	if ((flags & LDELF_MAP_FLAG_SHAREABLE) &&
	    (flags & LDELF_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	if ((flags & LDELF_MAP_FLAG_EXECUTABLE) &&
	    (flags & LDELF_MAP_FLAG_WRITEABLE))
		return TEE_ERROR_BAD_PARAMETERS;

	if (offs_bytes & SMALL_PAGE_MASK)
		return TEE_ERROR_BAD_PARAMETERS;

	prot = TEE_MATTR_UR | TEE_MATTR_PR;
	if (flags & LDELF_MAP_FLAG_WRITEABLE)
		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
	if (flags & LDELF_MAP_FLAG_EXECUTABLE)
		prot |= TEE_MATTR_UX;

	offs_pages = offs_bytes >> SMALL_PAGE_SHIFT;
	if (ROUNDUP_OVERFLOW(num_bytes, SMALL_PAGE_SIZE, &num_rounded_bytes))
		return TEE_ERROR_BAD_PARAMETERS;
	num_pages = num_rounded_bytes / SMALL_PAGE_SIZE;

	if (!file_trylock(binh->f)) {
		/*
		 * Before we can block on the file lock we must make all
		 * our page tables available for reclaiming in order to
		 * avoid a deadlock with the other thread (which is
		 * already holding the file lock) mapping lots of memory
		 * below.
		 */
		vm_set_ctx(NULL);
		file_lock(binh->f);
		vm_set_ctx(uctx->ts_ctx);
	}
	file_is_locked = true;
	fs = file_find_slice(binh->f, offs_pages);
	if (fs) {
		/* If there's a registered slice it has to match */
		if (fs->page_offset != offs_pages ||
		    num_pages > fs->fobj->num_pages) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		/* If there's a slice we must be mapping shareable */
		if (!(flags & LDELF_MAP_FLAG_SHAREABLE)) {
			res = TEE_ERROR_BAD_PARAMETERS;
			goto err;
		}

		mobj = mobj_with_fobj_alloc(fs->fobj, binh->f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		res = vm_map_pad(uctx, va, num_rounded_bytes,
				 prot, VM_FLAG_READONLY,
				 mobj, 0, pad_begin, pad_end, 0);
		mobj_put(mobj);
		if (res)
			goto err;
	} else {
		struct fobj *f = fobj_ta_mem_alloc(num_pages);
		struct file *file = NULL;
		uint32_t vm_flags = 0;

		if (!f) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		if (!(flags & LDELF_MAP_FLAG_WRITEABLE)) {
			file = binh->f;
			vm_flags |= VM_FLAG_READONLY;
		}

		mobj = mobj_with_fobj_alloc(f, file);
		fobj_put(f);
		if (!mobj) {
			res = TEE_ERROR_OUT_OF_MEMORY;
			goto err;
		}
		res = vm_map_pad(uctx, va, num_rounded_bytes,
				 TEE_MATTR_PRW, vm_flags, mobj, 0,
				 pad_begin, pad_end, 0);
		mobj_put(mobj);
		if (res)
			goto err;
		res = binh_copy_to(binh, *va, offs_bytes, num_bytes);
		if (res)
			goto err_unmap_va;
		res = vm_set_prot(uctx, *va, num_rounded_bytes, prot);
		if (res)
			goto err_unmap_va;

		/*
		 * The context is currently active, set it again to
		 * update the mapping.
		 */
		vm_set_ctx(uctx->ts_ctx);

		if (!(flags & LDELF_MAP_FLAG_WRITEABLE)) {
			res = file_add_slice(binh->f, f, offs_pages);
			if (res)
				goto err_unmap_va;
		}
	}

	file_unlock(binh->f);

	return TEE_SUCCESS;

err_unmap_va:
	if (vm_unmap(uctx, *va, num_rounded_bytes))
		panic();

	/*
	 * The context is currently active, set it again to
	 * update the mapping.
	 */
	vm_set_ctx(uctx->ts_ctx);

err:
	if (file_is_locked)
		file_unlock(binh->f);

	return res;
}

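/*
 * Copies data from an open binary to the user buffer at dst. Like all
 * reads through a binary handle this is forward-only, see binh_copy_to().
 */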
TEE_Result ldelf_syscall_copy_from_bin(void *dst, size_t offs, size_t num_bytes,
				       unsigned long handle)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	struct system_ctx *sys_ctx = sess->user_ctx;
	struct bin_handle *binh = NULL;

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)dst, num_bytes);
	if (res)
		return res;

	if (!sys_ctx)
		return TEE_ERROR_BAD_PARAMETERS;

	binh = handle_lookup(&sys_ctx->db, handle);
	if (!binh)
		return TEE_ERROR_BAD_PARAMETERS;

	return binh_copy_to(binh, (vaddr_t)dst, offs, num_bytes);
}

/* Changes the protection of a region previously mapped by ldelf */
TEE_Result ldelf_syscall_set_prot(unsigned long va, size_t num_bytes,
				  unsigned long flags)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	size_t sz = ROUNDUP(num_bytes, SMALL_PAGE_SIZE);
	uint32_t prot = TEE_MATTR_UR | TEE_MATTR_PR;
	uint32_t vm_flags = 0;
	vaddr_t end_va = 0;
	const uint32_t accept_flags = LDELF_MAP_FLAG_WRITEABLE |
				      LDELF_MAP_FLAG_EXECUTABLE;

	if ((flags & accept_flags) != flags)
		return TEE_ERROR_BAD_PARAMETERS;
	if (flags & LDELF_MAP_FLAG_WRITEABLE)
		prot |= TEE_MATTR_UW | TEE_MATTR_PW;
	if (flags & LDELF_MAP_FLAG_EXECUTABLE)
		prot |= TEE_MATTR_UX;

	/*
	 * vm_get_flags() and vm_set_prot() are supposed to detect or handle
	 * overflow directly or indirectly. However, since this is an API
	 * function it's worth having an extra guard here, if nothing else
	 * to increase code clarity.
	 */
	if (ADD_OVERFLOW(va, sz, &end_va))
		return TEE_ERROR_BAD_PARAMETERS;

	res = vm_get_flags(uctx, va, sz, &vm_flags);
	if (res)
		return res;
	if (vm_flags & VM_FLAG_PERMANENT)
		return TEE_ERROR_ACCESS_DENIED;

	/*
	 * If the segment is a mapping of a part of a file (vm_flags &
	 * VM_FLAG_READONLY) it cannot be made writeable as all mapped
	 * files are mapped read-only.
	 */
	if ((vm_flags & VM_FLAG_READONLY) &&
	    (prot & (TEE_MATTR_UW | TEE_MATTR_PW)))
		return TEE_ERROR_ACCESS_DENIED;

	return vm_set_prot(uctx, va, sz, prot);
}

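/*
 * Moves an existing mapping to a new virtual address range. Permanent
 * mappings cannot be moved.
 */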
TEE_Result ldelf_syscall_remap(unsigned long old_va, vaddr_t *new_va,
			       size_t num_bytes, size_t pad_begin,
			       size_t pad_end)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);
	uint32_t vm_flags = 0;

	res = vm_get_flags(uctx, old_va, num_bytes, &vm_flags);
	if (res)
		return res;
	if (vm_flags & VM_FLAG_PERMANENT)
		return TEE_ERROR_ACCESS_DENIED;

	res = vm_remap(uctx, new_va, old_va, num_bytes, pad_begin, pad_end);

	return res;
}

/* Fills a user buffer with output from the kernel RNG */
TEE_Result ldelf_syscall_gen_rnd_num(void *buf, size_t num_bytes)
{
	TEE_Result res = TEE_SUCCESS;
	struct ts_session *sess = ts_get_current_session();
	struct user_mode_ctx *uctx = to_user_mode_ctx(sess->ctx);

	res = vm_check_access_rights(uctx,
				     TEE_MEMORY_ACCESS_WRITE |
				     TEE_MEMORY_ACCESS_ANY_OWNER,
				     (uaddr_t)buf, num_bytes);
	if (res)
		return res;

	return crypto_rng_read(buf, num_bytes);
}

/*
 * Should be called after returning from ldelf. If user_ctx is not NULL it
 * means that ldelf crashed or otherwise didn't complete properly. This
 * function will close the remaining handles and free the context structs
 * allocated by ldelf.
 */
void ldelf_sess_cleanup(struct ts_session *sess)
{
	struct system_ctx *sys_ctx = sess->user_ctx;

	if (sys_ctx) {
		handle_db_destroy(&sys_ctx->db, bin_close);
		free(sys_ctx);
		sess->user_ctx = NULL;
	}
}