// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015, Linaro Limited
 */

#include <assert.h>
#include <config.h>
#include <kernel/mutex.h>
#include <kernel/nv_counter.h>
#include <kernel/panic.h>
#include <kernel/thread.h>
#include <kernel/user_access.h>
#include <mempool.h>
#include <mm/core_memprot.h>
#include <mm/tee_pager.h>
#include <optee_rpc_cmd.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <tee/fs_dirfile.h>
#include <tee/fs_htree.h>
#include <tee/tee_fs.h>
#include <tee/tee_fs_rpc.h>
#include <tee/tee_pobj.h>
#include <trace.h>
#include <utee_defines.h>
#include <util.h>

#define BLOCK_SHIFT	12

#define BLOCK_SIZE	(1 << BLOCK_SHIFT)

struct tee_fs_fd {
	struct tee_fs_htree *ht;
	int fd;
	struct tee_fs_dirfile_fileh dfh;
	const TEE_UUID *uuid;
};

struct tee_fs_dir {
	struct tee_fs_dirfile_dirh *dirh;
	int idx;
	struct tee_fs_dirent d;
	const TEE_UUID *uuid;
};

static int pos_to_block_num(int position)
{
	return position >> BLOCK_SHIFT;
}

static struct mutex ree_fs_mutex = MUTEX_INITIALIZER;

static void *get_tmp_block(void)
{
	return mempool_alloc(mempool_default, BLOCK_SIZE);
}

static void put_tmp_block(void *tmp_block)
{
	mempool_free(mempool_default, tmp_block);
}
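/*
 * Read-modify-write helper for the write paths below. At most one of
 * @buf_core and @buf_user is expected to be non-NULL; when both are NULL
 * the affected range is zero-filled (used when extending a file). Updates
 * meta->length when the write ends beyond the current end of file.
 */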
static TEE_Result out_of_place_write(struct tee_fs_fd *fdp, size_t pos,
				     const void *buf_core,
				     const void *buf_user, size_t len)
{
	TEE_Result res;
	size_t start_block_num = pos_to_block_num(pos);
	size_t end_block_num = pos_to_block_num(pos + len - 1);
	size_t remain_bytes = len;
	uint8_t *data_core_ptr = (uint8_t *)buf_core;
	uint8_t *data_user_ptr = (uint8_t *)buf_user;
	uint8_t *block;
	struct tee_fs_htree_meta *meta = tee_fs_htree_get_meta(fdp->ht);

	/*
	 * It doesn't make sense to call this function if nothing is to be
	 * written. This also guards against end_block_num getting an
	 * unexpected value when pos == 0 and len == 0.
	 */
	if (!len)
		return TEE_ERROR_BAD_PARAMETERS;

	block = get_tmp_block();
	if (!block)
		return TEE_ERROR_OUT_OF_MEMORY;

	while (start_block_num <= end_block_num) {
		size_t offset = pos % BLOCK_SIZE;
		size_t size_to_write = MIN(remain_bytes, (size_t)BLOCK_SIZE);

		if (size_to_write + offset > BLOCK_SIZE)
			size_to_write = BLOCK_SIZE - offset;

		if (start_block_num * BLOCK_SIZE <
		    ROUNDUP(meta->length, BLOCK_SIZE)) {
			res = tee_fs_htree_read_block(&fdp->ht,
						      start_block_num, block);
			if (res != TEE_SUCCESS)
				goto exit;
		} else {
			memset(block, 0, BLOCK_SIZE);
		}

		if (data_core_ptr) {
			memcpy(block + offset, data_core_ptr, size_to_write);
		} else if (data_user_ptr) {
			res = copy_from_user(block + offset, data_user_ptr,
					     size_to_write);
			if (res)
				goto exit;
		} else {
			memset(block + offset, 0, size_to_write);
		}

		res = tee_fs_htree_write_block(&fdp->ht, start_block_num,
					       block);
		if (res != TEE_SUCCESS)
			goto exit;

		if (data_core_ptr)
			data_core_ptr += size_to_write;
		if (data_user_ptr)
			data_user_ptr += size_to_write;
		remain_bytes -= size_to_write;
		start_block_num++;
		pos += size_to_write;
	}

	if (pos > meta->length) {
		meta->length = pos;
		tee_fs_htree_meta_set_dirty(fdp->ht);
	}

exit:
	if (block)
		put_tmp_block(block);
	return res;
}

static TEE_Result get_offs_size(enum tee_fs_htree_type type, size_t idx,
				uint8_t vers, size_t *offs, size_t *size)
{
	const size_t node_size = sizeof(struct tee_fs_htree_node_image);
	const size_t block_nodes = BLOCK_SIZE / (node_size * 2);
	size_t pbn;
	size_t bidx;

	assert(vers == 0 || vers == 1);

	/*
	 * File layout
	 * [demo with input:
	 * BLOCK_SIZE = 4096,
	 * node_size = 66,
	 * block_nodes = 4096/(66*2) = 31 ]
	 *
	 * phys block 0:
	 * tee_fs_htree_image vers 0 @ offs = 0
	 * tee_fs_htree_image vers 1 @ offs = sizeof(tee_fs_htree_image)
	 *
	 * phys block 1:
	 * tee_fs_htree_node_image 0  vers 0 @ offs = 0
	 * tee_fs_htree_node_image 0  vers 1 @ offs = node_size
	 * tee_fs_htree_node_image 1  vers 0 @ offs = node_size * 2
	 * tee_fs_htree_node_image 1  vers 1 @ offs = node_size * 3
	 * ...
	 * tee_fs_htree_node_image 30 vers 0 @ offs = node_size * 60
	 * tee_fs_htree_node_image 30 vers 1 @ offs = node_size * 61
	 *
	 * phys block 2:
	 * data block 0 vers 0
	 *
	 * phys block 3:
	 * data block 0 vers 1
	 *
	 * ...
	 * phys block 62:
	 * data block 30 vers 0
	 *
	 * phys block 63:
	 * data block 30 vers 1
	 *
	 * phys block 64:
	 * tee_fs_htree_node_image 31 vers 0 @ offs = 0
	 * tee_fs_htree_node_image 31 vers 1 @ offs = node_size
	 * tee_fs_htree_node_image 32 vers 0 @ offs = node_size * 2
	 * tee_fs_htree_node_image 32 vers 1 @ offs = node_size * 3
	 * ...
	 * tee_fs_htree_node_image 61 vers 0 @ offs = node_size * 60
	 * tee_fs_htree_node_image 61 vers 1 @ offs = node_size * 61
	 *
	 * phys block 65:
	 * data block 31 vers 0
	 *
	 * phys block 66:
	 * data block 31 vers 1
	 * ...
	 */
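	/*
	 * Worked example (illustration only, using the demo parameters
	 * above and the formulas in the switch statement below):
	 *
	 * TEE_FS_HTREE_TYPE_NODE, idx = 5, vers = 1:
	 *   pbn  = 1 + (5 / 31) * 31 * 2 = 1
	 *   offs = 1 * 4096 + 2 * 66 * (5 % 31) + 66 * 1 = 4822
	 *
	 * TEE_FS_HTREE_TYPE_BLOCK, idx = 30, vers = 0:
	 *   bidx = 2 * 30 + 0 = 60
	 *   pbn  = 2 + 60 + 60 / (31 * 2 - 1) = 62
	 *   offs = 62 * 4096 = 253952
	 */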
	switch (type) {
	case TEE_FS_HTREE_TYPE_HEAD:
		*offs = sizeof(struct tee_fs_htree_image) * vers;
		*size = sizeof(struct tee_fs_htree_image);
		return TEE_SUCCESS;
	case TEE_FS_HTREE_TYPE_NODE:
		pbn = 1 + ((idx / block_nodes) * block_nodes * 2);
		*offs = pbn * BLOCK_SIZE +
			2 * node_size * (idx % block_nodes) +
			node_size * vers;
		*size = node_size;
		return TEE_SUCCESS;
	case TEE_FS_HTREE_TYPE_BLOCK:
		bidx = 2 * idx + vers;
		pbn = 2 + bidx + bidx / (block_nodes * 2 - 1);
		*offs = pbn * BLOCK_SIZE;
		*size = BLOCK_SIZE;
		return TEE_SUCCESS;
	default:
		return TEE_ERROR_GENERIC;
	}
}

static TEE_Result ree_fs_rpc_read_init(void *aux,
				       struct tee_fs_rpc_operation *op,
				       enum tee_fs_htree_type type, size_t idx,
				       uint8_t vers, void **data)
{
	struct tee_fs_fd *fdp = aux;
	TEE_Result res;
	size_t offs;
	size_t size;

	res = get_offs_size(type, idx, vers, &offs, &size);
	if (res != TEE_SUCCESS)
		return res;

	return tee_fs_rpc_read_init(op, OPTEE_RPC_CMD_FS, fdp->fd,
				    offs, size, data);
}

static TEE_Result ree_fs_rpc_write_init(void *aux,
					struct tee_fs_rpc_operation *op,
					enum tee_fs_htree_type type, size_t idx,
					uint8_t vers, void **data)
{
	struct tee_fs_fd *fdp = aux;
	TEE_Result res;
	size_t offs;
	size_t size;

	res = get_offs_size(type, idx, vers, &offs, &size);
	if (res != TEE_SUCCESS)
		return res;

	return tee_fs_rpc_write_init(op, OPTEE_RPC_CMD_FS, fdp->fd,
				     offs, size, data);
}

static const struct tee_fs_htree_storage ree_fs_storage_ops = {
	.block_size = BLOCK_SIZE,
	.rpc_read_init = ree_fs_rpc_read_init,
	.rpc_read_final = tee_fs_rpc_read_final,
	.rpc_write_init = ree_fs_rpc_write_init,
	.rpc_write_final = tee_fs_rpc_write_final,
};
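/*
 * Grows the file with zero-filled blocks when new_file_len is beyond the
 * current length, otherwise shrinks the hash tree and truncates the
 * backing file in normal world via RPC.
 */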
static TEE_Result ree_fs_ftruncate_internal(struct tee_fs_fd *fdp,
					    tee_fs_off_t new_file_len)
{
	TEE_Result res;
	struct tee_fs_htree_meta *meta = tee_fs_htree_get_meta(fdp->ht);

	if ((size_t)new_file_len > meta->length) {
		size_t ext_len = new_file_len - meta->length;

		res = out_of_place_write(fdp, meta->length, NULL, NULL,
					 ext_len);
		if (res != TEE_SUCCESS)
			return res;
	} else {
		size_t offs;
		size_t sz;

		res = get_offs_size(TEE_FS_HTREE_TYPE_BLOCK,
				    ROUNDUP_DIV(new_file_len, BLOCK_SIZE), 1,
				    &offs, &sz);
		if (res != TEE_SUCCESS)
			return res;

		res = tee_fs_htree_truncate(&fdp->ht,
					    new_file_len / BLOCK_SIZE);
		if (res != TEE_SUCCESS)
			return res;

		res = tee_fs_rpc_truncate(OPTEE_RPC_CMD_FS, fdp->fd,
					  offs + sz);
		if (res != TEE_SUCCESS)
			return res;

		meta->length = new_file_len;
		tee_fs_htree_meta_set_dirty(fdp->ht);
	}

	return TEE_SUCCESS;
}

static TEE_Result ree_fs_read_primitive(struct tee_file_handle *fh, size_t pos,
					void *buf_core, void *buf_user,
					size_t *len)
{
	TEE_Result res;
	int start_block_num;
	int end_block_num;
	size_t remain_bytes;
	uint8_t *data_core_ptr = buf_core;
	uint8_t *data_user_ptr = buf_user;
	uint8_t *block = NULL;
	struct tee_fs_fd *fdp = (struct tee_fs_fd *)fh;
	struct tee_fs_htree_meta *meta = tee_fs_htree_get_meta(fdp->ht);

	/* One of buf_core and buf_user must be NULL */
	assert(!buf_core || !buf_user);

	remain_bytes = *len;
	if ((pos + remain_bytes) < remain_bytes || pos > meta->length)
		remain_bytes = 0;
	else if (pos + remain_bytes > meta->length)
		remain_bytes = meta->length - pos;

	*len = remain_bytes;

	if (!remain_bytes) {
		res = TEE_SUCCESS;
		goto exit;
	}

	start_block_num = pos_to_block_num(pos);
	end_block_num = pos_to_block_num(pos + remain_bytes - 1);

	block = get_tmp_block();
	if (!block) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto exit;
	}

	while (start_block_num <= end_block_num) {
		size_t offset = pos % BLOCK_SIZE;
		size_t size_to_read = MIN(remain_bytes, (size_t)BLOCK_SIZE);

		if (size_to_read + offset > BLOCK_SIZE)
			size_to_read = BLOCK_SIZE - offset;

		res = tee_fs_htree_read_block(&fdp->ht, start_block_num, block);
		if (res != TEE_SUCCESS)
			goto exit;

		if (data_core_ptr) {
			memcpy(data_core_ptr, block + offset, size_to_read);
			data_core_ptr += size_to_read;
		} else if (data_user_ptr) {
			res = copy_to_user(data_user_ptr, block + offset,
					   size_to_read);
			if (res)
				goto exit;
			data_user_ptr += size_to_read;
		}

		remain_bytes -= size_to_read;
		pos += size_to_read;

		start_block_num++;
	}
	res = TEE_SUCCESS;
exit:
	if (block)
		put_tmp_block(block);
	return res;
}

static TEE_Result ree_fs_read(struct tee_file_handle *fh, size_t pos,
			      void *buf_core, void *buf_user, size_t *len)
{
	TEE_Result res;

	mutex_lock(&ree_fs_mutex);
	res = ree_fs_read_primitive(fh, pos, buf_core, buf_user, len);
	mutex_unlock(&ree_fs_mutex);

	return res;
}

static TEE_Result ree_fs_write_primitive(struct tee_file_handle *fh,
					 size_t pos, const void *buf_core,
					 const void *buf_user, size_t len)
{
	TEE_Result res;
	struct tee_fs_fd *fdp = (struct tee_fs_fd *)fh;
	size_t file_size;

	/* One of buf_core and buf_user must be NULL */
	assert(!buf_core || !buf_user);

	if (!len)
		return TEE_SUCCESS;

	file_size = tee_fs_htree_get_meta(fdp->ht)->length;

	if ((pos + len) < len)
		return TEE_ERROR_BAD_PARAMETERS;

	if (file_size < pos) {
		res = ree_fs_ftruncate_internal(fdp, pos);
		if (res != TEE_SUCCESS)
			return res;
	}

	return out_of_place_write(fdp, pos, buf_core, buf_user, len);
}
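/*
 * Opens or creates the backing file via RPC and then opens the hash tree
 * on top of it. On failure, a file that isn't rollback protected yet
 * (being created now, or opened without a hash and minimum counter) is
 * removed so that a later create starts from a clean state.
 */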
static TEE_Result ree_fs_open_primitive(bool create, uint8_t *hash,
					uint32_t min_counter,
					const TEE_UUID *uuid,
					struct tee_fs_dirfile_fileh *dfh,
					struct tee_file_handle **fh)
{
	TEE_Result res;
	struct tee_fs_fd *fdp;

	fdp = calloc(1, sizeof(struct tee_fs_fd));
	if (!fdp)
		return TEE_ERROR_OUT_OF_MEMORY;
	fdp->fd = -1;
	fdp->uuid = uuid;

	if (create)
		res = tee_fs_rpc_create_dfh(OPTEE_RPC_CMD_FS,
					    dfh, &fdp->fd);
	else
		res = tee_fs_rpc_open_dfh(OPTEE_RPC_CMD_FS, dfh, &fdp->fd);

	if (res != TEE_SUCCESS)
		goto out;

	res = tee_fs_htree_open(create, hash, min_counter, uuid,
				&ree_fs_storage_ops, fdp, &fdp->ht);
out:
	if (res == TEE_SUCCESS) {
		if (dfh)
			fdp->dfh = *dfh;
		else
			fdp->dfh.idx = -1;
		*fh = (struct tee_file_handle *)fdp;
	} else {
		if (res == TEE_ERROR_SECURITY)
			DMSG("Secure storage corruption detected");
		if (fdp->fd != -1)
			tee_fs_rpc_close(OPTEE_RPC_CMD_FS, fdp->fd);
		/*
		 * Remove the file if hash is NULL and min_counter is 0,
		 * as it is not yet rollback-protected
		 */
		if (create || (!hash && !min_counter)) {
			DMSG("Remove corrupt file");
			tee_fs_rpc_remove_dfh(OPTEE_RPC_CMD_FS, dfh);
		}
		free(fdp);
	}

	return res;
}

static void ree_fs_close_primitive(struct tee_file_handle *fh)
{
	struct tee_fs_fd *fdp = (struct tee_fs_fd *)fh;

	if (fdp) {
		tee_fs_htree_close(&fdp->ht);
		tee_fs_rpc_close(OPTEE_RPC_CMD_FS, fdp->fd);
		free(fdp);
	}
}

static TEE_Result ree_dirf_commit_writes(struct tee_file_handle *fh,
					 uint8_t *hash, uint32_t *counter)
{
	TEE_Result res;
	struct tee_fs_fd *fdp = (struct tee_fs_fd *)fh;

	res = tee_fs_htree_sync_to_storage(&fdp->ht, fdp->dfh.hash, counter);

	if (!res && hash)
		memcpy(hash, fdp->dfh.hash, sizeof(fdp->dfh.hash));

	return res;
}

static TEE_Result dirf_read(struct tee_file_handle *fh, size_t pos, void *buf,
			    size_t *len)
{
	return ree_fs_read_primitive(fh, pos, buf, NULL, len);
}

static TEE_Result dirf_write(struct tee_file_handle *fh, size_t pos,
			     const void *buf, size_t len)
{
	return ree_fs_write_primitive(fh, pos, buf, NULL, len);
}

static const struct tee_fs_dirfile_operations ree_dirf_ops = {
	.open = ree_fs_open_primitive,
	.close = ree_fs_close_primitive,
	.read = dirf_read,
	.write = dirf_write,
	.commit_writes = ree_dirf_commit_writes,
};

/*
 * ree_fs_dirh is caching the dirfile handle to avoid frequent opening and
 * closing of that handle. When ree_fs_dirh_refcount reaches 0, ree_fs_dirh
 * will be freed. However, ree_fs_dirh_refcount > 0 is not a guarantee that
 * ree_fs_dirh will not be freed, it may very well be freed earlier in an
 * error path. get_dirh() must be used to get the ree_fs_dirh pointer each
 * time it's needed if ree_fs_mutex has been unlocked in between.
 */
static struct tee_fs_dirfile_dirh *ree_fs_dirh;
static size_t ree_fs_dirh_refcount;
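/*
 * Sketch of how the tee_file_operations below use the cached handle
 * (see for instance ree_fs_write()):
 *
 *	mutex_lock(&ree_fs_mutex);
 *	res = get_dirh(&dirh);
 *	...
 *	res = commit_dirh_writes(dirh);
 * out:
 *	put_dirh(dirh, res);
 *	mutex_unlock(&ree_fs_mutex);
 *
 * The dirfile root hash is anchored either in RPMB
 * (CFG_REE_FS_INTEGRITY_RPMB) or with a monotonic NV counter, implemented
 * by the two variants of open_dirh()/commit_dirh_writes() below.
 */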
#ifdef CFG_REE_FS_INTEGRITY_RPMB
static struct tee_file_handle *ree_fs_rpmb_fh;

static TEE_Result open_dirh(struct tee_fs_dirfile_dirh **dirh)
{
	TEE_Result res;
	uint8_t hash[TEE_FS_HTREE_HASH_SIZE];
	uint8_t *hashp = NULL;
	const char fname[] = "dirfile.db.hash";

	res = tee_rpmb_fs_raw_open(fname, false, &ree_fs_rpmb_fh);
	if (!res) {
		size_t l = sizeof(hash);

		res = rpmb_fs_ops.read(ree_fs_rpmb_fh, 0, hash, NULL, &l);
		if (res)
			return res;
		if (l == sizeof(hash))
			hashp = hash;
	} else if (res == TEE_ERROR_ITEM_NOT_FOUND) {
		res = tee_rpmb_fs_raw_open(fname, true, &ree_fs_rpmb_fh);
	}
	if (res)
		return res;

	res = tee_fs_dirfile_open(false, hashp, 0, &ree_dirf_ops, dirh);

	if (res == TEE_ERROR_ITEM_NOT_FOUND) {
		if (hashp) {
			if (IS_ENABLED(CFG_REE_FS_ALLOW_RESET)) {
				DMSG("dirf.db not found, clear hash in RPMB");
				res = rpmb_fs_ops.truncate(ree_fs_rpmb_fh, 0);
				if (res) {
					DMSG("Can't clear hash: %#"PRIx32, res);
					res = TEE_ERROR_SECURITY;
					goto out;
				}
			} else {
				DMSG("dirf.db file not found");
				res = TEE_ERROR_SECURITY;
				goto out;
			}
		}

		DMSG("Create dirf.db");
		res = tee_fs_dirfile_open(true, NULL, 0, &ree_dirf_ops, dirh);
	}

out:
	if (res)
		rpmb_fs_ops.close(&ree_fs_rpmb_fh);

	return res;
}

static TEE_Result commit_dirh_writes(struct tee_fs_dirfile_dirh *dirh)
{
	TEE_Result res;
	uint8_t hash[TEE_FS_HTREE_HASH_SIZE];

	res = tee_fs_dirfile_commit_writes(dirh, hash, NULL);
	if (res)
		return res;
	return rpmb_fs_ops.write(ree_fs_rpmb_fh, 0, hash, NULL,
				 sizeof(hash));
}

static void close_dirh(struct tee_fs_dirfile_dirh **dirh)
{
	tee_fs_dirfile_close(*dirh);
	*dirh = NULL;
	rpmb_fs_ops.close(&ree_fs_rpmb_fh);
}

#else /*!CFG_REE_FS_INTEGRITY_RPMB*/
static TEE_Result open_dirh(struct tee_fs_dirfile_dirh **dirh)
{
	TEE_Result res = TEE_SUCCESS;
	uint32_t min_counter = 0;

	res = nv_counter_get_ree_fs(&min_counter);
	if (res) {
		static bool once;

		if (res != TEE_ERROR_NOT_IMPLEMENTED ||
		    !IS_ENABLED(CFG_INSECURE))
			return res;

		if (!once) {
			IMSG("WARNING (insecure configuration): Failed to get monotonic counter for REE FS, using 0");
			once = true;
		}
		min_counter = 0;
	}
	res = tee_fs_dirfile_open(false, NULL, min_counter, &ree_dirf_ops,
				  dirh);
	if (res == TEE_ERROR_ITEM_NOT_FOUND) {
		if (min_counter) {
			if (!IS_ENABLED(CFG_REE_FS_ALLOW_RESET)) {
				DMSG("dirf.db file not found");
				return TEE_ERROR_SECURITY;
			}
			DMSG("dirf.db not found, initializing with a non-zero monotonic counter");
		}
		return tee_fs_dirfile_open(true, NULL, min_counter,
					   &ree_dirf_ops, dirh);
	}

	return res;
}

static TEE_Result commit_dirh_writes(struct tee_fs_dirfile_dirh *dirh)
{
	TEE_Result res = TEE_SUCCESS;
	uint32_t counter = 0;

	res = tee_fs_dirfile_commit_writes(dirh, NULL, &counter);
	if (res)
		return res;
	res = nv_counter_incr_ree_fs_to(counter);
	if (res == TEE_ERROR_NOT_IMPLEMENTED && IS_ENABLED(CFG_INSECURE)) {
		static bool once;

		if (!once) {
			IMSG("WARNING (insecure configuration): Failed to commit dirh counter %"PRIu32, counter);
			once = true;
		}
		return TEE_SUCCESS;
	}
	return res;
}

static void close_dirh(struct tee_fs_dirfile_dirh **dirh)
{
	tee_fs_dirfile_close(*dirh);
	*dirh = NULL;
}
#endif /*!CFG_REE_FS_INTEGRITY_RPMB*/

static TEE_Result get_dirh(struct tee_fs_dirfile_dirh **dirh)
{
	if (!ree_fs_dirh) {
		TEE_Result res = open_dirh(&ree_fs_dirh);

		if (res) {
			*dirh = NULL;
			return res;
		}
	}
	ree_fs_dirh_refcount++;
	assert(ree_fs_dirh);
	assert(ree_fs_dirh_refcount);
	*dirh = ree_fs_dirh;
	return TEE_SUCCESS;
}

static void put_dirh_primitive(bool close)
{
	assert(ree_fs_dirh_refcount);

	/*
	 * During the execution of one of the ree_fs_ops ree_fs_dirh is
	 * guaranteed to be a valid pointer. But when the fop has returned
	 * another thread may get an error or something causing that fop
	 * to do a put with close=1.
	 *
	 * For all fops but ree_fs_close() there's a call to get_dirh() to
	 * get a new dirh which will open it again if it was closed before.
	 * But in the ree_fs_close() case there's no call to get_dirh(),
	 * only to this function, put_dirh_primitive(), and in this case
	 * ree_fs_dirh may actually be NULL.
	 */
	ree_fs_dirh_refcount--;
	if (ree_fs_dirh && (!ree_fs_dirh_refcount || close))
		close_dirh(&ree_fs_dirh);
}

static void put_dirh(struct tee_fs_dirfile_dirh *dirh, bool close)
{
	if (dirh) {
		assert(dirh == ree_fs_dirh);
		put_dirh_primitive(close);
	}
}
static TEE_Result ree_fs_open(struct tee_pobj *po, size_t *size,
			      struct tee_file_handle **fh)
{
	TEE_Result res;
	struct tee_fs_dirfile_dirh *dirh = NULL;
	struct tee_fs_dirfile_fileh dfh;

	mutex_lock(&ree_fs_mutex);

	res = get_dirh(&dirh);
	if (res != TEE_SUCCESS)
		goto out;

	res = tee_fs_dirfile_find(dirh, &po->uuid, po->obj_id, po->obj_id_len,
				  &dfh);
	if (res != TEE_SUCCESS)
		goto out;

	res = ree_fs_open_primitive(false, dfh.hash, 0, &po->uuid, &dfh, fh);
	if (res == TEE_ERROR_ITEM_NOT_FOUND) {
		/*
		 * If the object isn't found someone has tampered with it,
		 * treat it as corrupt.
		 */
		res = TEE_ERROR_CORRUPT_OBJECT;
	} else if (!res && size) {
		struct tee_fs_fd *fdp = (struct tee_fs_fd *)*fh;

		*size = tee_fs_htree_get_meta(fdp->ht)->length;
	}

out:
	if (res)
		put_dirh(dirh, true);
	mutex_unlock(&ree_fs_mutex);

	return res;
}

static TEE_Result set_name(struct tee_fs_dirfile_dirh *dirh,
			   struct tee_fs_fd *fdp, struct tee_pobj *po,
			   bool overwrite)
{
	TEE_Result res;
	bool have_old_dfh = false;
	struct tee_fs_dirfile_fileh old_dfh = { .idx = -1 };

	res = tee_fs_dirfile_find(dirh, &po->uuid, po->obj_id, po->obj_id_len,
				  &old_dfh);
	if (!overwrite && !res)
		return TEE_ERROR_ACCESS_CONFLICT;

	if (!res)
		have_old_dfh = true;

	/*
	 * If old_dfh wasn't found, the idx will be -1 and
	 * tee_fs_dirfile_rename() will allocate a new index.
	 */
	fdp->dfh.idx = old_dfh.idx;
	old_dfh.idx = -1;
	res = tee_fs_dirfile_rename(dirh, &po->uuid, &fdp->dfh,
				    po->obj_id, po->obj_id_len);
	if (res)
		return res;

	res = commit_dirh_writes(dirh);
	if (res)
		return res;

	if (have_old_dfh)
		tee_fs_rpc_remove_dfh(OPTEE_RPC_CMD_FS, &old_dfh);

	return TEE_SUCCESS;
}

static void ree_fs_close(struct tee_file_handle **fh)
{
	if (*fh) {
		mutex_lock(&ree_fs_mutex);
		put_dirh_primitive(false);
		ree_fs_close_primitive(*fh);
		*fh = NULL;
		mutex_unlock(&ree_fs_mutex);
	}
}
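/*
 * Object creation below follows a write-then-commit pattern: data is
 * written to a temporary dirfile entry, the hash tree is synced to
 * storage, and only then is the entry given its final name via
 * set_name(), which also commits the updated dirfile. On failure the
 * temporary file is removed again.
 */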
static TEE_Result ree_fs_create(struct tee_pobj *po, bool overwrite,
				const void *head, size_t head_size,
				const void *attr, size_t attr_size,
				const void *data_core, const void *data_user,
				size_t data_size, struct tee_file_handle **fh)
{
	struct tee_fs_fd *fdp;
	struct tee_fs_dirfile_dirh *dirh = NULL;
	struct tee_fs_dirfile_fileh dfh;
	TEE_Result res;
	size_t pos = 0;

	/* One of data_core and data_user must be NULL */
	assert(!data_core || !data_user);

	*fh = NULL;
	mutex_lock(&ree_fs_mutex);

	res = get_dirh(&dirh);
	if (res)
		goto out;

	res = tee_fs_dirfile_get_tmp(dirh, &dfh);
	if (res)
		goto out;

	res = ree_fs_open_primitive(true, dfh.hash, 0, &po->uuid, &dfh, fh);
	if (res)
		goto out;

	if (head && head_size) {
		res = ree_fs_write_primitive(*fh, pos, head, NULL, head_size);
		if (res)
			goto out;
		pos += head_size;
	}

	if (attr && attr_size) {
		res = ree_fs_write_primitive(*fh, pos, attr, NULL, attr_size);
		if (res)
			goto out;
		pos += attr_size;
	}

	if ((data_core || data_user) && data_size) {
		res = ree_fs_write_primitive(*fh, pos, data_core, data_user,
					     data_size);
		if (res)
			goto out;
	}

	fdp = (struct tee_fs_fd *)*fh;
	res = tee_fs_htree_sync_to_storage(&fdp->ht, fdp->dfh.hash, NULL);
	if (res)
		goto out;

	res = set_name(dirh, fdp, po, overwrite);
out:
	if (res) {
		put_dirh(dirh, true);
		if (*fh) {
			ree_fs_close_primitive(*fh);
			*fh = NULL;
			tee_fs_rpc_remove_dfh(OPTEE_RPC_CMD_FS, &dfh);
		}
	}
	mutex_unlock(&ree_fs_mutex);

	return res;
}
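/*
 * Commit sequence shared by the write and truncate operations below:
 * sync the hash tree to storage, update the file's hash in the dirfile,
 * then commit the dirfile itself. put_dirh() is called with the result
 * so that the cached handle is closed, and later reopened, if any step
 * failed.
 */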
static TEE_Result ree_fs_write(struct tee_file_handle *fh, size_t pos,
			       const void *buf_core, const void *buf_user,
			       size_t len)
{
	TEE_Result res;
	struct tee_fs_dirfile_dirh *dirh = NULL;
	struct tee_fs_fd *fdp = (struct tee_fs_fd *)fh;

	/* One of buf_core and buf_user must be NULL */
	assert(!buf_core || !buf_user);

	mutex_lock(&ree_fs_mutex);

	res = get_dirh(&dirh);
	if (res)
		goto out;

	res = ree_fs_write_primitive(fh, pos, buf_core, buf_user, len);
	if (res)
		goto out;

	res = tee_fs_htree_sync_to_storage(&fdp->ht, fdp->dfh.hash, NULL);
	if (res)
		goto out;

	res = tee_fs_dirfile_update_hash(dirh, &fdp->dfh);
	if (res)
		goto out;
	res = commit_dirh_writes(dirh);
out:
	put_dirh(dirh, res);
	mutex_unlock(&ree_fs_mutex);

	return res;
}

static TEE_Result ree_fs_rename(struct tee_pobj *old, struct tee_pobj *new,
				bool overwrite)
{
	TEE_Result res;
	struct tee_fs_dirfile_dirh *dirh = NULL;
	struct tee_fs_dirfile_fileh dfh;
	struct tee_fs_dirfile_fileh remove_dfh = { .idx = -1 };

	if (!new)
		return TEE_ERROR_BAD_PARAMETERS;

	mutex_lock(&ree_fs_mutex);
	res = get_dirh(&dirh);
	if (res)
		goto out;

	res = tee_fs_dirfile_find(dirh, &new->uuid, new->obj_id,
				  new->obj_id_len, &remove_dfh);
	if (!res && !overwrite) {
		res = TEE_ERROR_ACCESS_CONFLICT;
		goto out;
	}

	res = tee_fs_dirfile_find(dirh, &old->uuid, old->obj_id,
				  old->obj_id_len, &dfh);
	if (res)
		goto out;

	res = tee_fs_dirfile_rename(dirh, &new->uuid, &dfh, new->obj_id,
				    new->obj_id_len);
	if (res)
		goto out;

	if (remove_dfh.idx != -1) {
		res = tee_fs_dirfile_remove(dirh, &remove_dfh);
		if (res)
			goto out;
	}

	res = commit_dirh_writes(dirh);
	if (res)
		goto out;

	if (remove_dfh.idx != -1)
		tee_fs_rpc_remove_dfh(OPTEE_RPC_CMD_FS, &remove_dfh);

out:
	put_dirh(dirh, res);
	mutex_unlock(&ree_fs_mutex);

	return res;
}

static TEE_Result ree_fs_remove(struct tee_pobj *po)
{
	TEE_Result res;
	struct tee_fs_dirfile_dirh *dirh = NULL;
	struct tee_fs_dirfile_fileh dfh;

	mutex_lock(&ree_fs_mutex);
	res = get_dirh(&dirh);
	if (res)
		goto out;

	res = tee_fs_dirfile_find(dirh, &po->uuid, po->obj_id, po->obj_id_len,
				  &dfh);
	if (res)
		goto out;

	res = tee_fs_dirfile_remove(dirh, &dfh);
	if (res)
		goto out;

	res = commit_dirh_writes(dirh);
	if (res)
		goto out;

	tee_fs_rpc_remove_dfh(OPTEE_RPC_CMD_FS, &dfh);

	assert(tee_fs_dirfile_find(dirh, &po->uuid, po->obj_id, po->obj_id_len,
				   &dfh));
out:
	put_dirh(dirh, res);
	mutex_unlock(&ree_fs_mutex);

	return res;
}

static TEE_Result ree_fs_truncate(struct tee_file_handle *fh, size_t len)
{
	TEE_Result res;
	struct tee_fs_dirfile_dirh *dirh = NULL;
	struct tee_fs_fd *fdp = (struct tee_fs_fd *)fh;

	mutex_lock(&ree_fs_mutex);

	res = get_dirh(&dirh);
	if (res)
		goto out;

	res = ree_fs_ftruncate_internal(fdp, len);
	if (res)
		goto out;

	res = tee_fs_htree_sync_to_storage(&fdp->ht, fdp->dfh.hash, NULL);
	if (res)
		goto out;

	res = tee_fs_dirfile_update_hash(dirh, &fdp->dfh);
	if (res)
		goto out;
	res = commit_dirh_writes(dirh);
out:
	put_dirh(dirh, res);
	mutex_unlock(&ree_fs_mutex);

	return res;
}
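/*
 * Directory enumeration: opendir takes a reference on the cached dirfile
 * handle (held until closedir on success) and checks that at least one
 * object exists for the given TA UUID. readdir advances the d->idx
 * cursor one entry at a time with tee_fs_dirfile_get_next().
 */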
static TEE_Result ree_fs_opendir_rpc(const TEE_UUID *uuid,
				     struct tee_fs_dir **dir)
{
	TEE_Result res = TEE_SUCCESS;
	struct tee_fs_dirfile_dirh *dirh = NULL;
	struct tee_fs_dir *d = calloc(1, sizeof(*d));

	if (!d)
		return TEE_ERROR_OUT_OF_MEMORY;

	d->uuid = uuid;

	mutex_lock(&ree_fs_mutex);

	res = get_dirh(&dirh);
	if (res)
		goto out;

	/* See that there's at least one file */
	d->idx = -1;
	d->d.oidlen = sizeof(d->d.oid);
	res = tee_fs_dirfile_get_next(dirh, d->uuid, &d->idx, d->d.oid,
				      &d->d.oidlen);
	d->idx = -1;

out:
	if (!res) {
		*dir = d;
	} else {
		if (d)
			put_dirh(dirh, false);
		free(d);
	}
	mutex_unlock(&ree_fs_mutex);

	return res;
}

static void ree_fs_closedir_rpc(struct tee_fs_dir *d)
{
	if (d) {
		mutex_lock(&ree_fs_mutex);

		put_dirh(ree_fs_dirh, false);
		free(d);

		mutex_unlock(&ree_fs_mutex);
	}
}

static TEE_Result ree_fs_readdir_rpc(struct tee_fs_dir *d,
				     struct tee_fs_dirent **ent)
{
	struct tee_fs_dirfile_dirh *dirh = NULL;
	TEE_Result res = TEE_SUCCESS;

	mutex_lock(&ree_fs_mutex);

	res = get_dirh(&dirh);
	if (res)
		goto out;

	d->d.oidlen = sizeof(d->d.oid);
	res = tee_fs_dirfile_get_next(dirh, d->uuid, &d->idx, d->d.oid,
				      &d->d.oidlen);
	if (res == TEE_SUCCESS)
		*ent = &d->d;

	put_dirh(dirh, res);
out:
	mutex_unlock(&ree_fs_mutex);

	return res;
}

const struct tee_file_operations ree_fs_ops = {
	.open = ree_fs_open,
	.create = ree_fs_create,
	.close = ree_fs_close,
	.read = ree_fs_read,
	.write = ree_fs_write,
	.truncate = ree_fs_truncate,
	.rename = ree_fs_rename,
	.remove = ree_fs_remove,
	.opendir = ree_fs_opendir_rpc,
	.closedir = ree_fs_closedir_rpc,
	.readdir = ree_fs_readdir_rpc,
};