// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2017, Linaro Limited
 */

#include <assert.h>
#include <kernel/tee_ta_manager.h>
#include <stdlib.h>
#include <string.h>
#include <tee/fs_htree.h>
#include <tee/tee_fs_rpc.h>
#include <trace.h>
#include <types_ext.h>
#include <util.h>

#include "misc.h"

/*
 * The smallest block size that can hold two struct
 * tee_fs_htree_node_image or two struct tee_fs_htree_image.
 */
#define TEST_BLOCK_SIZE		144

struct test_aux {
	uint8_t *data;
	size_t data_len;
	size_t data_alloced;
	uint8_t *block;
};

static TEE_Result test_get_offs_size(enum tee_fs_htree_type type, size_t idx,
				     uint8_t vers, size_t *offs, size_t *size)
{
	const size_t node_size = sizeof(struct tee_fs_htree_node_image);
	const size_t block_nodes = TEST_BLOCK_SIZE / (node_size * 2);
	size_t pbn;
	size_t bidx;

	COMPILE_TIME_ASSERT(TEST_BLOCK_SIZE >
			    sizeof(struct tee_fs_htree_node_image) * 2);
	COMPILE_TIME_ASSERT(TEST_BLOCK_SIZE >
			    sizeof(struct tee_fs_htree_image) * 2);

	assert(vers == 0 || vers == 1);

	/*
	 * File layout
	 *
	 * phys block 0:
	 * tee_fs_htree_image vers 0 @ offs = 0
	 * tee_fs_htree_image vers 1 @ offs = sizeof(tee_fs_htree_image)
	 *
	 * phys block 1:
	 * tee_fs_htree_node_image 0 vers 0 @ offs = 0
	 * tee_fs_htree_node_image 0 vers 1 @ offs = node_size
	 *
	 * phys block 2:
	 * data block 0 vers 0
	 *
	 * phys block 3:
	 * tee_fs_htree_node_image 1 vers 0 @ offs = 0
	 * tee_fs_htree_node_image 1 vers 1 @ offs = node_size
	 *
	 * phys block 4:
	 * data block 0 vers 1
	 *
	 * ...
	 */
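
	/*
	 * Worked example (a sketch, not normative): if block_nodes
	 * happens to be 1, that is, a physical block only fits one pair
	 * of node versions, the formulas below place
	 *
	 *   node idx              in phys block 1 + 2 * idx
	 *   data block idx, vers  in phys block 2 + 4 * idx + 2 * vers
	 *
	 * which matches the picture above (node 0 in block 1, data 0
	 * vers 0 in block 2, node 1 in block 3, data 0 vers 1 in
	 * block 4, ...). The actual value of block_nodes depends on
	 * sizeof(struct tee_fs_htree_node_image).
	 */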

	switch (type) {
	case TEE_FS_HTREE_TYPE_HEAD:
		*offs = sizeof(struct tee_fs_htree_image) * vers;
		*size = sizeof(struct tee_fs_htree_image);
		return TEE_SUCCESS;
	case TEE_FS_HTREE_TYPE_NODE:
		pbn = 1 + ((idx / block_nodes) * block_nodes * 2);
		*offs = pbn * TEST_BLOCK_SIZE +
			2 * node_size * (idx % block_nodes) +
			node_size * vers;
		*size = node_size;
		return TEE_SUCCESS;
	case TEE_FS_HTREE_TYPE_BLOCK:
		bidx = 2 * idx + vers;
		pbn = 2 + bidx + bidx / (block_nodes * 2 - 1);
		*offs = pbn * TEST_BLOCK_SIZE;
		*size = TEST_BLOCK_SIZE;
		return TEE_SUCCESS;
	default:
		return TEE_ERROR_GENERIC;
	}
}

static TEE_Result test_read_init(void *aux, struct tee_fs_rpc_operation *op,
				 enum tee_fs_htree_type type, size_t idx,
				 uint8_t vers, void **data)
{
	TEE_Result res;
	struct test_aux *a = aux;
	size_t offs;
	size_t sz;

	res = test_get_offs_size(type, idx, vers, &offs, &sz);
	if (res == TEE_SUCCESS) {
		memset(op, 0, sizeof(*op));
		op->params[0].u.value.a = (vaddr_t)aux;
		op->params[0].u.value.b = offs;
		op->params[0].u.value.c = sz;
		*data = a->block;
	}

	return res;
}

static void *uint_to_ptr(uintptr_t p)
{
	return (void *)p;
}

static TEE_Result test_read_final(struct tee_fs_rpc_operation *op,
				  size_t *bytes)
{
	struct test_aux *a = uint_to_ptr(op->params[0].u.value.a);
	size_t offs = op->params[0].u.value.b;
	size_t sz = op->params[0].u.value.c;

	if (offs + sz <= a->data_len)
		*bytes = sz;
	else if (offs <= a->data_len)
		*bytes = a->data_len - offs;
	else
		*bytes = 0;

	memcpy(a->block, a->data + offs, *bytes);
	return TEE_SUCCESS;
}

static TEE_Result test_write_init(void *aux, struct tee_fs_rpc_operation *op,
				  enum tee_fs_htree_type type, size_t idx,
				  uint8_t vers, void **data)
{
	return test_read_init(aux, op, type, idx, vers, data);
}

static TEE_Result test_write_final(struct tee_fs_rpc_operation *op)
{
	struct test_aux *a = uint_to_ptr(op->params[0].u.value.a);
	size_t offs = op->params[0].u.value.b;
	size_t sz = op->params[0].u.value.c;
	size_t end = offs + sz;

	if (end > a->data_alloced) {
		EMSG("out of bounds");
		return TEE_ERROR_GENERIC;
	}

	memcpy(a->data + offs, a->block, sz);
	if (end > a->data_len)
		a->data_len = end;
	return TEE_SUCCESS;
}

static const struct tee_fs_htree_storage test_htree_ops = {
	.block_size = TEST_BLOCK_SIZE,
	.rpc_read_init = test_read_init,
	.rpc_read_final = test_read_final,
	.rpc_write_init = test_write_init,
	.rpc_write_final = test_write_final,
};

#define CHECK_RES(res, cleanup)						\
	do {								\
		TEE_Result _res = (res);				\
									\
		if (_res != TEE_SUCCESS) {				\
			EMSG("error: res = %#" PRIx32, _res);		\
			{ cleanup; }					\
		}							\
	} while (0)
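
/*
 * Each 32-bit test word encodes its word index (n), the block number (bn)
 * and the salt of the round that wrote it. read_block() below compares
 * every word against this pattern, so it detects not only random
 * corruption but also stale data (wrong salt) and data returned for the
 * wrong block (wrong bn).
 */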
static uint32_t val_from_bn_n_salt(size_t bn, size_t n, uint8_t salt)
{
	assert(bn < UINT16_MAX);
	assert(n < UINT8_MAX);
	return SHIFT_U32(n, 16) | SHIFT_U32(bn, 8) | salt;
}

static TEE_Result write_block(struct tee_fs_htree **ht, size_t bn, uint8_t salt)
{
	uint32_t b[TEST_BLOCK_SIZE / sizeof(uint32_t)];
	size_t n;

	for (n = 0; n < ARRAY_SIZE(b); n++)
		b[n] = val_from_bn_n_salt(bn, n, salt);

	return tee_fs_htree_write_block(ht, bn, b);
}

static TEE_Result read_block(struct tee_fs_htree **ht, size_t bn, uint8_t salt)
{
	TEE_Result res;
	uint32_t b[TEST_BLOCK_SIZE / sizeof(uint32_t)];
	size_t n;

	res = tee_fs_htree_read_block(ht, bn, b);
	if (res != TEE_SUCCESS)
		return res;

	for (n = 0; n < ARRAY_SIZE(b); n++) {
		if (b[n] != val_from_bn_n_salt(bn, n, salt)) {
			DMSG("Unexpected b[%zu] %#" PRIx32
			     " (expected %#" PRIx32 ")",
			     n, b[n], val_from_bn_n_salt(bn, n, salt));
			return TEE_ERROR_TIME_NOT_SET;
		}
	}

	return TEE_SUCCESS;
}

static TEE_Result do_range(TEE_Result (*fn)(struct tee_fs_htree **ht,
					    size_t bn, uint8_t salt),
			   struct tee_fs_htree **ht, size_t begin,
			   size_t num_blocks, size_t salt)
{
	TEE_Result res = TEE_SUCCESS;
	size_t n;

	for (n = 0; n < num_blocks; n++) {
		res = fn(ht, n + begin, salt);
		CHECK_RES(res, goto out);
	}

out:
	return res;
}

static TEE_Result do_range_backwards(TEE_Result (*fn)(struct tee_fs_htree **ht,
						      size_t bn, uint8_t salt),
				     struct tee_fs_htree **ht, size_t begin,
				     size_t num_blocks, size_t salt)
{
	TEE_Result res = TEE_SUCCESS;
	size_t n;

	for (n = 0; n < num_blocks; n++) {
		res = fn(ht, num_blocks - 1 - n + begin, salt);
		CHECK_RES(res, goto out);
	}

out:
	return res;
}
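
/*
 * Exercise a single hash-tree instance: initialize num_blocks blocks,
 * rewrite them with varying salts, sync to storage, close and reopen.
 * Finally leave the range [w_unsync_begin, w_unsync_begin + w_unsync_num)
 * with unsynced rewrites to verify that such changes are discarded when
 * the tree is closed without a preceding tee_fs_htree_sync_to_storage().
 */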
static TEE_Result htree_test_rewrite(struct test_aux *aux, size_t num_blocks,
				     size_t w_unsync_begin, size_t w_unsync_num)
{
	TEE_Result res;
	struct tee_fs_htree *ht = NULL;
	size_t salt = 23;
	uint8_t hash[TEE_FS_HTREE_HASH_SIZE];
	const TEE_UUID *uuid;
	struct tee_ta_session *sess;

	assert((w_unsync_begin + w_unsync_num) <= num_blocks);

	res = tee_ta_get_current_session(&sess);
	if (res)
		return res;
	uuid = &sess->ctx->uuid;

	aux->data_len = 0;
	memset(aux->data, 0xce, aux->data_alloced);

	res = tee_fs_htree_open(true, hash, uuid, &test_htree_ops, aux, &ht);
	CHECK_RES(res, goto out);

	/*
	 * Initialize all blocks and verify that they read back as
	 * expected.
	 */
	res = do_range(write_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	res = do_range(read_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	/*
	 * Write all blocks again, but starting from the end using a new
	 * salt, then verify that they read back as expected.
	 */
	salt++;
	res = do_range_backwards(write_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	res = do_range(read_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	/*
	 * Use a new salt to write all blocks once more and verify that
	 * they read back as expected.
	 */
	salt++;
	res = do_range(write_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	res = do_range(read_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	/*
	 * Sync the changes of the nodes to the (simulated) storage and
	 * verify that all blocks still read back as expected.
	 */
	res = tee_fs_htree_sync_to_storage(&ht, hash);
	CHECK_RES(res, goto out);

	res = do_range(read_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	/*
	 * Close and reopen the hash-tree
	 */
	tee_fs_htree_close(&ht);
	res = tee_fs_htree_open(false, hash, uuid, &test_htree_ops, aux, &ht);
	CHECK_RES(res, goto out);

	/*
	 * Verify that all blocks are read as expected.
	 */
	res = do_range(read_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	/*
	 * Rewrite a few blocks and verify that all blocks are read as
	 * expected.
	 */
	res = do_range_backwards(write_block, &ht, w_unsync_begin, w_unsync_num,
				 salt + 1);
	CHECK_RES(res, goto out);

	res = do_range(read_block, &ht, 0, w_unsync_begin, salt);
	CHECK_RES(res, goto out);
	res = do_range(read_block, &ht, w_unsync_begin, w_unsync_num, salt + 1);
	CHECK_RES(res, goto out);
	res = do_range(read_block, &ht, w_unsync_begin + w_unsync_num,
		       num_blocks - (w_unsync_begin + w_unsync_num), salt);
	CHECK_RES(res, goto out);

	/*
	 * Rewrite the blocks from above again with another salt and
	 * verify that they are read back as expected.
	 */
	res = do_range(write_block, &ht, w_unsync_begin, w_unsync_num,
		       salt + 2);
	CHECK_RES(res, goto out);

	res = do_range(read_block, &ht, 0, w_unsync_begin, salt);
	CHECK_RES(res, goto out);
	res = do_range(read_block, &ht, w_unsync_begin, w_unsync_num, salt + 2);
	CHECK_RES(res, goto out);
	res = do_range(read_block, &ht, w_unsync_begin + w_unsync_num,
		       num_blocks - (w_unsync_begin + w_unsync_num), salt);
	CHECK_RES(res, goto out);

	/*
	 * Skip tee_fs_htree_sync_to_storage() and call
	 * tee_fs_htree_close() directly to undo the changes since the
	 * last call to tee_fs_htree_sync_to_storage(). Reopen the
	 * hash-tree and verify that the recent changes were indeed
	 * discarded.
	 */
	tee_fs_htree_close(&ht);
	res = tee_fs_htree_open(false, hash, uuid, &test_htree_ops, aux, &ht);
	CHECK_RES(res, goto out);

	res = do_range(read_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	/*
	 * Close, reopen and verify that all blocks are read as expected
	 * again, but this time based on the counter value in struct
	 * tee_fs_htree_image.
	 */
	tee_fs_htree_close(&ht);
	res = tee_fs_htree_open(false, NULL, uuid, &test_htree_ops, aux, &ht);
	CHECK_RES(res, goto out);

	res = do_range(read_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

out:
	tee_fs_htree_close(&ht);
	/*
	 * read_block() returns TEE_ERROR_TIME_NOT_SET if unexpected
	 * data is read.
	 */
	if (res == TEE_ERROR_TIME_NOT_SET)
		res = TEE_ERROR_SECURITY;
	return res;
}

static void aux_free(struct test_aux *aux)
{
	if (aux) {
		free(aux->data);
		free(aux->block);
		free(aux);
	}
}

static struct test_aux *aux_alloc(size_t num_blocks)
{
	struct test_aux *aux;
	size_t o;
	size_t sz;

	if (test_get_offs_size(TEE_FS_HTREE_TYPE_BLOCK, num_blocks, 1, &o, &sz))
		return NULL;

	aux = calloc(1, sizeof(*aux));
	if (!aux)
		return NULL;

	aux->data_alloced = o + sz;
	aux->data = malloc(aux->data_alloced);
	if (!aux->data)
		goto err;

	aux->block = malloc(TEST_BLOCK_SIZE);
	if (!aux->block)
		goto err;

	return aux;
err:
	aux_free(aux);
	return NULL;
}

static TEE_Result test_write_read(size_t num_blocks)
{
	struct test_aux *aux = aux_alloc(num_blocks);
	TEE_Result res = TEE_SUCCESS;
	size_t n;
	size_t m;
	size_t o;

	if (!aux)
		return TEE_ERROR_OUT_OF_MEMORY;

	/*
	 * n is the number of blocks we're going to initialize/use.
	 * m is the offset from where we'll rewrite blocks and expect
	 * the changes to be visible until tee_fs_htree_close() is called
	 * without a call to tee_fs_htree_sync_to_storage() before.
	 * o is the number of blocks we're rewriting starting at m.
	 */
	for (n = 0; n < num_blocks; n += 3) {
		for (m = 0; m < n; m += 3) {
			for (o = 0; o < (n - m); o++) {
				res = htree_test_rewrite(aux, n, m, o);
				CHECK_RES(res, goto out);
				/*
				 * Together with o++ in the for statement
				 * this steps o by 3.
				 */
				o += 2;
			}
		}
	}

out:
	aux_free(aux);
	return res;
}
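
/*
 * Corrupt a single byte (at a set of increasing offsets) in both versions
 * of the selected head, node or data block in a copy of the storage and
 * verify that tee_fs_htree_open() or the following reads fail. If the
 * object still reads back cleanly, or reads back wrong data without an
 * error, that is reported as TEE_ERROR_SECURITY.
 */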
static TEE_Result test_corrupt_type(const TEE_UUID *uuid, uint8_t *hash,
				    size_t num_blocks, struct test_aux *aux,
				    enum tee_fs_htree_type type, size_t idx)
{
	TEE_Result res;
	struct test_aux aux2 = *aux;
	struct tee_fs_htree *ht = NULL;
	size_t offs;
	size_t size;
	size_t size0;
	size_t n;

	res = test_get_offs_size(type, idx, 0, &offs, &size0);
	CHECK_RES(res, return res);

	aux2.data = malloc(aux->data_alloced);
	if (!aux2.data)
		return TEE_ERROR_OUT_OF_MEMORY;

	n = 0;
	while (true) {
		memcpy(aux2.data, aux->data, aux->data_len);

		res = test_get_offs_size(type, idx, 0, &offs, &size);
		CHECK_RES(res, goto out);
		aux2.data[offs + n]++;
		res = test_get_offs_size(type, idx, 1, &offs, &size);
		CHECK_RES(res, goto out);
		aux2.data[offs + n]++;

		/*
		 * Errors in the head or in a node are detected by
		 * tee_fs_htree_open(), while errors in a data block are
		 * detected when the block is actually read by
		 * do_range(read_block, ...).
		 */
		res = tee_fs_htree_open(false, hash, uuid, &test_htree_ops,
					&aux2, &ht);
		if (!res) {
			res = do_range(read_block, &ht, 0, num_blocks, 1);
			/*
			 * do_range(read_block, ...) is supposed to detect
			 * the error. If TEE_ERROR_TIME_NOT_SET is returned
			 * read_block() was actually able to get some data,
			 * but the data was incorrect.
			 *
			 * If res == TEE_SUCCESS or
			 * res == TEE_ERROR_TIME_NOT_SET
			 * there's some problem with the htree
			 * implementation.
			 */
			if (res == TEE_ERROR_TIME_NOT_SET) {
				EMSG("error: data silently corrupted");
				res = TEE_ERROR_SECURITY;
				goto out;
			}
			if (!res)
				break;
			tee_fs_htree_close(&ht);
		}

		/* We've tested the last byte, let's get out of here */
		if (n == size0 - 1)
			break;

		/* Increase n exponentially after 1 to skip some testing */
		if (n)
			n += n;
		else
			n = 1;

		/* Make sure we test the last byte too */
		if (n >= size0)
			n = size0 - 1;
	}

	if (res) {
		res = TEE_SUCCESS;
	} else {
		EMSG("error: data corruption undetected");
		res = TEE_ERROR_SECURITY;
	}
out:
	free(aux2.data);
	tee_fs_htree_close(&ht);
	return res;
}

static TEE_Result test_corrupt(size_t num_blocks)
{
	TEE_Result res;
	struct tee_fs_htree *ht = NULL;
	struct tee_ta_session *sess;
	uint8_t hash[TEE_FS_HTREE_HASH_SIZE];
	const TEE_UUID *uuid;
	struct test_aux *aux;
	size_t n;

	res = tee_ta_get_current_session(&sess);
	if (res)
		return res;
	uuid = &sess->ctx->uuid;

	aux = aux_alloc(num_blocks);
	if (!aux) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out;
	}

	aux->data_len = 0;
	memset(aux->data, 0xce, aux->data_alloced);

	/* Write the object and close it */
	res = tee_fs_htree_open(true, hash, uuid, &test_htree_ops, aux, &ht);
	CHECK_RES(res, goto out);
	res = do_range(write_block, &ht, 0, num_blocks, 1);
	CHECK_RES(res, goto out);
	res = tee_fs_htree_sync_to_storage(&ht, hash);
	CHECK_RES(res, goto out);
	tee_fs_htree_close(&ht);

	/* Verify that the object can be read correctly */
	res = tee_fs_htree_open(false, hash, uuid, &test_htree_ops, aux, &ht);
	CHECK_RES(res, goto out);
	res = do_range(read_block, &ht, 0, num_blocks, 1);
	CHECK_RES(res, goto out);
	tee_fs_htree_close(&ht);

	res = test_corrupt_type(uuid, hash, num_blocks, aux,
				TEE_FS_HTREE_TYPE_HEAD, 0);
	CHECK_RES(res, goto out);
	for (n = 0; n < num_blocks; n++) {
		res = test_corrupt_type(uuid, hash, num_blocks, aux,
					TEE_FS_HTREE_TYPE_NODE, n);
		CHECK_RES(res, goto out);
	}
	for (n = 0; n < num_blocks; n++) {
		res = test_corrupt_type(uuid, hash, num_blocks, aux,
					TEE_FS_HTREE_TYPE_BLOCK, n);
		CHECK_RES(res, goto out);
	}

out:
	tee_fs_htree_close(&ht);
	aux_free(aux);
	return res;
}

TEE_Result core_fs_htree_tests(uint32_t nParamTypes,
			       TEE_Param pParams[TEE_NUM_PARAMS] __unused)
{
	TEE_Result res;

	if (nParamTypes)
		return TEE_ERROR_BAD_PARAMETERS;

	res = test_write_read(10);
	if (res)
		return res;

	return test_corrupt(5);
}