// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2017, Linaro Limited
 */

#include <assert.h>
#include <kernel/ts_manager.h>
#include <stdlib.h>
#include <string.h>
#include <tee/fs_htree.h>
#include <tee/tee_fs_rpc.h>
#include <trace.h>
#include <types_ext.h>
#include <util.h>

#include "misc.h"

/*
 * The smallest block size that can hold two struct
 * tee_fs_htree_node_image or two struct tee_fs_htree_image.
 */
#define TEST_BLOCK_SIZE		144

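/*
 * In-memory image of the "file" used as storage backend by the tests:
 * @data, @data_len and @data_alloced describe the backing buffer while
 * @block is a bounce buffer of TEST_BLOCK_SIZE bytes handed to the
 * hash-tree code during reads and writes.
 */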
struct test_aux {
	uint8_t *data;
	size_t data_len;
	size_t data_alloced;
	uint8_t *block;
};

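/*
 * Compute the offset and size of element @idx of the given type and
 * version within the emulated file, following the layout described in
 * the comment below.
 */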
static TEE_Result test_get_offs_size(enum tee_fs_htree_type type, size_t idx,
				     uint8_t vers, size_t *offs, size_t *size)
{
	const size_t node_size = sizeof(struct tee_fs_htree_node_image);
	const size_t block_nodes = TEST_BLOCK_SIZE / (node_size * 2);
	size_t pbn = 0;
	size_t bidx = 0;

	COMPILE_TIME_ASSERT(TEST_BLOCK_SIZE >
			    sizeof(struct tee_fs_htree_node_image) * 2);
	COMPILE_TIME_ASSERT(TEST_BLOCK_SIZE >
			    sizeof(struct tee_fs_htree_image) * 2);

	assert(vers == 0 || vers == 1);

	/*
	 * File layout
	 *
	 * phys block 0:
	 * tee_fs_htree_image vers 0 @ offs = 0
	 * tee_fs_htree_image vers 1 @ offs = sizeof(tee_fs_htree_image)
	 *
	 * phys block 1:
	 * tee_fs_htree_node_image 0  vers 0 @ offs = 0
	 * tee_fs_htree_node_image 0  vers 1 @ offs = node_size
	 *
	 * phys block 2:
	 * data block 0 vers 0
	 *
	 * phys block 3:
	 * tee_fs_htree_node_image 1  vers 0 @ offs = 0
	 * tee_fs_htree_node_image 1  vers 1 @ offs = node_size
	 *
	 * phys block 4:
	 * data block 0 vers 1
	 *
	 * ...
	 */

	switch (type) {
	case TEE_FS_HTREE_TYPE_HEAD:
		*offs = sizeof(struct tee_fs_htree_image) * vers;
		*size = sizeof(struct tee_fs_htree_image);
		return TEE_SUCCESS;
	case TEE_FS_HTREE_TYPE_NODE:
		pbn = 1 + ((idx / block_nodes) * block_nodes * 2);
		*offs = pbn * TEST_BLOCK_SIZE +
			2 * node_size * (idx % block_nodes) +
			node_size * vers;
		*size = node_size;
		return TEE_SUCCESS;
	case TEE_FS_HTREE_TYPE_BLOCK:
		bidx = 2 * idx + vers;
		pbn = 2 + bidx + bidx / (block_nodes * 2 - 1);
		*offs = pbn * TEST_BLOCK_SIZE;
		*size = TEST_BLOCK_SIZE;
		return TEE_SUCCESS;
	default:
		return TEE_ERROR_GENERIC;
	}
}

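/*
 * The rpc_read_*()/rpc_write_*() hooks below replace the normal-world RPC
 * storage backend: the *_init() functions stash the aux pointer, offset
 * and size in op->params[0] and hand out aux->block as bounce buffer,
 * while the *_final() functions copy data between the bounce buffer and
 * the in-memory backing buffer.
 */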
static TEE_Result test_read_init(void *aux, struct tee_fs_rpc_operation *op,
				 enum tee_fs_htree_type type, size_t idx,
				 uint8_t vers, void **data)
{
	TEE_Result res = TEE_SUCCESS;
	struct test_aux *a = aux;
	size_t offs = 0;
	size_t sz = 0;

	res = test_get_offs_size(type, idx, vers, &offs, &sz);
	if (res == TEE_SUCCESS) {
		memset(op, 0, sizeof(*op));
		op->params[0].u.value.a = (vaddr_t)aux;
		op->params[0].u.value.b = offs;
		op->params[0].u.value.c = sz;
		*data = a->block;
	}

	return res;
}

static void *uint_to_ptr(uintptr_t p)
{
	return (void *)p;
}

static TEE_Result test_read_final(struct tee_fs_rpc_operation *op,
				  size_t *bytes)
{
	struct test_aux *a = uint_to_ptr(op->params[0].u.value.a);
	size_t offs = op->params[0].u.value.b;
	size_t sz = op->params[0].u.value.c;

	if (offs + sz <= a->data_len)
		*bytes = sz;
	else if (offs <= a->data_len)
		*bytes = a->data_len - offs;
	else
		*bytes = 0;

	memcpy(a->block, a->data + offs, *bytes);
	return TEE_SUCCESS;
}

static TEE_Result test_write_init(void *aux, struct tee_fs_rpc_operation *op,
				  enum tee_fs_htree_type type, size_t idx,
				  uint8_t vers, void **data)
{
	return test_read_init(aux, op, type, idx, vers, data);
}

static TEE_Result test_write_final(struct tee_fs_rpc_operation *op)
{
	struct test_aux *a = uint_to_ptr(op->params[0].u.value.a);
	size_t offs = op->params[0].u.value.b;
	size_t sz = op->params[0].u.value.c;
	size_t end = offs + sz;

	if (end > a->data_alloced) {
		EMSG("out of bounds");
		return TEE_ERROR_GENERIC;
	}

	memcpy(a->data + offs, a->block, sz);
	if (end > a->data_len)
		a->data_len = end;
	return TEE_SUCCESS;
}

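/* Storage hooks plugged into the hash-tree code; all I/O stays in memory */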
static const struct tee_fs_htree_storage test_htree_ops = {
	.block_size = TEST_BLOCK_SIZE,
	.rpc_read_init = test_read_init,
	.rpc_read_final = test_read_final,
	.rpc_write_init = test_write_init,
	.rpc_write_final = test_write_final,
};

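/* Log an error and run the supplied cleanup statement on failure */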
#define CHECK_RES(res, cleanup)						\
		do {							\
			TEE_Result _res = (res);			\
									\
			if (_res != TEE_SUCCESS) {			\
				EMSG("error: res = %#" PRIx32, _res);	\
				{ cleanup; }				\
			}						\
		} while (0)

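/* Derive a 32-bit test pattern from block number, word index and salt */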
static uint32_t val_from_bn_n_salt(size_t bn, size_t n, uint8_t salt)
{
	assert(bn < UINT16_MAX);
	assert(n < UINT8_MAX);
	return SHIFT_U32(n, 16) | SHIFT_U32(bn, 8) | salt;
}

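/*
 * write_block() fills block @bn with a pattern derived from the block
 * number and @salt and writes it to the tree, read_block() reads the
 * block back and returns TEE_ERROR_TIME_NOT_SET if the content doesn't
 * match the expected pattern.
 */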
static TEE_Result write_block(struct tee_fs_htree **ht, size_t bn, uint8_t salt)
{
	uint32_t b[TEST_BLOCK_SIZE / sizeof(uint32_t)] = { 0 };
	size_t n = 0;

	for (n = 0; n < ARRAY_SIZE(b); n++)
		b[n] = val_from_bn_n_salt(bn, n, salt);

	return tee_fs_htree_write_block(ht, bn, b);
}

static TEE_Result read_block(struct tee_fs_htree **ht, size_t bn, uint8_t salt)
{
	TEE_Result res = TEE_SUCCESS;
	uint32_t b[TEST_BLOCK_SIZE / sizeof(uint32_t)] = { 0 };
	size_t n = 0;

	res = tee_fs_htree_read_block(ht, bn, b);
	if (res != TEE_SUCCESS)
		return res;

	for (n = 0; n < ARRAY_SIZE(b); n++) {
		if (b[n] != val_from_bn_n_salt(bn, n, salt)) {
			DMSG("Unexpected b[%zu] %#" PRIx32
			     " (expected %#" PRIx32 ")",
			     n, b[n], val_from_bn_n_salt(bn, n, salt));
			return TEE_ERROR_TIME_NOT_SET;
		}
	}

	return TEE_SUCCESS;
}

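/*
 * Apply @fn (write_block() or read_block()) to @num_blocks blocks starting
 * at @begin, in increasing or, for do_range_backwards(), decreasing block
 * number order.
 */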
static TEE_Result do_range(TEE_Result (*fn)(struct tee_fs_htree **ht,
					    size_t bn, uint8_t salt),
			   struct tee_fs_htree **ht, size_t begin,
			   size_t num_blocks, size_t salt)
{
	TEE_Result res = TEE_SUCCESS;
	size_t n = 0;

	for (n = 0; n < num_blocks; n++) {
		res = fn(ht, n + begin, salt);
		CHECK_RES(res, goto out);
	}

out:
	return res;
}

static TEE_Result do_range_backwards(TEE_Result (*fn)(struct tee_fs_htree **ht,
						      size_t bn, uint8_t salt),
				     struct tee_fs_htree **ht, size_t begin,
				     size_t num_blocks, size_t salt)
{
	TEE_Result res = TEE_SUCCESS;
	size_t n = 0;

	for (n = 0; n < num_blocks; n++) {
		res = fn(ht, num_blocks - 1 - n + begin, salt);
		CHECK_RES(res, goto out);
	}

out:
	return res;
}

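/*
 * Create a hash-tree backed by @aux, write and read back @num_blocks
 * blocks with a few different salts, sync to storage and reopen, then
 * verify that block updates that are not followed by
 * tee_fs_htree_sync_to_storage() are discarded by tee_fs_htree_close().
 */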
static TEE_Result htree_test_rewrite(struct test_aux *aux, size_t num_blocks,
				     size_t w_unsync_begin, size_t w_unsync_num)
{
	struct ts_session *sess = ts_get_current_session();
	const TEE_UUID *uuid = &sess->ctx->uuid;
	TEE_Result res = TEE_SUCCESS;
	struct tee_fs_htree *ht = NULL;
	size_t salt = 23;
	uint8_t hash[TEE_FS_HTREE_HASH_SIZE] = { 0 };

	assert((w_unsync_begin + w_unsync_num) <= num_blocks);

	aux->data_len = 0;
	memset(aux->data, 0xce, aux->data_alloced);

	res = tee_fs_htree_open(true, hash, 0, uuid, &test_htree_ops, aux, &ht);
	CHECK_RES(res, goto out);

	/*
	 * Initialize all blocks and verify that they read back as
	 * expected.
	 */
	res = do_range(write_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	res = do_range(read_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	/*
	 * Write all blocks again, but starting from the end using a new
	 * salt, then verify that they read back as expected.
	 */
	salt++;
	res = do_range_backwards(write_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	res = do_range(read_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	/*
	 * Use a new salt to write all blocks once more and verify that
	 * they read back as expected.
	 */
	salt++;
	res = do_range(write_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	res = do_range(read_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	/*
	 * Sync the changes of the nodes to storage and verify that all
	 * blocks are read back as expected.
	 */
	res = tee_fs_htree_sync_to_storage(&ht, hash, NULL);
	CHECK_RES(res, goto out);

	res = do_range(read_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	/*
	 * Close and reopen the hash-tree.
	 */
	tee_fs_htree_close(&ht);
	res = tee_fs_htree_open(false, hash, 0, uuid, &test_htree_ops, aux,
				&ht);
	CHECK_RES(res, goto out);

	/*
	 * Verify that all blocks are read as expected.
	 */
	res = do_range(read_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	/*
	 * Rewrite a few blocks and verify that all blocks are read as
	 * expected.
	 */
	res = do_range_backwards(write_block, &ht, w_unsync_begin, w_unsync_num,
				 salt + 1);
	CHECK_RES(res, goto out);

	res = do_range(read_block, &ht, 0, w_unsync_begin, salt);
	CHECK_RES(res, goto out);
	res = do_range(read_block, &ht, w_unsync_begin, w_unsync_num, salt + 1);
	CHECK_RES(res, goto out);
	res = do_range(read_block, &ht, w_unsync_begin + w_unsync_num,
			num_blocks - (w_unsync_begin + w_unsync_num), salt);
	CHECK_RES(res, goto out);

	/*
	 * Rewrite the blocks from above again with another salt and
	 * verify that they are read back as expected.
	 */
	res = do_range(write_block, &ht, w_unsync_begin, w_unsync_num,
		       salt + 2);
	CHECK_RES(res, goto out);

	res = do_range(read_block, &ht, 0, w_unsync_begin, salt);
	CHECK_RES(res, goto out);
	res = do_range(read_block, &ht, w_unsync_begin, w_unsync_num, salt + 2);
	CHECK_RES(res, goto out);
	res = do_range(read_block, &ht, w_unsync_begin + w_unsync_num,
			num_blocks - (w_unsync_begin + w_unsync_num), salt);
	CHECK_RES(res, goto out);

	/*
	 * Skip tee_fs_htree_sync_to_storage() and call
	 * tee_fs_htree_close() directly to undo the changes since the last
	 * call to tee_fs_htree_sync_to_storage(). Reopen the hash-tree
	 * and verify that the recent changes were indeed discarded.
	 */
	tee_fs_htree_close(&ht);
	res = tee_fs_htree_open(false, hash, 0, uuid, &test_htree_ops, aux,
				&ht);
	CHECK_RES(res, goto out);

	res = do_range(read_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	/*
	 * Close, reopen and verify that all blocks are read as expected
	 * again, but this time based on the counter value in struct
	 * tee_fs_htree_image.
	 */
	tee_fs_htree_close(&ht);
	res = tee_fs_htree_open(false, NULL, 0, uuid, &test_htree_ops, aux,
				&ht);
	CHECK_RES(res, goto out);

	res = do_range(read_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

out:
	tee_fs_htree_close(&ht);
	/*
	 * read_block() returns TEE_ERROR_TIME_NOT_SET in case unexpected
	 * data is read.
	 */
	if (res == TEE_ERROR_TIME_NOT_SET)
		res = TEE_ERROR_SECURITY;
	return res;
}

static void aux_free(struct test_aux *aux)
{
	if (aux) {
		free(aux->data);
		free(aux->block);
		free(aux);
	}
}

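/*
 * Allocate a struct test_aux with a backing buffer large enough for
 * @num_blocks data blocks in both versions together with the interleaved
 * head and node blocks, plus a one-block bounce buffer.
 */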
static struct test_aux *aux_alloc(size_t num_blocks)
{
	struct test_aux *aux = NULL;
	size_t o = 0;
	size_t sz = 0;

	if (test_get_offs_size(TEE_FS_HTREE_TYPE_BLOCK, num_blocks, 1, &o, &sz))
		return NULL;

	aux = calloc(1, sizeof(*aux));
	if (!aux)
		return NULL;

	aux->data_alloced = o + sz;
	aux->data = malloc(aux->data_alloced);
	if (!aux->data)
		goto err;

	aux->block = malloc(TEST_BLOCK_SIZE);
	if (!aux->block)
		goto err;

	return aux;
err:
	aux_free(aux);
	return NULL;
}

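/*
 * Exercise htree_test_rewrite() for an increasing number of blocks and
 * for various windows of blocks that are rewritten without being synced.
 */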
static TEE_Result test_write_read(size_t num_blocks)
{
	struct test_aux *aux = aux_alloc(num_blocks);
	TEE_Result res = TEE_SUCCESS;
	size_t n = 0;
	size_t m = 0;
	size_t o = 0;

	if (!aux)
		return TEE_ERROR_OUT_OF_MEMORY;

	/*
	 * n is the number of blocks we're going to initialize/use.
	 * m is the offset from where we'll rewrite blocks and expect
	 * the changes to be visible until tee_fs_htree_close() is called
	 * without a preceding call to tee_fs_htree_sync_to_storage().
	 * o is the number of blocks we're rewriting starting at m.
	 */
	for (n = 0; n < num_blocks; n += 3) {
		for (m = 0; m < n; m += 3) {
			for (o = 0; o < (n - m); o++) {
				res = htree_test_rewrite(aux, n, m, o);
				CHECK_RES(res, goto out);
				o += 2;
			}
		}
	}

out:
	aux_free(aux);
	return res;
}

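/*
 * Corrupt both versions of the element identified by @type and @idx in a
 * copy of the backing store, one byte position at a time, and verify that
 * the corruption is detected either by tee_fs_htree_open() or when the
 * blocks are read back.
 */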
static TEE_Result test_corrupt_type(const TEE_UUID *uuid, uint8_t *hash,
				    size_t num_blocks, struct test_aux *aux,
				    enum tee_fs_htree_type type, size_t idx)
{
	TEE_Result res = TEE_SUCCESS;
	struct test_aux aux2 = *aux;
	struct tee_fs_htree *ht = NULL;
	size_t offs = 0;
	size_t size = 0;
	size_t size0 = 0;
	size_t n = 0;

	res = test_get_offs_size(type, idx, 0, &offs, &size0);
	CHECK_RES(res, return res);

	aux2.data = malloc(aux->data_alloced);
	if (!aux2.data)
		return TEE_ERROR_OUT_OF_MEMORY;

	n = 0;
	while (true) {
		memcpy(aux2.data, aux->data, aux->data_len);

		res = test_get_offs_size(type, idx, 0, &offs, &size);
		CHECK_RES(res, goto out);
		aux2.data[offs + n]++;
		res = test_get_offs_size(type, idx, 1, &offs, &size);
		CHECK_RES(res, goto out);
		aux2.data[offs + n]++;

		/*
		 * Errors in the head or in a node are detected by
		 * tee_fs_htree_open(), errors in a block are detected when
		 * it is actually read by do_range(read_block, ...).
		 */
		res = tee_fs_htree_open(false, hash, 0, uuid, &test_htree_ops,
					&aux2, &ht);
		if (!res) {
			res = do_range(read_block, &ht, 0, num_blocks, 1);
			/*
			 * do_range(read_block, ...) is supposed to detect the
			 * error. If TEE_ERROR_TIME_NOT_SET is returned
			 * read_block() was actually able to get some data,
			 * but the data was incorrect.
			 *
			 * If res == TEE_SUCCESS or
			 *    res == TEE_ERROR_TIME_NOT_SET
			 * there's some problem with the htree
			 * implementation.
			 */
			if (res == TEE_ERROR_TIME_NOT_SET) {
				EMSG("error: data silently corrupted");
				res = TEE_ERROR_SECURITY;
				goto out;
			}
			if (!res)
				break;
			tee_fs_htree_close(&ht);
		}

		/* We've tested the last byte, let's get out of here */
		if (n == size0 - 1)
			break;

		/* Increase n exponentially after 1 to skip some testing */
		if (n)
			n += n;
		else
			n = 1;

		/* Make sure we test the last byte too */
		if (n >= size0)
			n = size0 - 1;
	}

	if (res) {
		res = TEE_SUCCESS;
	} else {
		EMSG("error: data corruption undetected");
		res = TEE_ERROR_SECURITY;
	}
out:
	free(aux2.data);
	tee_fs_htree_close(&ht);
	return res;
}

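/*
 * Create and sync a hash-tree with @num_blocks blocks, verify that it
 * reads back correctly and then check that corruption of the head, of
 * each node and of each data block is detected.
 */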
static TEE_Result test_corrupt(size_t num_blocks)
{
	struct ts_session *sess = ts_get_current_session();
	const TEE_UUID *uuid = &sess->ctx->uuid;
	TEE_Result res = TEE_SUCCESS;
	struct tee_fs_htree *ht = NULL;
	uint8_t hash[TEE_FS_HTREE_HASH_SIZE] = { 0 };
	struct test_aux *aux = NULL;
	size_t n = 0;

	aux = aux_alloc(num_blocks);
	if (!aux) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out;
	}

	aux->data_len = 0;
	memset(aux->data, 0xce, aux->data_alloced);

	/* Write the object and close it */
	res = tee_fs_htree_open(true, hash, 0, uuid, &test_htree_ops, aux, &ht);
	CHECK_RES(res, goto out);
	res = do_range(write_block, &ht, 0, num_blocks, 1);
	CHECK_RES(res, goto out);
	res = tee_fs_htree_sync_to_storage(&ht, hash, NULL);
	CHECK_RES(res, goto out);
	tee_fs_htree_close(&ht);

	/* Verify that the object can be read correctly */
	res = tee_fs_htree_open(false, hash, 0, uuid, &test_htree_ops, aux,
				&ht);
	CHECK_RES(res, goto out);
	res = do_range(read_block, &ht, 0, num_blocks, 1);
	CHECK_RES(res, goto out);
	tee_fs_htree_close(&ht);

	res = test_corrupt_type(uuid, hash, num_blocks, aux,
				TEE_FS_HTREE_TYPE_HEAD, 0);
	CHECK_RES(res, goto out);
	for (n = 0; n < num_blocks; n++) {
		res = test_corrupt_type(uuid, hash, num_blocks, aux,
					TEE_FS_HTREE_TYPE_NODE, n);
		CHECK_RES(res, goto out);
	}
	for (n = 0; n < num_blocks; n++) {
		res = test_corrupt_type(uuid, hash, num_blocks, aux,
					TEE_FS_HTREE_TYPE_BLOCK, n);
		CHECK_RES(res, goto out);
	}

out:
	tee_fs_htree_close(&ht);
	aux_free(aux);
	return res;
}

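/*
 * Entry point for the hash-tree tests: expects no parameters and runs the
 * write/read test followed by the corruption test.
 */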
TEE_Result core_fs_htree_tests(uint32_t nParamTypes,
			       TEE_Param pParams[TEE_NUM_PARAMS] __unused)
{
	TEE_Result res = TEE_SUCCESS;

	if (nParamTypes)
		return TEE_ERROR_BAD_PARAMETERS;

	res = test_write_read(10);
	if (res)
		return res;

	return test_corrupt(5);
}