// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2017, Linaro Limited
 */

#include <assert.h>
#include <kernel/ts_manager.h>
#include <stdlib.h>
#include <string.h>
#include <tee/fs_htree.h>
#include <tee/tee_fs_rpc.h>
#include <trace.h>
#include <types_ext.h>
#include <util.h>

#include "misc.h"

/*
 * The smallest block size that can hold two struct
 * tee_fs_htree_node_image or two struct tee_fs_htree_image.
 */
#define TEST_BLOCK_SIZE		144

struct test_aux {
	uint8_t *data;
	size_t data_len;
	size_t data_alloced;
	uint8_t *block;
};

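/*
 * test_get_offs_size() maps a (type, index, version) triple onto a byte
 * offset and size within the flat "file" that struct test_aux emulates:
 * data/data_len/data_alloced describe the in-memory file contents and
 * block is the bounce buffer handed out to the hash-tree code.
 */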
static TEE_Result test_get_offs_size(enum tee_fs_htree_type type, size_t idx,
				     uint8_t vers, size_t *offs, size_t *size)
{
	const size_t node_size = sizeof(struct tee_fs_htree_node_image);
	const size_t block_nodes = TEST_BLOCK_SIZE / (node_size * 2);
	size_t pbn = 0;
	size_t bidx = 0;

	COMPILE_TIME_ASSERT(TEST_BLOCK_SIZE >
			    sizeof(struct tee_fs_htree_node_image) * 2);
	COMPILE_TIME_ASSERT(TEST_BLOCK_SIZE >
			    sizeof(struct tee_fs_htree_image) * 2);

	assert(vers == 0 || vers == 1);

	/*
	 * File layout
	 *
	 * phys block 0:
	 * tee_fs_htree_image vers 0 @ offs = 0
	 * tee_fs_htree_image vers 1 @ offs = sizeof(tee_fs_htree_image)
	 *
	 * phys block 1:
	 * tee_fs_htree_node_image 0  vers 0 @ offs = 0
	 * tee_fs_htree_node_image 0  vers 1 @ offs = node_size
	 *
	 * phys block 2:
	 * data block 0 vers 0
	 *
	 * phys block 3:
	 * tee_fs_htree_node_image 1  vers 0 @ offs = 0
	 * tee_fs_htree_node_image 1  vers 1 @ offs = node_size
	 *
	 * phys block 4:
	 * data block 0 vers 1
	 *
	 * ...
	 */
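
	/*
	 * Worked example (descriptive only): with the current struct
	 * sizes a block holds exactly one pair of node images, so
	 * block_nodes computes to 1.  Node idx then lives in phys block
	 * 1 + 2 * idx and data block idx vers v in phys block
	 * 2 + 2 * (2 * idx + v), e.g. node 2 -> phys block 5, data
	 * block 1 -> phys blocks 6 (vers 0) and 8 (vers 1), matching
	 * the layout above.
	 */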

	switch (type) {
	case TEE_FS_HTREE_TYPE_HEAD:
		*offs = sizeof(struct tee_fs_htree_image) * vers;
		*size = sizeof(struct tee_fs_htree_image);
		return TEE_SUCCESS;
	case TEE_FS_HTREE_TYPE_NODE:
		pbn = 1 + ((idx / block_nodes) * block_nodes * 2);
		*offs = pbn * TEST_BLOCK_SIZE +
			2 * node_size * (idx % block_nodes) +
			node_size * vers;
		*size = node_size;
		return TEE_SUCCESS;
	case TEE_FS_HTREE_TYPE_BLOCK:
		bidx = 2 * idx + vers;
		pbn = 2 + bidx + bidx / (block_nodes * 2 - 1);
		*offs = pbn * TEST_BLOCK_SIZE;
		*size = TEST_BLOCK_SIZE;
		return TEE_SUCCESS;
	default:
		return TEE_ERROR_GENERIC;
	}
}

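/*
 * The storage hooks below emulate the RPC to normal world: the *_init()
 * functions stash the aux pointer, offset and size in the operation
 * parameters and hand out the bounce buffer, while the *_final()
 * functions copy data between the bounce buffer and the in-memory
 * "file" kept in struct test_aux.
 */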
static TEE_Result test_read_init(void *aux, struct tee_fs_rpc_operation *op,
				 enum tee_fs_htree_type type, size_t idx,
				 uint8_t vers, void **data)
{
	TEE_Result res = TEE_SUCCESS;
	struct test_aux *a = aux;
	size_t offs = 0;
	size_t sz = 0;

	res = test_get_offs_size(type, idx, vers, &offs, &sz);
	if (res == TEE_SUCCESS) {
		memset(op, 0, sizeof(*op));
		op->params[0].u.value.a = (vaddr_t)aux;
		op->params[0].u.value.b = offs;
		op->params[0].u.value.c = sz;
		*data = a->block;
	}

	return res;
}

static void *uint_to_ptr(uintptr_t p)
{
	return (void *)p;
}

static TEE_Result test_read_final(struct tee_fs_rpc_operation *op,
				  size_t *bytes)
{
	struct test_aux *a = uint_to_ptr(op->params[0].u.value.a);
	size_t offs = op->params[0].u.value.b;
	size_t sz = op->params[0].u.value.c;

	if (offs + sz <= a->data_len)
		*bytes = sz;
	else if (offs <= a->data_len)
		*bytes = a->data_len - offs;
	else
		*bytes = 0;

	memcpy(a->block, a->data + offs, *bytes);
	return TEE_SUCCESS;
}

static TEE_Result test_write_init(void *aux, struct tee_fs_rpc_operation *op,
				  enum tee_fs_htree_type type, size_t idx,
				  uint8_t vers, void **data)
{
	return test_read_init(aux, op, type, idx, vers, data);
}

static TEE_Result test_write_final(struct tee_fs_rpc_operation *op)
{
	struct test_aux *a = uint_to_ptr(op->params[0].u.value.a);
	size_t offs = op->params[0].u.value.b;
	size_t sz = op->params[0].u.value.c;
	size_t end = offs + sz;

	if (end > a->data_alloced) {
		EMSG("out of bounds");
		return TEE_ERROR_GENERIC;
	}

	memcpy(a->data + offs, a->block, sz);
	if (end > a->data_len)
		a->data_len = end;
	return TEE_SUCCESS;
}

static const struct tee_fs_htree_storage test_htree_ops = {
	.block_size = TEST_BLOCK_SIZE,
	.rpc_read_init = test_read_init,
	.rpc_read_final = test_read_final,
	.rpc_write_init = test_write_init,
	.rpc_write_final = test_write_final,
};

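/*
 * CHECK_RES() logs the TEE_Result value and runs the supplied cleanup
 * statement (typically a goto) whenever res is not TEE_SUCCESS.
 */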
#define CHECK_RES(res, cleanup)						\
		do {							\
			TEE_Result _res = (res);			\
									\
			if (_res != TEE_SUCCESS) {			\
				EMSG("error: res = %#" PRIx32, _res);	\
				{ cleanup; }				\
			}						\
		} while (0)

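/*
 * Derive a 32-bit test pattern from the block number, the word index
 * within the block and a salt, so that stale or corrupted data is easy
 * to recognize when a block is read back.
 */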
static uint32_t val_from_bn_n_salt(size_t bn, size_t n, uint8_t salt)
{
	assert(bn < UINT16_MAX);
	assert(n < UINT8_MAX);
	return SHIFT_U32(n, 16) | SHIFT_U32(bn, 8) | salt;
}

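/*
 * write_block() fills a data block with the pattern above while
 * read_block() reads a block back and verifies the pattern, returning
 * TEE_ERROR_TIME_NOT_SET (an error code not otherwise used here) on a
 * mismatch.
 */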
static TEE_Result write_block(struct tee_fs_htree **ht, size_t bn, uint8_t salt)
{
	uint32_t b[TEST_BLOCK_SIZE / sizeof(uint32_t)] = { 0 };
	size_t n = 0;

	for (n = 0; n < ARRAY_SIZE(b); n++)
		b[n] = val_from_bn_n_salt(bn, n, salt);

	return tee_fs_htree_write_block(ht, bn, b);
}

static TEE_Result read_block(struct tee_fs_htree **ht, size_t bn, uint8_t salt)
{
	TEE_Result res = TEE_SUCCESS;
	uint32_t b[TEST_BLOCK_SIZE / sizeof(uint32_t)] = { 0 };
	size_t n = 0;

	res = tee_fs_htree_read_block(ht, bn, b);
	if (res != TEE_SUCCESS)
		return res;

	for (n = 0; n < ARRAY_SIZE(b); n++) {
		if (b[n] != val_from_bn_n_salt(bn, n, salt)) {
			DMSG("Unexpected b[%zu] %#" PRIx32
			     " (expected %#" PRIx32 ")",
			     n, b[n], val_from_bn_n_salt(bn, n, salt));
			return TEE_ERROR_TIME_NOT_SET;
		}
	}

	return TEE_SUCCESS;
}

static TEE_Result do_range(TEE_Result (*fn)(struct tee_fs_htree **ht,
					    size_t bn, uint8_t salt),
			   struct tee_fs_htree **ht, size_t begin,
			   size_t num_blocks, size_t salt)
{
	TEE_Result res = TEE_SUCCESS;
	size_t n = 0;

	for (n = 0; n < num_blocks; n++) {
		res = fn(ht, n + begin, salt);
		CHECK_RES(res, goto out);
	}

out:
	return res;
}

static TEE_Result do_range_backwards(TEE_Result (*fn)(struct tee_fs_htree **ht,
						      size_t bn, uint8_t salt),
				     struct tee_fs_htree **ht, size_t begin,
				     size_t num_blocks, size_t salt)
{
	TEE_Result res = TEE_SUCCESS;
	size_t n = 0;

	for (n = 0; n < num_blocks; n++) {
		res = fn(ht, num_blocks - 1 - n + begin, salt);
		CHECK_RES(res, goto out);
	}

out:
	return res;
}

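/*
 * Create a hash-tree backed by the in-memory file in aux, write and
 * read back num_blocks blocks with varying salts, sync to storage,
 * close and reopen, and finally verify that block updates made after
 * the last sync are discarded on close.
 */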
static TEE_Result htree_test_rewrite(struct test_aux *aux, size_t num_blocks,
				     size_t w_unsync_begin, size_t w_unsync_num)
{
	struct ts_session *sess = ts_get_current_session();
	const TEE_UUID *uuid = &sess->ctx->uuid;
	TEE_Result res = TEE_SUCCESS;
	struct tee_fs_htree *ht = NULL;
	size_t salt = 23;
	uint8_t hash[TEE_FS_HTREE_HASH_SIZE] = { 0 };

	assert((w_unsync_begin + w_unsync_num) <= num_blocks);

	aux->data_len = 0;
	memset(aux->data, 0xce, aux->data_alloced);

	res = tee_fs_htree_open(true, hash, uuid, &test_htree_ops, aux, &ht);
	CHECK_RES(res, goto out);

	/*
	 * Initialize all blocks and verify that they read back as
	 * expected.
	 */
	res = do_range(write_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	res = do_range(read_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	/*
	 * Write all blocks again, but starting from the end and using a
	 * new salt, then verify that they read back as expected.
	 */
	salt++;
	res = do_range_backwards(write_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	res = do_range(read_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	/*
	 * Use a new salt to write all blocks once more and verify that
	 * they read back as expected.
	 */
	salt++;
	res = do_range(write_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	res = do_range(read_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	/*
	 * Sync the changes of the nodes to memory, verify that all
	 * blocks are read back as expected.
	 */
	res = tee_fs_htree_sync_to_storage(&ht, hash);
	CHECK_RES(res, goto out);

	res = do_range(read_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	/*
	 * Close and reopen the hash-tree
	 */
	tee_fs_htree_close(&ht);
	res = tee_fs_htree_open(false, hash, uuid, &test_htree_ops, aux, &ht);
	CHECK_RES(res, goto out);

	/*
	 * Verify that all blocks are read as expected.
	 */
	res = do_range(read_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	/*
	 * Rewrite a few blocks and verify that all blocks are read as
	 * expected.
	 */
	res = do_range_backwards(write_block, &ht, w_unsync_begin, w_unsync_num,
				 salt + 1);
	CHECK_RES(res, goto out);

	res = do_range(read_block, &ht, 0, w_unsync_begin, salt);
	CHECK_RES(res, goto out);
	res = do_range(read_block, &ht, w_unsync_begin, w_unsync_num, salt + 1);
	CHECK_RES(res, goto out);
	res = do_range(read_block, &ht, w_unsync_begin + w_unsync_num,
			num_blocks - (w_unsync_begin + w_unsync_num), salt);
	CHECK_RES(res, goto out);

	/*
	 * Rewrite the blocks from above again with another salt and
	 * verify that they are read back as expected.
	 */
	res = do_range(write_block, &ht, w_unsync_begin, w_unsync_num,
		       salt + 2);
	CHECK_RES(res, goto out);

	res = do_range(read_block, &ht, 0, w_unsync_begin, salt);
	CHECK_RES(res, goto out);
	res = do_range(read_block, &ht, w_unsync_begin, w_unsync_num, salt + 2);
	CHECK_RES(res, goto out);
	res = do_range(read_block, &ht, w_unsync_begin + w_unsync_num,
			num_blocks - (w_unsync_begin + w_unsync_num), salt);
	CHECK_RES(res, goto out);

	/*
	 * Skip tee_fs_htree_sync_to_storage() and call
	 * tee_fs_htree_close() directly to undo the changes made since
	 * the last call to tee_fs_htree_sync_to_storage().  Reopen the
	 * hash-tree and verify that the recent changes were indeed
	 * discarded.
	 */
	tee_fs_htree_close(&ht);
	res = tee_fs_htree_open(false, hash, uuid, &test_htree_ops, aux, &ht);
	CHECK_RES(res, goto out);

	res = do_range(read_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

	/*
	 * Close, reopen and verify that all blocks are read as expected
	 * again but this time based on the counter value in struct
	 * tee_fs_htree_image.
	 */
	tee_fs_htree_close(&ht);
	res = tee_fs_htree_open(false, NULL, uuid, &test_htree_ops, aux, &ht);
	CHECK_RES(res, goto out);

	res = do_range(read_block, &ht, 0, num_blocks, salt);
	CHECK_RES(res, goto out);

out:
	tee_fs_htree_close(&ht);
	/*
	 * read_block() returns TEE_ERROR_TIME_NOT_SET in case unexpected
	 * data is read.
	 */
	if (res == TEE_ERROR_TIME_NOT_SET)
		res = TEE_ERROR_SECURITY;
	return res;
}

static void aux_free(struct test_aux *aux)
{
	if (aux) {
		free(aux->data);
		free(aux->block);
		free(aux);
	}
}

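/*
 * Allocate the emulated backing store.  The size is taken from the
 * offset of data block number num_blocks (one past the last index the
 * tests touch), so every head, node and data block version used by the
 * tests fits in the buffer.
 */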
static struct test_aux *aux_alloc(size_t num_blocks)
{
	struct test_aux *aux = NULL;
	size_t o = 0;
	size_t sz = 0;

	if (test_get_offs_size(TEE_FS_HTREE_TYPE_BLOCK, num_blocks, 1, &o, &sz))
		return NULL;

	aux = calloc(1, sizeof(*aux));
	if (!aux)
		return NULL;

	aux->data_alloced = o + sz;
	aux->data = malloc(aux->data_alloced);
	if (!aux->data)
		goto err;

	aux->block = malloc(TEST_BLOCK_SIZE);
	if (!aux->block)
		goto err;

	return aux;
err:
	aux_free(aux);
	return NULL;
}

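/*
 * Sweep combinations of total block count (n), rewrite offset (m) and
 * rewrite length (o) through htree_test_rewrite().
 */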
static TEE_Result test_write_read(size_t num_blocks)
{
	struct test_aux *aux = aux_alloc(num_blocks);
	TEE_Result res = TEE_SUCCESS;
	size_t n = 0;
	size_t m = 0;
	size_t o = 0;

	if (!aux)
		return TEE_ERROR_OUT_OF_MEMORY;

	/*
	 * n is the number of blocks we're going to initialize/use.
	 * m is the offset from where we'll rewrite blocks and expect
	 * the changes to remain visible until tee_fs_htree_close() is
	 * called without a preceding call to
	 * tee_fs_htree_sync_to_storage().
	 * o is the number of blocks we're rewriting starting at m.
	 */
	for (n = 0; n < num_blocks; n += 3) {
		for (m = 0; m < n; m += 3) {
			for (o = 0; o < (n - m); o++) {
				res = htree_test_rewrite(aux, n, m, o);
				CHECK_RES(res, goto out);
				o += 2;
			}
		}
	}

out:
	aux_free(aux);
	return res;
}

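/*
 * Corrupt both versions of the head, a node or a data block at
 * increasing byte offsets and verify that the hash-tree refuses to
 * open, or that reading the blocks fails, i.e. that the corruption
 * never goes undetected.
 */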
static TEE_Result test_corrupt_type(const TEE_UUID *uuid, uint8_t *hash,
				    size_t num_blocks, struct test_aux *aux,
				    enum tee_fs_htree_type type, size_t idx)
{
	TEE_Result res = TEE_SUCCESS;
	struct test_aux aux2 = *aux;
	struct tee_fs_htree *ht = NULL;
	size_t offs = 0;
	size_t size = 0;
	size_t size0 = 0;
	size_t n = 0;

	res = test_get_offs_size(type, idx, 0, &offs, &size0);
	CHECK_RES(res, return res);

	aux2.data = malloc(aux->data_alloced);
	if (!aux2.data)
		return TEE_ERROR_OUT_OF_MEMORY;

	n = 0;
	while (true) {
		memcpy(aux2.data, aux->data, aux->data_len);

		res = test_get_offs_size(type, idx, 0, &offs, &size);
		CHECK_RES(res, goto out);
		aux2.data[offs + n]++;
		res = test_get_offs_size(type, idx, 1, &offs, &size);
		CHECK_RES(res, goto out);
		aux2.data[offs + n]++;

		/*
		 * Errors in the head or in a node are detected by
		 * tee_fs_htree_open(), while errors in a data block are
		 * detected when the block is actually read by
		 * do_range(read_block).
		 */
		res = tee_fs_htree_open(false, hash, uuid, &test_htree_ops,
					&aux2, &ht);
		if (!res) {
			res = do_range(read_block, &ht, 0, num_blocks, 1);
			/*
			 * do_range(read_block,) is supposed to detect the
			 * error.  If TEE_ERROR_TIME_NOT_SET is returned,
			 * read_block() was actually able to get some data,
			 * but the data was incorrect.
			 *
			 * If res == TEE_SUCCESS or
			 *    res == TEE_ERROR_TIME_NOT_SET
			 * there's some problem with the htree
			 * implementation.
			 */
			if (res == TEE_ERROR_TIME_NOT_SET) {
				EMSG("error: data silently corrupted");
				res = TEE_ERROR_SECURITY;
				goto out;
			}
			if (!res)
				break;
			tee_fs_htree_close(&ht);
		}

		/* We've tested the last byte, let's get out of here */
		if (n == size0 - 1)
			break;

		/* Increase n exponentially after 1 to skip some testing */
		if (n)
			n += n;
		else
			n = 1;

		/* Make sure we test the last byte too */
		if (n >= size0)
			n = size0 - 1;
	}

	if (res) {
		res = TEE_SUCCESS;
	} else {
		EMSG("error: data corruption undetected");
		res = TEE_ERROR_SECURITY;
	}
out:
	free(aux2.data);
	tee_fs_htree_close(&ht);
	return res;
}

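/*
 * Write and sync an object, verify it reads back correctly, and then
 * run test_corrupt_type() against the head, every node and every data
 * block.
 */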
static TEE_Result test_corrupt(size_t num_blocks)
{
	struct ts_session *sess = ts_get_current_session();
	const TEE_UUID *uuid = &sess->ctx->uuid;
	TEE_Result res = TEE_SUCCESS;
	struct tee_fs_htree *ht = NULL;
	uint8_t hash[TEE_FS_HTREE_HASH_SIZE] = { 0 };
	struct test_aux *aux = NULL;
	size_t n = 0;

	aux = aux_alloc(num_blocks);
	if (!aux) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out;
	}

	aux->data_len = 0;
	memset(aux->data, 0xce, aux->data_alloced);

	/* Write the object and close it */
	res = tee_fs_htree_open(true, hash, uuid, &test_htree_ops, aux, &ht);
	CHECK_RES(res, goto out);
	res = do_range(write_block, &ht, 0, num_blocks, 1);
	CHECK_RES(res, goto out);
	res = tee_fs_htree_sync_to_storage(&ht, hash);
	CHECK_RES(res, goto out);
	tee_fs_htree_close(&ht);

	/* Verify that the object can be read correctly */
	res = tee_fs_htree_open(false, hash, uuid, &test_htree_ops, aux, &ht);
	CHECK_RES(res, goto out);
	res = do_range(read_block, &ht, 0, num_blocks, 1);
	CHECK_RES(res, goto out);
	tee_fs_htree_close(&ht);

	res = test_corrupt_type(uuid, hash, num_blocks, aux,
				TEE_FS_HTREE_TYPE_HEAD, 0);
	CHECK_RES(res, goto out);
	for (n = 0; n < num_blocks; n++) {
		res = test_corrupt_type(uuid, hash, num_blocks, aux,
					TEE_FS_HTREE_TYPE_NODE, n);
		CHECK_RES(res, goto out);
	}
	for (n = 0; n < num_blocks; n++) {
		res = test_corrupt_type(uuid, hash, num_blocks, aux,
					TEE_FS_HTREE_TYPE_BLOCK, n);
		CHECK_RES(res, goto out);
	}

out:
	tee_fs_htree_close(&ht);
	aux_free(aux);
	return res;
}

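/*
 * Test entry point: rejects any parameters and exercises the rewrite
 * and corruption-detection tests with small block counts.
 */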
TEE_Result core_fs_htree_tests(uint32_t nParamTypes,
			       TEE_Param pParams[TEE_NUM_PARAMS] __unused)
{
	TEE_Result res = TEE_SUCCESS;

	if (nParamTypes)
		return TEE_ERROR_BAD_PARAMETERS;

	res = test_write_read(10);
	if (res)
		return res;

	return test_corrupt(5);
}