// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2018-2019, 2021 NXP
 *
 * Implementation of Hashing functions.
 */
#include <caam_hal_ctrl.h>
#include <caam_hash.h>
#include <caam_jr.h>
#include <caam_utils_dmaobj.h>
#include <caam_utils_mem.h>
#include <caam_utils_status.h>
#include <drvcrypt.h>
#include <drvcrypt_hash.h>
#include <kernel/panic.h>
#include <mm/core_memprot.h>
#include <string.h>
#include <tee/cache.h>
#include <utee_defines.h>

#include "local.h"

static const struct crypto_hash_ops hash_ops;

/*
 * Maximum number of entries in the descriptor
 */
#define MAX_DESC_ENTRIES	20

/*
 * Constants definition of the hash/HMAC algorithm
 */
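/*
 * Note: size_ctx is the running-state size saved and restored between
 * CAAM jobs (HASH_MSG_LEN bytes of message-length state followed by the
 * algorithm's internal digest state), and size_key is the buffer space
 * reserved for the HMAC split key loaded with LD_KEY_SPLIT.
 */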
static const struct hashalg hash_alg[] = {
	{
		/* md5 */
		.type = OP_ALGO(MD5),
		.size_digest = TEE_MD5_HASH_SIZE,
		.size_block = TEE_MD5_HASH_SIZE * 4,
		.size_ctx = HASH_MSG_LEN + TEE_MD5_HASH_SIZE,
		.size_key = 32,
	},
	{
		/* sha1 */
		.type = OP_ALGO(SHA1),
		.size_digest = TEE_SHA1_HASH_SIZE,
		.size_block = TEE_MAX_HASH_SIZE,
		.size_ctx = HASH_MSG_LEN + TEE_SHA1_HASH_SIZE,
		.size_key = 40,
	},
	{
		/* sha224 */
		.type = OP_ALGO(SHA224),
		.size_digest = TEE_SHA224_HASH_SIZE,
		.size_block = TEE_MAX_HASH_SIZE,
		.size_ctx = HASH_MSG_LEN + TEE_SHA256_HASH_SIZE,
		.size_key = 64,
	},
	{
		/* sha256 */
		.type = OP_ALGO(SHA256),
		.size_digest = TEE_SHA256_HASH_SIZE,
		.size_block = TEE_MAX_HASH_SIZE,
		.size_ctx = HASH_MSG_LEN + TEE_SHA256_HASH_SIZE,
		.size_key = 64,
	},
	{
		/* sha384 */
		.type = OP_ALGO(SHA384),
		.size_digest = TEE_SHA384_HASH_SIZE,
		.size_block = TEE_MAX_HASH_SIZE * 2,
		.size_ctx = HASH_MSG_LEN + TEE_SHA512_HASH_SIZE,
		.size_key = 128,
	},
	{
		/* sha512 */
		.type = OP_ALGO(SHA512),
		.size_digest = TEE_SHA512_HASH_SIZE,
		.size_block = TEE_MAX_HASH_SIZE * 2,
		.size_ctx = HASH_MSG_LEN + TEE_SHA512_HASH_SIZE,
		.size_key = 128,
	},
};

/*
 * Format the hash context to keep the reference to the
 * operation driver
 */
struct crypto_hash {
	struct crypto_hash_ctx hash_ctx; /* Crypto Hash API context */
	struct hashctx *ctx;		 /* Hash Context */
};

/*
 * Keep the HW hash limit because after the initialization
 * of the module, we don't have the CAAM Controller base address
 * to call the function returning the HW capacity.
 */
static uint8_t caam_hash_limit;

/*
 * Returns the reference to the driver context
 *
 * @ctx  API Context
 */
static struct crypto_hash *to_hash_ctx(struct crypto_hash_ctx *ctx)
{
	assert(ctx && ctx->ops == &hash_ops);

	return container_of(ctx, struct crypto_hash, hash_ctx);
}

/*
 * Add the load key in the CAAM descriptor and clean the key buffer.
 *
 * @desc  CAAM Descriptor
 * @key   Key to load
 */
static void do_desc_load_key(uint32_t *desc, struct caambuf *key)
{
	HASH_TRACE("Insert Key");
	caam_desc_add_word(desc, LD_KEY_SPLIT(key->length));
	caam_desc_add_ptr(desc, key->paddr);

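	/* Push the key bytes to memory so the CAAM DMA reads fresh data */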
	cache_operation(TEE_CACHECLEAN, key->data, key->length);
}

/*
 * Free the internal hashing data context
 *
 * @ctx  [in/out] Caller context variable
 */
static void do_free_intern(struct hashctx *ctx)
{
	HASH_TRACE("Free Context (%p)", ctx);

	if (ctx) {
		/* Free the descriptor */
		caam_free_desc(&ctx->descriptor);

		/* Free the Temporary buffer */
		caam_free_buf(&ctx->blockbuf.buf);

		/* Free the context register */
		caam_free_buf(&ctx->ctx);

		/* Free the HMAC Key */
		caam_free_buf(&ctx->key);
	}
}

/*
 * Initialization of the Hash operation
 * Call common initialization operation between hash and HMAC
 *
 * @ctx  Operation software context
 */
static TEE_Result do_hash_init(struct crypto_hash_ctx *ctx)
{
	struct crypto_hash *hash = to_hash_ctx(ctx);

	return caam_hash_hmac_init(hash->ctx);
}

/*
 * Update the Hash operation
 * Call common update operation between hash and HMAC
 *
 * @ctx   Operation software context
 * @data  Data to hash
 * @len   Data length
 */
static TEE_Result do_hash_update(struct crypto_hash_ctx *ctx,
				 const uint8_t *data, size_t len)
{
	struct crypto_hash *hash = to_hash_ctx(ctx);

	return caam_hash_hmac_update(hash->ctx, data, len);
}

/*
 * Finalize the Hash operation
 * Call common final operation between hash and HMAC
 *
 * @ctx     Operation software context
 * @digest  [out] Hash digest buffer
 * @len     Digest buffer length
 */
static TEE_Result do_hash_final(struct crypto_hash_ctx *ctx, uint8_t *digest,
				size_t len)
{
	struct crypto_hash *hash = to_hash_ctx(ctx);

	return caam_hash_hmac_final(hash->ctx, digest, len);
}

/*
 * Free the SW hashing data context
 * Call common free operation between hash and HMAC
 *
 * @ctx  [in/out] Caller context variable
 */
static void do_hash_free(struct crypto_hash_ctx *ctx)
{
	struct crypto_hash *hash = to_hash_ctx(ctx);

	caam_hash_hmac_free(hash->ctx);

	free(hash);
}

/*
 * Copy Software Hashing Context
 * Call common copy operation between hash and HMAC
 *
 * @dst_ctx  [out] Reference the context destination
 * @src_ctx  Reference the context source
 */
static void do_hash_copy_state(struct crypto_hash_ctx *dst_ctx,
			       struct crypto_hash_ctx *src_ctx)
{
	struct crypto_hash *hash_src = to_hash_ctx(src_ctx);
	struct crypto_hash *hash_dst = to_hash_ctx(dst_ctx);

	return caam_hash_hmac_copy_state(hash_dst->ctx, hash_src->ctx);
}

/*
 * Registration of the hash Driver
 */
static const struct crypto_hash_ops hash_ops = {
	.init = do_hash_init,
	.update = do_hash_update,
	.final = do_hash_final,
	.free_ctx = do_hash_free,
	.copy_state = do_hash_copy_state,
};

/*
 * Allocate the internal hashing data context
 *
 * @ctx   [out] Caller context variable
 * @algo  Algorithm ID
 */
static TEE_Result caam_hash_allocate(struct crypto_hash_ctx **ctx,
				     uint32_t algo)
{
	struct crypto_hash *hash = NULL;
	struct hashctx *hash_ctx = NULL;
	const struct hashalg *alg = NULL;
	TEE_Result ret = TEE_ERROR_GENERIC;

	HASH_TRACE("Allocate Context (%p) algo %" PRId32, ctx, algo);

	*ctx = NULL;

	alg = caam_hash_get_alg(algo);
	if (!alg)
		return TEE_ERROR_NOT_IMPLEMENTED;

	hash = calloc(1, sizeof(*hash));
	if (!hash)
		return TEE_ERROR_OUT_OF_MEMORY;

	hash_ctx = caam_calloc(sizeof(*hash_ctx));
	if (!hash_ctx) {
		ret = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	hash_ctx->alg = alg;
	hash->hash_ctx.ops = &hash_ops;
	hash->ctx = hash_ctx;

	*ctx = &hash->hash_ctx;

	ret = caam_hash_hmac_allocate(hash_ctx);
	if (ret != TEE_SUCCESS)
		goto err;

	HASH_TRACE("Allocated Context (%p)", hash_ctx);

	return TEE_SUCCESS;

err:
	free(hash);

	if (hash_ctx)
		caam_free(hash_ctx);

	return ret;
}

TEE_Result caam_hash_hmac_allocate(struct hashctx *ctx)
{
	TEE_Result ret = TEE_ERROR_GENERIC;

	HASH_TRACE("Allocate Context (%p)", ctx);

	/* Allocate the descriptor */
	ctx->descriptor = caam_calloc_desc(MAX_DESC_ENTRIES);
	if (!ctx->descriptor) {
		HASH_TRACE("Allocation context descriptor error");
		return TEE_ERROR_OUT_OF_MEMORY;
	}

	/* Initialize the block buffer */
	ctx->blockbuf.filled = 0;
	ctx->blockbuf.max = ctx->alg->size_block;

	/* Allocate the CAAM Context register */
	if (caam_calloc_align_buf(&ctx->ctx, ctx->alg->size_ctx) !=
	    CAAM_NO_ERROR) {
		HASH_TRACE("Allocation context register error");
		ret = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	/* Allocate the Hash Key */
	if (caam_calloc_align_buf(&ctx->key, ctx->alg->size_key) !=
	    CAAM_NO_ERROR) {
		HASH_TRACE("Allocation context key error");
		ret = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}

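	/*
	 * Flush the context buffer now: the CAAM updates it by DMA, so no
	 * dirty cache line may later be written back over the HW result.
	 */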
	cache_operation(TEE_CACHEFLUSH, ctx->ctx.data, ctx->ctx.length);

	/* Ensure buffer length is 0 */
	ctx->ctx.length = 0;

	return TEE_SUCCESS;

err:
	do_free_intern(ctx);

	return ret;
}

/*
 * Free the SW hashing data context
 *
 * @ctx  Caller context variable
 */
void caam_hash_hmac_free(struct hashctx *ctx)
{
	HASH_TRACE("Free Context (%p)", ctx);

	if (ctx) {
		do_free_intern(ctx);
		caam_free(ctx);
	}
}

const struct hashalg *caam_hash_get_alg(uint32_t algo)
{
	uint8_t hash_id = TEE_ALG_GET_MAIN_ALG(algo);
	unsigned int idx = hash_id - TEE_MAIN_ALGO_MD5;

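	/*
	 * hash_alg[] is indexed by the TEE main algorithm ID relative to
	 * MD5; reject algorithms above what the HW reported as supported.
	 */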
	if (hash_id > caam_hash_limit || idx >= ARRAY_SIZE(hash_alg))
		return NULL;

	return &hash_alg[idx];
}

TEE_Result caam_hash_hmac_init(struct hashctx *ctx)
{
	HASH_TRACE("Hash/HMAC Init (%p)", ctx);

	if (!ctx)
		return TEE_ERROR_BAD_PARAMETERS;

	/* Initialize the block buffer */
	ctx->blockbuf.filled = 0;
	ctx->blockbuf.max = ctx->alg->size_block;

	/* Ensure Context length is 0 */
	ctx->ctx.length = 0;

	/* Initialize the HMAC Key */
	ctx->key.length = 0;

	ctx->initialized = true;

	return TEE_SUCCESS;
}

/*
 * Build and run the CAAM Hash descriptor to update (or start) the
 * data digest.
 *
 * @ctx  [in/out] Caller context variable
 * @src  Input data to digest
 */
static TEE_Result do_update_hash(struct hashctx *ctx, struct caamdmaobj *src)
{
	enum caam_status retstatus = CAAM_FAILURE;
	const struct hashalg *alg = ctx->alg;
	struct caam_jobctx jobctx = { };
	uint32_t *desc = ctx->descriptor;

	caam_desc_init(desc);
	caam_desc_add_word(desc, DESC_HEADER(0));

	/* There are blocks to hash - Create the Descriptor */
	if (ctx->ctx.length) {
		HASH_TRACE("Update Operation");
		/* Algo Operation - Update */
		caam_desc_add_word(desc, HASH_UPDATE(alg->type));
		/* Running context to restore */
		caam_desc_add_word(desc,
				   LD_NOIMM(CLASS_2, REG_CTX, ctx->ctx.length));
		caam_desc_add_ptr(desc, ctx->ctx.paddr);
	} else {
		HASH_TRACE("Init Operation");

		/* Check if there is a key to load it */
		if (ctx->key.length) {
			do_desc_load_key(desc, &ctx->key);

			/* Algo Operation - HMAC Init */
			caam_desc_add_word(desc, HMAC_INIT_PRECOMP(alg->type));
		} else {
			/* Algo Operation - Init */
			caam_desc_add_word(desc, HASH_INIT(alg->type));
		}
		ctx->ctx.length = alg->size_ctx;
	}

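	/* Feed first any partial block buffered by a previous update call */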
	if (ctx->blockbuf.filled) {
		caam_desc_add_word(desc, FIFO_LD(CLASS_2, MSG, NOACTION,
						 ctx->blockbuf.filled));
		caam_desc_add_ptr(desc, ctx->blockbuf.buf.paddr);
		cache_operation(TEE_CACHECLEAN, ctx->blockbuf.buf.data,
				ctx->blockbuf.filled);
	}

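	/* Append the new input as the last Class 2 message data of the job */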
	caam_desc_fifo_load(desc, src, CLASS_2, MSG, LAST_C2);
	caam_dmaobj_cache_push(src);

	ctx->blockbuf.filled = 0;

	if (ctx->ctx.length) {
		/* Save the running context */
		caam_desc_add_word(desc,
				   ST_NOIMM(CLASS_2, REG_CTX, ctx->ctx.length));
		caam_desc_add_ptr(desc, ctx->ctx.paddr);

		/* Ensure Context register data are not in cache */
		cache_operation(TEE_CACHEINVALIDATE, ctx->ctx.data,
				ctx->ctx.length);
	}

	HASH_DUMPDESC(desc);

	jobctx.desc = desc;
	retstatus = caam_jr_enqueue(&jobctx, NULL);

	if (retstatus != CAAM_NO_ERROR) {
		HASH_TRACE("CAAM Status 0x%08" PRIx32, jobctx.status);
		return job_status_to_tee_result(jobctx.status);
	}

	HASH_DUMPBUF("CTX", ctx->ctx.data, ctx->ctx.length);

	return TEE_SUCCESS;
}

TEE_Result caam_hash_hmac_update(struct hashctx *ctx, const uint8_t *data,
				 size_t len)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	enum caam_status retstatus = CAAM_FAILURE;
	const struct hashalg *alg = NULL;
	size_t fullsize = 0;
	size_t size_topost = 0;
	size_t size_todo = 0;
	size_t size_done = 0;
	size_t size_inmade = 0;
	struct caamdmaobj src = { };
	size_t offset = 0;

	HASH_TRACE("Hash/HMAC Update (%p) %p - %zu", ctx, data, len);

	if ((!data && len) || !ctx)
		return TEE_ERROR_BAD_PARAMETERS;

	alg = ctx->alg;

	if (!ctx->ctx.data)
		return TEE_ERROR_GENERIC;

	HASH_TRACE("Update Type 0x%" PRIX32 " - Input @%p-%zu", alg->type, data,
		   len);

	/* Calculate the total data to be handled */
	fullsize = ctx->blockbuf.filled + len;
	size_topost = fullsize % alg->size_block;
	size_todo = fullsize - size_topost;
	size_inmade = len - size_topost;
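	/*
	 * size_topost is the trailing part that does not fill a complete
	 * block: it is kept for a later update/final call. size_inmade is
	 * the number of new input bytes consumed by this update.
	 */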
	HASH_TRACE("FullSize %zu - posted %zu - todo %zu", fullsize,
		   size_topost, size_todo);

	if (!size_todo) {
		ret = TEE_SUCCESS;

		/* All input data must be saved */
		if (size_topost)
			size_inmade = 0;

		goto save_posted;
	}

	ret = caam_dmaobj_init_input(&src, data, size_inmade);
	if (ret)
		goto exit_update;

	ret = caam_dmaobj_prepare(&src, NULL, alg->size_block);
	if (ret)
		goto exit_update;

	size_todo = size_inmade;

	for (offset = 0; offset < size_inmade;
	     offset += size_done, size_todo -= size_done) {
		size_done = size_todo;
		HASH_TRACE("Do input %zu bytes, offset %zu", size_done, offset);

		ret = caam_dmaobj_sgtbuf_build(&src, &size_done, offset,
					       alg->size_block);
		if (ret)
			goto exit_update;

		/*
		 * Need to re-adjust the length of the data if the
		 * posted data block is not empty and the SGT/Buffer
		 * is part of the full input data to do.
		 */
		if (ctx->blockbuf.filled && size_done < size_todo) {
			size_done -= ctx->blockbuf.filled;
			src.sgtbuf.length = size_done;
		}

		ret = do_update_hash(ctx, &src);
		if (ret)
			goto exit_update;
	}

save_posted:
	if (size_topost && data) {
		struct caambuf srcdata = {
			.data = (uint8_t *)data,
			.length = len,
		};

		HASH_TRACE("Posted %zu of input len %zu made %zu", size_topost,
			   len, size_inmade);

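		/*
		 * Buffer the unprocessed tail (input bytes from offset
		 * size_inmade up to len) until the next update or final.
		 */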
		retstatus = caam_cpy_block_src(&ctx->blockbuf, &srcdata,
					       size_inmade);
		ret = caam_status_to_tee_result(retstatus);
	}

exit_update:
	caam_dmaobj_free(&src);

	if (ret)
		do_free_intern(ctx);

	return ret;
}

TEE_Result caam_hash_hmac_final(struct hashctx *ctx, uint8_t *digest,
				size_t len)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	enum caam_status retstatus = CAAM_FAILURE;
	const struct hashalg *alg = NULL;
	struct caam_jobctx jobctx = { };
	uint32_t *desc = NULL;
	struct caamdmaobj dig = { };

	HASH_TRACE("Hash/HMAC Final (%p)", ctx);

	if (!digest || !len || !ctx)
		return TEE_ERROR_BAD_PARAMETERS;

	alg = ctx->alg;

	if (!ctx->ctx.data)
		return TEE_ERROR_GENERIC;

	ret = caam_dmaobj_output_sgtbuf(&dig, digest, len, alg->size_digest);
	if (ret)
		goto out;

	HASH_TRACE("Final Type 0x%" PRIX32 " - Digest %zu", alg->type, len);

	desc = ctx->descriptor;
	caam_desc_init(desc);

	/* Set the descriptor Header with length */
	caam_desc_add_word(desc, DESC_HEADER(0));

	/* Load key if any */
	if (ctx->key.length)
		do_desc_load_key(desc, &ctx->key);

	if (ctx->ctx.length) {
		HASH_TRACE("Final Operation");

		if (ctx->key.length)
			caam_desc_add_word(desc, HMAC_FINAL_PRECOMP(alg->type));
		else
			caam_desc_add_word(desc, HASH_FINAL(alg->type));

		/* Running context to restore */
		caam_desc_add_word(desc,
				   LD_NOIMM(CLASS_2, REG_CTX, ctx->ctx.length));
		caam_desc_add_ptr(desc, ctx->ctx.paddr);

		cache_operation(TEE_CACHEINVALIDATE, ctx->ctx.data,
				ctx->ctx.length);
		HASH_DUMPBUF("CTX", ctx->ctx.data, ctx->ctx.length);
		ctx->ctx.length = 0;
	} else {
		HASH_TRACE("Init/Final Operation");
		if (ctx->key.length)
			caam_desc_add_word(desc,
					   HMAC_INITFINAL_PRECOMP(alg->type));
		else
			caam_desc_add_word(desc, HASH_INITFINAL(alg->type));
	}

	HASH_DUMPBUF("Temporary Block", ctx->blockbuf.buf.data,
		     ctx->blockbuf.filled);

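	/*
	 * Feed the buffered partial block (possibly empty) as the last
	 * message data; FIFO_LD_EXT takes the length as an extra word.
	 */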
	caam_desc_add_word(desc, FIFO_LD_EXT(CLASS_2, MSG, LAST_C2));
	caam_desc_add_ptr(desc, ctx->blockbuf.buf.paddr);
	caam_desc_add_word(desc, ctx->blockbuf.filled);

	if (ctx->blockbuf.filled)
		cache_operation(TEE_CACHECLEAN, ctx->blockbuf.buf.data,
				ctx->blockbuf.filled);

	ctx->blockbuf.filled = 0;

	/* Save the final digest */
	caam_desc_store(desc, &dig, CLASS_2, REG_CTX);
	caam_dmaobj_cache_push(&dig);

	HASH_DUMPDESC(desc);

	jobctx.desc = desc;
	retstatus = caam_jr_enqueue(&jobctx, NULL);

	if (retstatus == CAAM_NO_ERROR) {
		caam_dmaobj_copy_to_orig(&dig);

		HASH_DUMPBUF("Digest", digest, (size_t)alg->size_digest);

		ret = TEE_SUCCESS;
	} else {
		HASH_TRACE("CAAM Status 0x%08" PRIx32, jobctx.status);
		ret = job_status_to_tee_result(jobctx.status);
	}

out:
	caam_dmaobj_free(&dig);

	return ret;
}

void caam_hash_hmac_copy_state(struct hashctx *dst, struct hashctx *src)
{
	HASH_TRACE("Copy State context (%p) to (%p)", src, dst);

	assert(dst && src);

	if (!dst->initialized && caam_hash_hmac_init(dst))
		panic();

	dst->alg = src->alg;

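	/*
	 * Duplicate the running CAAM context, the buffered partial block
	 * and the HMAC key (if any) from the source context.
	 */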
	if (src->ctx.length) {
		cache_operation(TEE_CACHEINVALIDATE, src->ctx.data,
				src->ctx.length);
		memcpy(dst->ctx.data, src->ctx.data, src->ctx.length);
		dst->ctx.length = src->ctx.length;
		cache_operation(TEE_CACHECLEAN, dst->ctx.data, dst->ctx.length);
	}

	if (src->blockbuf.filled) {
		struct caambuf srcdata = {
			.data = src->blockbuf.buf.data,
			.length = src->blockbuf.filled
		};

		caam_cpy_block_src(&dst->blockbuf, &srcdata, 0);
	}

	if (src->key.data) {
		memcpy(dst->key.data, src->key.data, src->key.length);
		dst->key.length = src->key.length;
	}
}

enum caam_status caam_hash_init(struct caam_jrcfg *caam_jrcfg)
{
	enum caam_status retstatus = CAAM_FAILURE;
	vaddr_t jr_base = caam_jrcfg->base + caam_jrcfg->offset;

	caam_hash_limit = caam_hal_ctrl_hash_limit(jr_base);

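	/*
	 * caam_hash_limit holds the highest hash algorithm ID the HW
	 * supports; UINT8_MAX is treated as "no hash support" and the
	 * driver is then not registered.
	 */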
	if (caam_hash_limit != UINT8_MAX) {
		if (drvcrypt_register_hash(&caam_hash_allocate) == TEE_SUCCESS)
			retstatus = CAAM_NO_ERROR;
	}

	return retstatus;
}