// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2014 Freescale Semiconductor, Inc.
 * Copyright 2021 NXP
 */

#include <cpu_func.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include "jobdesc.h"
#include "desc.h"
#include "jr.h"
#include "fsl_hash.h"
#include <hw_sha.h>
#include <asm/cache.h>
#include <linux/errno.h>

#define CRYPTO_MAX_ALG_NAME	80
#define SHA1_DIGEST_SIZE	20
#define SHA256_DIGEST_SIZE	32

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	unsigned int digestsize;
	u32 alg_type;
};

enum caam_hash_algos {
	SHA1 = 0,
	SHA256
};

static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.digestsize = SHA1_DIGEST_SIZE,
		.alg_type = OP_ALG_ALGSEL_SHA1,
	},
	{
		.name = "sha256",
		.digestsize = SHA256_DIGEST_SIZE,
		.alg_type = OP_ALG_ALGSEL_SHA256,
	},
};

static enum caam_hash_algos get_hash_type(struct hash_algo *algo)
{
	if (!strcmp(algo->name, driver_hash[SHA1].name))
		return SHA1;
	else
		return SHA256;
}

/* Create the context for progressive hashing using h/w acceleration.
 *
 * @ctxp: Pointer to the pointer of the context for hashing
 * @caam_algo: Enum for SHA1 or SHA256
 * Return: 0 if ok, -ENOMEM on error
 */
static int caam_hash_init(void **ctxp, enum caam_hash_algos caam_algo)
{
	*ctxp = calloc(1, sizeof(struct sha_ctx));
	if (*ctxp == NULL) {
		debug("Cannot allocate memory for context\n");
		return -ENOMEM;
	}
	return 0;
}

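/*
 * A rough sketch of the context being allocated above, derived only from
 * how this file uses it (the authoritative definition lives in
 * fsl_hash.h; field order and the digest buffer size are assumptions):
 *
 *	struct sha_ctx {
 *		struct sg_entry sg_tbl[MAX_SG_32];    // scatter/gather table
 *		uint32_t sha_desc[MAX_CAAM_DESCSIZE]; // CAAM job descriptor
 *		uint8_t hash[...];                    // digest output buffer
 *		uint32_t sg_num;                      // S/G entries used
 *	};
 */
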
/*
 * Update sg table for progressive hashing using h/w acceleration
 *
 * The context is freed by this function if an error occurs.
 * We support at most 32 Scatter/Gather Entries.
 *
 * @hash_ctx: Pointer to the context for hashing
 * @buf: Pointer to the buffer being hashed
 * @size: Size of the buffer being hashed
 * @is_last: 1 if this is the last update; 0 otherwise
 * @caam_algo: Enum for SHA1 or SHA256
 * Return: 0 if ok, -EINVAL on error
 */
static int caam_hash_update(void *hash_ctx, const void *buf,
			    unsigned int size, int is_last,
			    enum caam_hash_algos caam_algo)
{
	uint32_t final;
	caam_dma_addr_t addr = virt_to_phys((void *)buf);
	struct sha_ctx *ctx = hash_ctx;

	if (ctx->sg_num >= MAX_SG_32) {
		free(ctx);
		return -EINVAL;
	}

#ifdef CONFIG_CAAM_64BIT
	sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_hi, (uint32_t)(addr >> 32));
#else
	sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_hi, 0x0);
#endif
	sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_lo, (caam_dma_addr_t)addr);

	sec_out32(&ctx->sg_tbl[ctx->sg_num].len_flag,
		  (size & SG_ENTRY_LENGTH_MASK));

	ctx->sg_num++;

	if (is_last) {
		final = sec_in32(&ctx->sg_tbl[ctx->sg_num - 1].len_flag) |
			SG_ENTRY_FINAL_BIT;
		sec_out32(&ctx->sg_tbl[ctx->sg_num - 1].len_flag, final);
	}

	return 0;
}

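/*
 * Illustrative call sequence (the buffer names are hypothetical, not part
 * of this driver): each update appends one S/G entry, and the last call
 * tags the final entry so the CAAM knows where the message ends:
 *
 *	caam_hash_update(ctx, hdr, hdr_len, 0, SHA256);    // sg_tbl[0]
 *	caam_hash_update(ctx, body, body_len, 0, SHA256);  // sg_tbl[1]
 *	caam_hash_update(ctx, tail, tail_len, 1, SHA256);  // sg_tbl[2],
 *							   // FINAL bit set
 */
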
/*
 * Perform progressive hashing on the given buffer and copy hash at
 * destination buffer
 *
 * The context is freed after successful completion of hash operation.
 * In case of failure, context is not freed.
 * @hash_ctx: Pointer to the context for hashing
 * @dest_buf: Pointer to the destination buffer where hash is to be copied
 * @size: Size of the buffer being hashed
 * @caam_algo: Enum for SHA1 or SHA256
 * Return: 0 if ok, -EINVAL on error
 */
static int caam_hash_finish(void *hash_ctx, void *dest_buf,
			    int size, enum caam_hash_algos caam_algo)
{
	uint32_t len = 0, sg_entry_len;
	struct sha_ctx *ctx = hash_ctx;
	int i = 0, ret = 0;
	caam_dma_addr_t addr;

	if (size < driver_hash[caam_algo].digestsize)
		return -EINVAL;

	/* Flush the S/G table and every buffer it references before the
	 * CAAM reads them via DMA
	 */
	flush_dcache_range((ulong)ctx->sg_tbl,
			   (ulong)(ctx->sg_tbl) +
			   (ctx->sg_num * sizeof(struct sg_entry)));
	for (i = 0; i < ctx->sg_num; i++) {
		sg_entry_len = (sec_in32(&ctx->sg_tbl[i].len_flag) &
				SG_ENTRY_LENGTH_MASK);
		len += sg_entry_len;
#ifdef CONFIG_CAAM_64BIT
		addr = sec_in32(&ctx->sg_tbl[i].addr_hi);
		addr = (addr << 32) | sec_in32(&ctx->sg_tbl[i].addr_lo);
#else
		addr = sec_in32(&ctx->sg_tbl[i].addr_lo);
#endif
		flush_dcache_range(addr, addr + sg_entry_len);
	}
	inline_cnstr_jobdesc_hash(ctx->sha_desc, (uint8_t *)ctx->sg_tbl, len,
				  ctx->hash,
				  driver_hash[caam_algo].alg_type,
				  driver_hash[caam_algo].digestsize,
				  1);

	flush_dcache_range((ulong)ctx->sha_desc,
			   (ulong)(ctx->sha_desc) +
			   (sizeof(uint32_t) * MAX_CAAM_DESCSIZE));
	flush_dcache_range((ulong)ctx->hash,
			   (ulong)(ctx->hash) +
			   driver_hash[caam_algo].digestsize);

	ret = run_descriptor_jr(ctx->sha_desc);

	if (ret) {
		debug("Error %x\n", ret);
		return ret;
	} else {
		invalidate_dcache_range((ulong)ctx->hash,
					(ulong)(ctx->hash) +
					driver_hash[caam_algo].digestsize);
		/* Copy out only the digest bytes; ctx->hash itself may be
		 * larger than the caller's buffer
		 */
		memcpy(dest_buf, ctx->hash, driver_hash[caam_algo].digestsize);
	}
	free(ctx);
	return ret;
}

int caam_hash(const unsigned char *pbuf, unsigned int buf_len,
	      unsigned char *pout, enum caam_hash_algos algo)
{
	int ret = 0;
	uint32_t *desc;
	unsigned long pbuf_aligned;
	unsigned int size;

	desc = malloc_cache_aligned(sizeof(int) * MAX_CAAM_DESCSIZE);
	if (!desc) {
		debug("Not enough memory for descriptor allocation\n");
		return -ENOMEM;
	}

	/* Flush the full cache lines covering the (possibly unaligned)
	 * input buffer before the CAAM reads it via DMA
	 */
	pbuf_aligned = ALIGN_DOWN((unsigned long)pbuf, ARCH_DMA_MINALIGN);
	size = ALIGN(buf_len + ((unsigned long)pbuf - pbuf_aligned),
		     ARCH_DMA_MINALIGN);
	flush_dcache_range(pbuf_aligned, pbuf_aligned + size);

	inline_cnstr_jobdesc_hash(desc, pbuf, buf_len, pout,
				  driver_hash[algo].alg_type,
				  driver_hash[algo].digestsize,
				  0);

	size = ALIGN(sizeof(int) * MAX_CAAM_DESCSIZE, ARCH_DMA_MINALIGN);
	flush_dcache_range((unsigned long)desc, (unsigned long)desc + size);

	/* Invalidate the output area up front so no dirty lines can be
	 * evicted over the digest while the CAAM writes it
	 */
	size = ALIGN(driver_hash[algo].digestsize, ARCH_DMA_MINALIGN);
	invalidate_dcache_range((unsigned long)pout,
				(unsigned long)pout + size);

	ret = run_descriptor_jr(desc);

	/* Invalidate again so the CPU sees the freshly written digest */
	size = ALIGN(driver_hash[algo].digestsize, ARCH_DMA_MINALIGN);
	invalidate_dcache_range((unsigned long)pout,
				(unsigned long)pout + size);

	free(desc);
	return ret;
}

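/*
 * Illustrative one-shot usage (hypothetical buffer names): hash a single
 * contiguous buffer without going through the progressive API:
 *
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	if (caam_hash(data, data_len, digest, SHA256))
 *		printf("CAAM hash failed\n");
 */
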
void hw_sha256(const unsigned char *pbuf, unsigned int buf_len,
	       unsigned char *pout, unsigned int chunk_size)
{
	if (caam_hash(pbuf, buf_len, pout, SHA256))
		printf("CAAM was not setup properly or it is faulty\n");
}

void hw_sha1(const unsigned char *pbuf, unsigned int buf_len,
	     unsigned char *pout, unsigned int chunk_size)
{
	if (caam_hash(pbuf, buf_len, pout, SHA1))
		printf("CAAM was not setup properly or it is faulty\n");
}

int hw_sha_init(struct hash_algo *algo, void **ctxp)
{
	return caam_hash_init(ctxp, get_hash_type(algo));
}

int hw_sha_update(struct hash_algo *algo, void *ctx, const void *buf,
		  unsigned int size, int is_last)
{
	return caam_hash_update(ctx, buf, size, is_last, get_hash_type(algo));
}

int hw_sha_finish(struct hash_algo *algo, void *ctx, void *dest_buf,
		  int size)
{
	return caam_hash_finish(ctx, dest_buf, size, get_hash_type(algo));
}

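/*
 * Minimal sketch of driving the progressive API above (assumes the
 * generic hash_lookup_algo() helper from include/hash.h; the buffer
 * names are hypothetical):
 *
 *	struct hash_algo *algo;
 *	void *ctx;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	if (hash_lookup_algo("sha256", &algo) || hw_sha_init(algo, &ctx))
 *		return;
 *	hw_sha_update(algo, ctx, part1, len1, 0);
 *	hw_sha_update(algo, ctx, part2, len2, 1);  // last chunk
 *	hw_sha_finish(algo, ctx, digest, sizeof(digest));
 */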