// SPDX-License-Identifier: GPL-2.0-only
/*
 * SHA-512 routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/unaligned.h>

#include "nx_csbcpb.h"
#include "nx.h"

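/*
 * Hash state in the form the NX coprocessor produces: big-endian digest
 * words, with the 128-bit running byte count kept in CPU order.
 */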
struct sha512_state_be {
	__be64 state[SHA512_DIGEST_SIZE / 8];
	u64 count[2];
};

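/*
 * One-time transform setup: initialize the NX context for the SHA
 * coprocessor function and record a SHA-512 digest size in the CPB.
 */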
static int nx_crypto_ctx_sha512_init(struct crypto_shash *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm);
	int err;

	err = nx_crypto_ctx_sha_init(tfm);
	if (err)
		return err;

	nx_ctx_init(nx_ctx, HCOP_FC_SHA);

	nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];

	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);

	return 0;
}

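/* Seed the descriptor state with the standard SHA-512 initial hash values. */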
static int nx_sha512_init(struct shash_desc *desc)
{
	struct sha512_state_be *sctx = shash_desc_ctx(desc);

	sctx->state[0] = __cpu_to_be64(SHA512_H0);
	sctx->state[1] = __cpu_to_be64(SHA512_H1);
	sctx->state[2] = __cpu_to_be64(SHA512_H2);
	sctx->state[3] = __cpu_to_be64(SHA512_H3);
	sctx->state[4] = __cpu_to_be64(SHA512_H4);
	sctx->state[5] = __cpu_to_be64(SHA512_H5);
	sctx->state[6] = __cpu_to_be64(SHA512_H6);
	sctx->state[7] = __cpu_to_be64(SHA512_H7);
	sctx->count[0] = 0;
	sctx->count[1] = 0;

	return 0;
}

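/*
 * Hash only whole SHA-512 blocks on the coprocessor. As a BLOCK_ONLY
 * algorithm, partial-block buffering is done by the crypto core; the
 * positive return value reports how many trailing bytes were left for it.
 */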
static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
			    unsigned int len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
	struct sha512_state_be *sctx = shash_desc_ctx(desc);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	u64 to_process, leftover, total = len;
	struct nx_sg *out_sg;
	unsigned long irq_flags;
	int rc = 0;
	int data_len;
	u32 max_sg_len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	memcpy(csbcpb->cpb.sha512.message_digest, sctx->state,
	       SHA512_DIGEST_SIZE);
	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			   nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	data_len = SHA512_DIGEST_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
				  &data_len, max_sg_len);
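	/* nx_build_sg_list() returns the element past the last one written,
	 * so the head-minus-end subtractions below yield the sg list size
	 * in bytes, negated; the H_COP hcall treats a negative length as a
	 * reference to a scatter/gather list rather than direct data.
	 */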
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	if (data_len != SHA512_DIGEST_SIZE) {
		rc = -EINVAL;
		goto out;
	}

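	/* Each pass hashes as many whole blocks as the sg limits allow;
	 * anything the sg list could not describe stays in @leftover for
	 * the next pass.
	 */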
	do {
		struct nx_sg *in_sg = nx_ctx->in_sg;

		to_process = total & ~(SHA512_BLOCK_SIZE - 1);

		data_len = to_process;
		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
					 &data_len, max_sg_len);

		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);

		to_process = data_len;
		leftover = total - to_process;

		/*
		 * we've hit the nx chip previously and we're updating
		 * again, so copy over the partial digest.
		 */
		memcpy(csbcpb->cpb.sha512.input_partial_digest,
		       csbcpb->cpb.sha512.message_digest,
		       SHA512_DIGEST_SIZE);

		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->sha512_ops));

		total -= to_process;
		data += to_process;
		sctx->count[0] += to_process;
		if (sctx->count[0] < to_process)
			sctx->count[1]++;
	} while (leftover >= SHA512_BLOCK_SIZE);

	rc = leftover;
	memcpy(sctx->state, csbcpb->cpb.sha512.message_digest,
	       SHA512_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

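/*
 * Final pass: hand the carried partial digest and the remaining bytes to
 * the coprocessor with the intermediate flag cleared, then copy the
 * finished digest to @out.
 */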
static int nx_sha512_finup(struct shash_desc *desc, const u8 *src,
			   unsigned int nbytes, u8 *out)
{
	struct sha512_state_be *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	u32 max_sg_len;
	unsigned long irq_flags;
	u64 count0, count1;
	int rc = 0;
	int len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			   nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* The final operation is represented by continuing the operation
	 * while clearing the intermediate flag; copy over the partial
	 * digest first.
	 */
	memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state,
	       SHA512_DIGEST_SIZE);
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

	count0 = sctx->count[0] + nbytes;
	count1 = sctx->count[1];

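	/* Express the total length as a 128-bit count of bits: shifting the
	 * low word left by three spills its top three bits into the high
	 * word.
	 */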
	csbcpb->cpb.sha512.message_bit_length_lo = count0 << 3;
	csbcpb->cpb.sha512.message_bit_length_hi = (count1 << 3) |
						   (count0 >> 61);

	len = nbytes;
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)src, &len, max_sg_len);

	if (len != nbytes) {
		rc = -EINVAL;
		goto out;
	}

	len = SHA512_DIGEST_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
				  max_sg_len);

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->sha512_ops));
	atomic64_add(count0, &(nx_ctx->stats->sha512_bytes));

	memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

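/*
 * Write the state out in the generic struct sha512_state layout (CPU-order
 * digest words followed by the byte count), so the exported state can be
 * imported by a software implementation.
 */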
static int nx_sha512_export(struct shash_desc *desc, void *out)
{
	struct sha512_state_be *sctx = shash_desc_ctx(desc);
	union {
		u8 *u8;
		u64 *u64;
	} p = { .u8 = out };
	int i;

	for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(*p.u64); i++)
		put_unaligned(be64_to_cpu(sctx->state[i]), p.u64++);

	put_unaligned(sctx->count[0], p.u64++);
	put_unaligned(sctx->count[1], p.u64++);
	return 0;
}

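/* Inverse of nx_sha512_export(): rebuild the big-endian digest words. */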
static int nx_sha512_import(struct shash_desc *desc, const void *in)
{
	struct sha512_state_be *sctx = shash_desc_ctx(desc);
	union {
		const u8 *u8;
		const u64 *u64;
	} p = { .u8 = in };
	int i;

	for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(*p.u64); i++)
		sctx->state[i] = cpu_to_be64(get_unaligned(p.u64++));

	sctx->count[0] = get_unaligned(p.u64++);
	sctx->count[1] = get_unaligned(p.u64++);
	return 0;
}

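/*
 * Registered by the driver core in nx.c when a SHA-512 capable NX unit is
 * present; CRYPTO_AHASH_ALG_BLOCK_ONLY tells the crypto core to buffer
 * partial blocks on our behalf.
 */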
struct shash_alg nx_shash_sha512_alg = {
	.digestsize = SHA512_DIGEST_SIZE,
	.init       = nx_sha512_init,
	.update     = nx_sha512_update,
	.finup      = nx_sha512_finup,
	.export     = nx_sha512_export,
	.import     = nx_sha512_import,
	.init_tfm   = nx_crypto_ctx_sha512_init,
	.exit_tfm   = nx_crypto_ctx_shash_exit,
	.descsize   = sizeof(struct sha512_state_be),
	.statesize  = sizeof(struct sha512_state_be),
	.base = {
		.cra_name        = "sha512",
		.cra_driver_name = "sha512-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_AHASH_ALG_BLOCK_ONLY,
		.cra_blocksize   = SHA512_BLOCK_SIZE,
		.cra_module      = THIS_MODULE,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	}
};