// SPDX-License-Identifier: GPL-2.0-only
/* Glue code for SHA512 hashing optimized for sparc64 crypto opcodes.
 *
 * This is based largely upon crypto/sha512_generic.c
 *
 * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
 * Copyright (c) 2003 Kyle McMartin <kyle@debian.org>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>

#include <asm/pstate.h>
#include <asm/elf.h>

#include "opcodes.h"

asmlinkage void sha512_sparc64_transform(u64 *digest, const char *data,
					 unsigned int rounds);

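/*
 * Core update helper.  @partial is the number of bytes already
 * buffered in sctx->buf.  Any partially filled block is completed and
 * hashed first, then all remaining full blocks are fed to the sha512
 * opcode in a single call, and whatever is left over is buffered.
 */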
static void __sha512_sparc64_update(struct sha512_state *sctx, const u8 *data,
				    unsigned int len, unsigned int partial)
{
	unsigned int done = 0;

	/* Update the 128-bit byte count, carrying into count[1]. */
	if ((sctx->count[0] += len) < len)
		sctx->count[1]++;
	if (partial) {
		/* Complete and hash the partially filled block first. */
		done = SHA512_BLOCK_SIZE - partial;
		memcpy(sctx->buf + partial, data, done);
		sha512_sparc64_transform(sctx->state, sctx->buf, 1);
	}
	if (len - done >= SHA512_BLOCK_SIZE) {
		/* Hash all remaining full blocks straight from @data. */
		const unsigned int rounds = (len - done) / SHA512_BLOCK_SIZE;

		sha512_sparc64_transform(sctx->state, data + done, rounds);
		done += rounds * SHA512_BLOCK_SIZE;
	}

	memcpy(sctx->buf, data + done, len - done);
}

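/*
 * shash .update callback.  Input that still fits in the block buffer
 * is only copied; the transform runs solely on full blocks.
 */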
static int sha512_sparc64_update(struct shash_desc *desc, const u8 *data,
				 unsigned int len)
{
	struct sha512_state *sctx = shash_desc_ctx(desc);
	unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;

	/* Handle the fast case right here */
	if (partial + len < SHA512_BLOCK_SIZE) {
		if ((sctx->count[0] += len) < len)
			sctx->count[1]++;
		memcpy(sctx->buf + partial, data, len);
	} else
		__sha512_sparc64_update(sctx, data, len, partial);

	return 0;
}

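/*
 * shash .final callback.  Appends the standard SHA-512 padding (a 0x80
 * byte, zeroes up to 112 mod 128, then the 128-bit message bit count)
 * and stores the digest big-endian.
 */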
static int sha512_sparc64_final(struct shash_desc *desc, u8 *out)
{
	struct sha512_state *sctx = shash_desc_ctx(desc);
	unsigned int i, index, padlen;
	__be64 *dst = (__be64 *)out;
	__be64 bits[2];
	static const u8 padding[SHA512_BLOCK_SIZE] = { 0x80, };

	/* Save number of bits */
	bits[1] = cpu_to_be64(sctx->count[0] << 3);
	bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);

	/* Pad out to 112 mod 128 and append length */
	index = sctx->count[0] % SHA512_BLOCK_SIZE;
	padlen = (index < 112) ? (112 - index) : ((SHA512_BLOCK_SIZE+112) - index);

	/* We need to fill a whole block for __sha512_sparc64_update() */
	if (padlen <= 112) {
		if ((sctx->count[0] += padlen) < padlen)
			sctx->count[1]++;
		memcpy(sctx->buf + index, padding, padlen);
	} else {
		__sha512_sparc64_update(sctx, padding, padlen, index);
	}
	__sha512_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 112);

	/* Store state in digest */
	for (i = 0; i < 8; i++)
		dst[i] = cpu_to_be64(sctx->state[i]);

	/* Wipe context */
	memset(sctx, 0, sizeof(*sctx));

	return 0;
}

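/*
 * SHA-384 reuses the SHA-512 compression function; only the initial
 * hash values (set up by sha384_base_init) and the truncation of the
 * output to 48 bytes differ.
 */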
static int sha384_sparc64_final(struct shash_desc *desc, u8 *hash)
{
	u8 D[64];

	sha512_sparc64_final(desc, D);

	memcpy(hash, D, 48);
	memzero_explicit(D, 64);

	return 0;
}

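/*
 * These register under the generic "sha512"/"sha384" names with a
 * priority that beats the C implementations, so users that allocate
 * the hash by name (e.g. crypto_alloc_shash("sha512", 0, 0)) would
 * transparently pick up the opcode-accelerated versions.
 */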
static struct shash_alg sha512 = {
	.digestsize	=	SHA512_DIGEST_SIZE,
	.init		=	sha512_base_init,
	.update		=	sha512_sparc64_update,
	.final		=	sha512_sparc64_final,
	.descsize	=	sizeof(struct sha512_state),
	.base		=	{
		.cra_name	=	"sha512",
		.cra_driver_name=	"sha512-sparc64",
		.cra_priority	=	SPARC_CR_OPCODE_PRIORITY,
		.cra_blocksize	=	SHA512_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
};

static struct shash_alg sha384 = {
	.digestsize	=	SHA384_DIGEST_SIZE,
	.init		=	sha384_base_init,
	.update		=	sha512_sparc64_update,
	.final		=	sha384_sparc64_final,
	.descsize	=	sizeof(struct sha512_state),
	.base		=	{
		.cra_name	=	"sha384",
		.cra_driver_name=	"sha384-sparc64",
		.cra_priority	=	SPARC_CR_OPCODE_PRIORITY,
		.cra_blocksize	=	SHA384_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
};

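/*
 * The sha512 opcode is only usable when the crypto hwcap is set and
 * the SHA512 bit is present in the Crypto Features Register (%asr26).
 */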
static bool __init sparc64_has_sha512_opcode(void)
{
	unsigned long cfr;

	if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
		return false;

	__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
	if (!(cfr & CFR_SHA512))
		return false;

	return true;
}

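/*
 * sha384 is registered first; if sha512 registration then fails, the
 * sha384 registration is rolled back so nothing is left half done.
 */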
static int __init sha512_sparc64_mod_init(void)
{
	if (sparc64_has_sha512_opcode()) {
		int ret = crypto_register_shash(&sha384);
		if (ret < 0)
			return ret;

		ret = crypto_register_shash(&sha512);
		if (ret < 0) {
			crypto_unregister_shash(&sha384);
			return ret;
		}

		pr_info("Using sparc64 sha512 opcode optimized SHA-512/SHA-384 implementation\n");
		return 0;
	}
	pr_info("sparc64 sha512 opcode not available.\n");
	return -ENODEV;
}

static void __exit sha512_sparc64_mod_fini(void)
{
	crypto_unregister_shash(&sha384);
	crypto_unregister_shash(&sha512);
}

module_init(sha512_sparc64_mod_init);
module_exit(sha512_sparc64_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA-384 and SHA-512 Secure Hash Algorithm, sparc64 sha512 opcode accelerated");

MODULE_ALIAS_CRYPTO("sha384");
MODULE_ALIAS_CRYPTO("sha512");

#include "crop_devid.c"