/crypto/
cast5_generic.c, in __cast5_encrypt():
    327  t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
    328  t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
    329  t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
    330  t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
    331  t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
    332  t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
    333  t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
    334  t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
    335  t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
    336  t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
    [all …]
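
These unrolled rounds are the classic Feistel pattern: save the left half, shift the right half over, then XOR the saved half with a keyed round function of the old right half (CAST5 cycles through three round functions F1/F2/F3). A minimal userspace sketch of the idiom with a toy round function, not the real CAST5 F functions:

```c
#include <stdint.h>

/* Toy round function standing in for CAST5's F1/F2/F3. */
static uint32_t F(uint32_t x, uint32_t k)
{
    return (x + k) ^ (x << 3);
}

/* One Feistel round in the style of __cast5_encrypt(): the old left
 * half is folded into the new right half, so the step is invertible
 * no matter what F computes. */
static void feistel_round(uint32_t *l, uint32_t *r, uint32_t k)
{
    uint32_t t = *l;

    *l = *r;
    *r = t ^ F(*l, k);    /* *l now holds the old right half */
}

/* Inverse round: undo the swap, then cancel the XOR. */
static void feistel_round_inv(uint32_t *l, uint32_t *r, uint32_t k)
{
    uint32_t t = *r;

    *r = *l;
    *l = t ^ F(*r, k);    /* *r now holds the pre-round right half */
}
```

Decryption just runs the inverse rounds with the subkeys in reverse order, which is why Feistel ciphers never need F itself to be invertible.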
|
michael_mic.c:
     18  u32 l, r;              /* struct member */
     25  u32 l, r;              /* struct member */
     36  r ^= rol32(l, 17); \
     37  l += r;            \
     38  r ^= xswap(l);     \
     39  l += r;            \
     40  r ^= rol32(l, 3);  \
     41  l += r;            \
     42  r ^= ror32(l, 2);  \
     43  l += r;            \
    [all …]
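
Lines 36-43 are the body of Michael's block function, the ARX core of the TKIP message integrity code: rotations and a byte swap alternate with 32-bit additions across the (l, r) state. A self-contained userspace sketch; the xswap definition here is an assumption, written as the swap of the bytes within each 16-bit half that the Michael specification calls for:

```c
#include <stdint.h>

static inline uint32_t rol32(uint32_t v, unsigned int s)
{
    return (v << s) | (v >> (32 - s));
}

static inline uint32_t ror32(uint32_t v, unsigned int s)
{
    return (v >> s) | (v << (32 - s));
}

/* Assumed definition: swap the bytes within each 16-bit half,
 * i.e. bytes ABCD become BADC. */
static inline uint32_t xswap(uint32_t v)
{
    return ((v & 0x00ff00ffu) << 8) | ((v & 0xff00ff00u) >> 8);
}

/* Michael block function: the caller XORs a 32-bit message word
 * into l, then one call diffuses it through the 64-bit (l, r) state. */
static void michael_block(uint32_t *l, uint32_t *r)
{
    *r ^= rol32(*l, 17);  *l += *r;
    *r ^= xswap(*l);      *l += *r;
    *r ^= rol32(*l, 3);   *l += *r;
    *r ^= ror32(*l, 2);   *l += *r;
}
```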
|
khazad.c, in khazad_setkey():
    760  int r;                                          /* local */
    768  for (r = 0; r <= KHAZAD_ROUNDS; r++) {
    769      ctx->E[r] = T0[(int)(K1 >> 56)       ] ^
    777                  c[r] ^ K2;
    779      K1 = ctx->E[r];
    783  for (r = 1; r < KHAZAD_ROUNDS; r++) {
    784      K1 = ctx->E[KHAZAD_ROUNDS - r];
    785      ctx->D[r] = T0[(int)S[(int)(K1 >> 56)] & 0xff] ^
and in khazad_crypt():
    803  int r;                                          /* local */
    808  for (r = 1; r < KHAZAD_ROUNDS; r++) {
    [all …]
|
anubis.c, in anubis_setkey():
    466  int N, R, i, r;                /* local */
    489  for (r = 0; r <= R; r++) {
    529      if (r == R)
    557  for (r = 1; r < R; r++) {
    560      ctx->D[r][i] =
and in anubis_crypt():
    574  int i, r;                      /* local */
    589  for (r = 1; r < R; r++) {
    595      roundKey[r][0];
    601      roundKey[r][1];
    607      roundKey[r][2];
    [all …]
|
blake2b_generic.c:
    48  a = a + b + m[blake2b_sigma[r][2*i+0]]; \
    52  a = a + b + m[blake2b_sigma[r][2*i+1]]; \
    58  #define ROUND(r) \                     /* macro argument */
    60      G(r,0,v[ 0],v[ 4],v[ 8],v[12]); \
    61      G(r,1,v[ 1],v[ 5],v[ 9],v[13]); \
    62      G(r,2,v[ 2],v[ 6],v[10],v[14]); \
    63      G(r,3,v[ 3],v[ 7],v[11],v[15]); \
    64      G(r,4,v[ 0],v[ 5],v[10],v[15]); \
    65      G(r,5,v[ 1],v[ 6],v[11],v[12]); \
    66      G(r,6,v[ 2],v[ 7],v[ 8],v[13]); \
    [all …]
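
Lines 48 and 52 are the two message-word injections inside BLAKE2b's G quarter-round; the index elides the rotations between them. For context, a sketch of the whole G function as specified in RFC 7693 (BLAKE2b rotation constants 32, 24, 16, 63), with the sigma row for round r passed in:

```c
#include <stdint.h>

static inline uint64_t ror64(uint64_t v, unsigned int s)
{
    return (v >> s) | (v << (64 - s));
}

/* BLAKE2b G quarter-round per RFC 7693: two message words, chosen
 * through the round's sigma permutation, are injected while four
 * state words are mixed with rotations by 32, 24, 16 and 63. */
static void blake2b_g(const uint64_t m[16], const uint8_t sigma[16],
                      unsigned int i, uint64_t *a, uint64_t *b,
                      uint64_t *c, uint64_t *d)
{
    *a = *a + *b + m[sigma[2 * i + 0]];
    *d = ror64(*d ^ *a, 32);
    *c = *c + *d;
    *b = ror64(*b ^ *c, 24);
    *a = *a + *b + m[sigma[2 * i + 1]];
    *d = ror64(*d ^ *a, 16);
    *c = *c + *d;
    *b = ror64(*b ^ *c, 63);
}
```

ROUND(r) then calls G four times on the columns of the 4x4 state v[] and four times on its diagonals, which is exactly the call pattern visible in lines 60-66.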
|
fcrypt.c, in fcrypt_encrypt():
    237  __be32 l, r;                   /* struct member */
    242  F_ENCRYPT(X.r, X.l, ctx->sched[0x0]);
    243  F_ENCRYPT(X.l, X.r, ctx->sched[0x1]);
    244  F_ENCRYPT(X.r, X.l, ctx->sched[0x2]);
    245  F_ENCRYPT(X.l, X.r, ctx->sched[0x3]);
    246  F_ENCRYPT(X.r, X.l, ctx->sched[0x4]);
    247  F_ENCRYPT(X.l, X.r, ctx->sched[0x5]);
    248  F_ENCRYPT(X.r, X.l, ctx->sched[0x6]);
    249  F_ENCRYPT(X.l, X.r, ctx->sched[0x7]);
    250  F_ENCRYPT(X.r, X.l, ctx->sched[0x8]);
    [all …]
|
ecc.c:
    559  u64 r[ECC_MAX_DIGITS * 2];               /* local, in vli_mmod_special() */
    565  vli_add(r, r, t, ndigits * 2);
    570  vli_sub(r, r, t, ndigits * 2);
    593  u64 r[ECC_MAX_DIGITS * 2];               /* local, in vli_mmod_special2() */
    620  vli_sub(r, r, qc, ndigits * 2);
    622  vli_add(r, r, qc, ndigits * 2);
    625  vli_add(r, r, m, ndigits * 2);
    627  vli_sub(r, r, m, ndigits * 2);
    698  vli_sub(r, product, r, ndigits * 2);     /* in vli_mmod_barrett() */
    703  carry = vli_sub(r, r, mod, ndigits);
    [all …]
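
The vli_mmod_barrett() lines show the correction phase of Barrett reduction: estimate the quotient with a precomputed reciprocal, multiply back and subtract (line 698), then fix up the remainder with conditional subtractions of the modulus (line 703). A single-word toy version of the algorithm, using GCC/Clang's unsigned __int128 for the wide multiply rather than the kernel's multi-precision vli helpers:

```c
#include <stdint.h>

/* Barrett reduction, single-word toy version: reduce x mod m with a
 * precomputed reciprocal mu ~= 2^64 / m instead of a hardware divide.
 * The kernel's vli_mmod_barrett() does the same dance with vli_* ops. */
struct barrett_ctx {
    uint64_t m;   /* modulus */
    uint64_t mu;  /* floor((2^64 - 1) / m), the reciprocal estimate */
};

static void barrett_init(struct barrett_ctx *ctx, uint64_t m)
{
    ctx->m = m;
    ctx->mu = UINT64_MAX / m;
}

static uint64_t barrett_reduce(const struct barrett_ctx *ctx, uint64_t x)
{
    /* Quotient estimate: q = floor(x * mu / 2^64) <= floor(x / m). */
    uint64_t q = (uint64_t)(((unsigned __int128)x * ctx->mu) >> 64);
    uint64_t r = x - q * ctx->m;

    /* The estimate undershoots by at most a few multiples of m. */
    while (r >= ctx->m)
        r -= ctx->m;
    return r;
}
```

The vli_mmod_special() and vli_mmod_special2() variants above exploit moduli of special shape, so their estimate-and-correct loops (the vli_add/vli_sub pairs) need no general wide multiply at all.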
|
ecrdsa.c, in ecrdsa_verify():
     76  u64 r[ECRDSA_MAX_DIGITS];      /* local: witness (r) */
    104  vli_from_be64(r, src + ndigits * sizeof(u64), ndigits);
    107  if (vli_is_zero(r, ndigits) ||
    108      vli_cmp(r, ctx->curve->n, ndigits) >= 0 ||
    126  vli_sub(_r, ctx->curve->n, r, ndigits);
    136  if (!vli_cmp(cc.x, r, ndigits))
|
ecdsa.c, in _ecdsa_verify():
    23  static int _ecdsa_verify(struct ecc_ctx *ctx, const u64 *hash, const u64 *r, const u64 *s)
    35  if (vli_is_zero(r, ndigits) || vli_cmp(r, curve->n, ndigits) >= 0 ||
    48  vli_mod_mult_slow(u2, r, s1, curve->n, ndigits);
    57  if (!vli_cmp(res.x, r, ndigits))
and in ecdsa_verify():
    86  return _ecdsa_verify(ctx, hash, sig->r, sig->s);
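
These lines follow textbook ECDSA verification. With h the truncated message hash, G the curve generator, Q the public key, and n the group order, the verifier rejects unless 1 <= r, s < n (the vli_is_zero/vli_cmp checks on line 35), then computes

    s1 = s^-1 mod n
    u1 = h * s1 mod n        u2 = r * s1 mod n    (line 48 computes u2)
    (x1, y1) = u1*G + u2*Q

and accepts iff x1 = r (mod n), which is the final vli_cmp(res.x, r, ndigits) comparison on line 57.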
|
cast6_generic.c:
    27  #define F1(D, r, m) ((I = ((m) + (D))), (I = rol32(I, (r))), \
    29  #define F2(D, r, m) ((I = ((m) ^ (D))), (I = rol32(I, (r))), \
    31  #define F3(D, r, m) ((I = ((m) - (D))), (I = rol32(I, (r))), \
|
wp512.c, in wp512_process_buffer():
    781  int i, r;                      /* local */
    799  for (r = 0; r < WHIRLPOOL_ROUNDS; r++) {
    809      rc[r];
|
adiantum.c:
    186  static inline void le128_add(le128 *r, const le128 *v1, const le128 *v2)
    191      r->b = cpu_to_le64(x + y);
    192      r->a = cpu_to_le64(le64_to_cpu(v1->a) + le64_to_cpu(v2->a) +
    197  static inline void le128_sub(le128 *r, const le128 *v1, const le128 *v2)
    202      r->b = cpu_to_le64(x - y);
    203      r->a = cpu_to_le64(le64_to_cpu(v1->a) - le64_to_cpu(v2->a) -
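
le128_add() builds 128-bit arithmetic from two u64 halves; the elided tail of line 192 is the carry, detectable because an unsigned sum wraps below either operand exactly when it overflows. A portable userspace sketch of the same trick, with plain uint64_t halves instead of the kernel's le64 wrappers:

```c
#include <stdint.h>

struct u128 {
    uint64_t hi, lo;
};

/* 128-bit add from 64-bit halves: the low sum wraps around (becomes
 * smaller than an operand) exactly when a carry into the high half
 * is needed. */
static void u128_add(struct u128 *r, const struct u128 *a, const struct u128 *b)
{
    uint64_t lo = a->lo + b->lo;

    r->hi = a->hi + b->hi + (lo < a->lo);    /* carry out of low half */
    r->lo = lo;
}

/* 128-bit subtract: a borrow occurred iff the subtrahend's low half
 * is larger than the minuend's. */
static void u128_sub(struct u128 *r, const struct u128 *a, const struct u128 *b)
{
    r->hi = a->hi - b->hi - (a->lo < b->lo); /* borrow from high half */
    r->lo = a->lo - b->lo;
}
```

Adiantum uses this for arithmetic on 128-bit tweak blocks; on most targets the `lo < a->lo` comparison compiles down to a plain carry-flag check.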
|
ecdsasignature.asn1:
    2  r INTEGER ({ ecdsa_get_signature_r }),
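
This is the r component of the standard ECDSA-Sig-Value, a SEQUENCE of two INTEGERs (r, s) as defined in RFC 3279; the braced token is an action of the kernel's ASN.1 compiler that hands the decoded integer to ecdsa_get_signature_r() (see ecdsa-x962.c below).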
|
streebog_generic.c, in streebog_add512():
    925  struct streebog_uint512 *r)    /* argument */
    937  r->qword[i] = cpu_to_le64(sum);
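
streebog_add512() applies the same carry trick as le128_add() above, generalized to a loop over the eight 64-bit words of a 512-bit value before line 937 stores each sum. A userspace sketch, assuming least-significant-limb-first order and skipping the kernel's le64 conversions:

```c
#include <stdint.h>

/* 512-bit addition over eight 64-bit limbs, least significant first.
 * The carry chain needs two overflow checks per limb, because adding
 * the incoming carry can itself wrap before b[i] is added. */
static void add512(uint64_t r[8], const uint64_t a[8], const uint64_t b[8])
{
    unsigned int carry = 0;

    for (int i = 0; i < 8; i++) {
        uint64_t t = a[i] + carry;
        unsigned int c1 = (t < carry);   /* wrapped while adding carry */

        r[i] = t + b[i];
        carry = c1 | (r[i] < b[i]);      /* wrapped while adding b[i] */
    }
}
```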
|
ecdsa-p1363.c, in ecdsa_p1363_verify():
    32  ecc_digits_from_bytes(src, keylen, sig.r, ndigits);
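
A P1363 signature is the raw fixed-width big-endian concatenation r || s, so verification starts by repacking each half into a little-endian u64 digit array. A sketch of that conversion; be_bytes_to_digits() is a hypothetical helper in the spirit of ecc_digits_from_bytes(), not the kernel function itself:

```c
#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Repack a big-endian byte string, as found in a P1363 signature half,
 * into an array of u64 digits, least significant digit first. Short
 * inputs are implicitly zero-padded at the most significant end. */
static void be_bytes_to_digits(const uint8_t *in, size_t nbytes,
                               uint64_t *out, unsigned int ndigits)
{
    memset(out, 0, ndigits * sizeof(*out));

    /* in[nbytes - 1] is the least significant byte. */
    for (size_t i = 0; i < nbytes && i / 8 < ndigits; i++) {
        uint8_t byte = in[nbytes - 1 - i];

        out[i / 8] |= (uint64_t)byte << (8 * (i % 8));
    }
}
```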
|
ecdsa-x962.c, in ecdsa_get_signature_r():
    63  return ecdsa_get_signature_rs(sig_ctx->sig.r, hdrlen, tag, value, vlen,
|