| /lib/crypto/arm/ |
| A D | sha256-ce.S | 32 .macro add_only, ev, s0 34 .ifnb \s0 39 .ifnb \s0 40 vadd.u32 ta\ev, q\s0, k\ev 44 .macro add_update, ev, s0, s1, s2, s3 45 sha256su0.32 q\s0, q\s1 47 sha256su1.32 q\s0, q\s2, q\s3
|
| A D | sha1-ce-core.S | 36 .macro add_only, op, ev, rc, s0, dg1 37 .ifnb \s0 38 vadd.u32 tb\ev, q\s0, \rc 48 .macro add_update, op, ev, rc, s0, s1, s2, s3, dg1 49 sha1su0.32 q\s0, q\s1, q\s2 51 sha1su1.32 q\s0, q\s3
|
| A D | sha512-armv4.pl | 559 my ($t0,$t1,$s0,$s1) = map("q$_",(12..15)); # temps 569 vext.8 $s0,@X[$i%8],@X[($i+1)%8],#8 @ X[i+1] 572 vshr.u64 $t0,$s0,#@sigma0[0] 574 vshr.u64 $t1,$s0,#@sigma0[1] 576 vshr.u64 $s1,$s0,#@sigma0[2] 577 vsli.64 $t0,$s0,#`64-@sigma0[0]` 578 vsli.64 $t1,$s0,#`64-@sigma0[1]` 579 vext.8 $s0,@X[($i+4)%8],@X[($i+5)%8],#8 @ X[i+9] 582 vadd.i64 @X[$i%8],$s0
|
| A D | blake2s-core.S | 68 .macro _blake2s_quarterround a0, b0, c0, d0, a1, b1, c1, d1, s0, s1, s2, s3 70 ldr M_0, [sp, #32 + 4 * \s0] 128 .macro _blake2s_round s0, s1, s2, s3, s4, s5, s6, s7, \ 135 \s0, \s1, \s2, \s3
|
| /lib/crypto/arm64/ |
| A D | sha256-ce.S | 29 .macro add_only, ev, rc, s0 32 add t1.4s, v\s0\().4s, \rc\().4s 36 .ifnb \s0 37 add t0.4s, v\s0\().4s, \rc\().4s 44 .macro add_update, ev, rc, s0, s1, s2, s3 45 sha256su0 v\s0\().4s, v\s1\().4s 47 sha256su1 v\s0\().4s, v\s2\().4s, v\s3\().4s
|
| A D | sha1-ce-core.S | 34 .macro add_only, op, ev, rc, s0, dg1 36 add t1.4s, v\s0\().4s, \rc\().4s 44 .ifnb \s0 45 add t0.4s, v\s0\().4s, \rc\().4s 52 .macro add_update, op, ev, rc, s0, s1, s2, s3, dg1 53 sha1su0 v\s0\().4s, v\s1\().4s, v\s2\().4s 55 sha1su1 v\s0\().4s, v\s3\().4s
|
| /lib/crypto/riscv/ |
| A D | chacha-riscv64-zvkb.S | 63 #define KEY0 s0 146 sd s0, 0(sp) 283 ld s0, 0(sp)
|
| /lib/crypto/mips/ |
| A D | poly1305-mips.pl | 42 ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7,$s8,$s9,$s10,$s11)=map("\$$_",(12..23)); 227 ($s0,$s1,$s2,$s3,$s4,$s5,$in0,$in1,$t2); 265 sd $s0,0($sp) 434 ld $s0,0($sp) 734 ($s0,$s1,$s2,$s3,$s4, $s5,$s6,$s7,$s8, $s9,$s10,$s11); 762 sw $s0, 4*0($sp) 1144 lw $s0, 4*0($sp)
|
| A D | chacha-core.S | 29 #define T1 $s0 215 sw $s0, 0($sp) 294 lw $s0, 0($sp)
|
| /lib/crypto/x86/ |
| A D | chacha-ssse3-x86_64.S | 122 # x0..3 = s0..3 135 # o0 = i0 ^ (x0 + s0) 245 # x0..15[0-3] = s0..3[0..3] 503 # x0[0-3] += s0[0] 504 # x1[0-3] += s0[1] 512 # x2[0-3] += s0[2] 513 # x3[0-3] += s0[3]
|
| A D | sha512-ssse3-asm.S | 246 pxor %xmm4, %xmm3 # XMM3 = s0(W[t-15]) 248 paddq %xmm3, %xmm0 # XMM0 = s1(W[t-2]) + s0(W[t-15]) 251 paddq W_t(idx), %xmm0 # XMM0 = s1(W[t-2]) + s0(W[t-15]) + W[t-16] 253 paddq %xmm1, %xmm0 # XMM0 = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16]
|
| A D | sha256-avx-asm.S | 154 ## compute s0 four at a time and s1 two at a time 171 ## compute s0 219 vpxor XTMP4, XTMP3, XTMP1 # XTMP1 = s0 227 vpaddd XTMP1, XTMP0, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
|
| A D | sha256-ssse3-asm.S | 148 ## compute s0 four at a time and s1 two at a time 166 ## compute s0 219 pxor XTMP4, XTMP1 # XTMP1 = s0 227 paddd XTMP1, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
|
| A D | chacha-avx512vl-x86_64.S | 41 # x0..3[0-2] = s0..3 113 # o0 = i0 ^ (x0 + s0) 208 # x0..3[0-4] = s0..3 325 # o0 = i0 ^ (x0 + s0), first block 379 # o0 = i0 ^ (x0 + s0), third block
|
| A D | sha512-avx-asm.S | 237 vpxor %xmm5, %xmm6, %xmm6 # XMM6 = s0(W[t-15]) 239 vpaddq %xmm6, %xmm0, %xmm0 # XMM0 = s1(W[t-2]) + W[t-16] + s0(W[t-15]) 242 # s0(W[t-15]) + W[t-16]
|
| A D | chacha-avx2-x86_64.S | 51 # x0..3[0-2] = s0..3 140 # o0 = i0 ^ (x0 + s0) 245 # x0..3[0-4] = s0..3 391 # o0 = i0 ^ (x0 + s0), first block 445 # o0 = i0 ^ (x0 + s0), third block
|
| A D | sha512-avx2-asm.S | 226 vpxor YTMP1, YTMP3, YTMP1 # YTMP1 = s0 230 vpaddq YTMP1, YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0 232 vperm2f128 $0x0, YTMP0, YTMP0, Y_0 # Y_0 = W[-16] + W[-7] + s0 {BABA} 234 vpand MASK_YMM_LO(%rip), YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0 {DC00}
|
| A D | sha256-avx2-asm.S | 239 vpxor XTMP4, XTMP3, XTMP1 # XTMP1 = s0 244 vpaddd XTMP1, XTMP0, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
|
| /lib/crypto/ |
| A D | sha256.c | 53 #define s0(x) (ror32((x), 7) ^ ror32((x), 18) ^ ((x) >> 3)) macro 63 W[I] = s1(W[I - 2]) + W[I - 7] + s0(W[I - 15]) + W[I - 16]; in BLEND_OP()
|
| A D | sha512.c | 69 #define s0(x) (ror64((x), 1) ^ ror64((x), 8) ^ ((x) >> 7)) macro 94 s0(W[(j - 15) & 15]); in sha512_block_generic()
|
| A D | curve25519-hacl64.c | 228 u128 s0 = ((((((u128)(r0) * (r0))) + (((u128)(d4) * (r1))))) + in fsquare_fsquare__() local 238 tmp[0] = s0; in fsquare_fsquare__()
|
| /lib/crc/arm64/ |
| A D | crc32-core.S | 169 ldp s0, s1, [x5]
|
| /lib/crypto/powerpc/ |
| A D | poly1305-p10le_64.S | 25 …22/21 - this revision based on the above sum of products. Setup r^4, r^3, r^2, r and s3, s2, s1, s0
|