
Searched refs: W (Results 1 – 14 of 14) sorted by relevance

/lib/crypto/powerpc/
sha1-powerpc-asm.S
58 xor r5,W((t)+4-3),W((t)+4-8); \
60 xor W((t)+4),W((t)+4-16),W((t)+4-14); \
62 xor W((t)+4),W((t)+4),r5; \
64 rotlwi W((t)+4),W((t)+4),1
84 xor W((t)+4),W((t)+4-16),W((t)+4-14); \
86 xor W((t)+4),W((t)+4),r5; \
88 rotlwi W((t)+4),W((t)+4),1
97 xor r5,W((t)+4-3),W((t)+4-8); \
99 xor W((t)+4),W((t)+4-16),W((t)+4-14); \
103 xor W((t)+4),W((t)+4),r5; \
[all …]
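
The xor/rotlwi chains above are the SHA-1 message-schedule expansion, computed four words ahead of the current round. A minimal plain-C sketch of the recurrence they implement, assuming the kernel's u32 type and rol32() helper and a 16-word circular buffer; the function name is illustrative, not from the file:

    /* SHA-1 schedule: W[t] = rol32(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1),
     * held in a 16-word circular buffer indexed modulo 16. */
    static inline u32 sha1_schedule_next(const u32 W[16], unsigned int t)
    {
            return rol32(W[(t + 13) & 15] ^ W[(t + 8) & 15] ^
                         W[(t + 2) & 15] ^ W[t & 15], 1);
    }
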
/lib/crypto/x86/
sha1-ssse3-and-avx.S
386 psrld $31, W
391 pslld $2, W
411 pxor W_minus_28, W # W is W_minus_32 before xor
418 psrld $30, W
500 vpxor W_minus_08, W, W
503 vpxor W_TMP1, W, W
507 vpsrld $31, W, W
523 vpxor W_minus_28, W, W # W is W_minus_32 before xor
526 vpxor W_TMP1, W, W
529 vpsrld $30, W, W
[all …]
sha512-ssse3-asm.S
162 # Eg. XMM2=W[t-2] really means XMM2={W[t-2]|W[t-1]}
200 psrlq $(7-1), %xmm3 # XMM3 = ((W[t-15]>>1)^W[t-15])>>6
203 pxor %xmm2, %xmm0 # XMM0 = (((W[t-2]>>42)^W[t-2])>>13)^W[t-2]
206 pxor %xmm5, %xmm3 # XMM3 = (((W[t-15]>>1)^W[t-15])>>6)^W[t-15]
209 psrlq $6, %xmm0 # XMM0 = ((((W[t-2]>>42)^W[t-2])>>13)^W[t-2])>>6
212 psrlq $1, %xmm3 # XMM3 = (((W[t-15]>>1)^W[t-15])>>6)^W[t-15]>>1
228 pxor %xmm2, %xmm1 # XMM1 = (W[t-2] << 42)^W[t-2]
231 pxor %xmm5, %xmm4 # XMM4 = (W[t-15]<<7)^W[t-15]
248 paddq %xmm3, %xmm0 # XMM0 = s1(W[t-2]) + s0(W[t-15])
251 paddq W_t(idx), %xmm0 # XMM0 = s1(W[t-2]) + s0(W[t-15]) + W[t-16]
[all …]
sha512-avx-asm.S
77 # W[t] + K[t] | W[t+1] + K[t+1]
167 # Eg. XMM4=W[t-2] really means XMM4={W[t-2]|W[t-1]}
184 vpxor %xmm1, %xmm0, %xmm0 # XMM0 = W[t-2]>>61 ^ W[t-2]>>19
194 vpxor %xmm7, %xmm6, %xmm6 # XMM6 = W[t-15]>>1 ^ W[t-15]>>8
203 vpxor %xmm3, %xmm2, %xmm2 # XMM2 = W[t-2]>>6 ^ W[t-2]<<3
208 vpxor %xmm9, %xmm8, %xmm8 # XMM8 = W[t-15]>>7 ^ W[t-15]<<63
213 vpxor %xmm8, %xmm6, %xmm6 # XMM6 = W[t-15]>>1 ^ W[t-15]>>8 ^
214 # W[t-15]>>7 ^ W[t-15]<<63
239 vpaddq %xmm6, %xmm0, %xmm0 # XMM0 = s1(W[t-2]) + W[t-16] + s0(W[t-15])
241 vpaddq %xmm1, %xmm0, %xmm0 # XMM0 = W[t] = s1(W[t-2]) + W[t-7] +
[all …]
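
Both SHA-512 assembly files above assemble the message-schedule small sigma functions from individual shift and XOR steps (the comment chains ending in >>1, >>8, >>19, >>61, and so on). For reference, a sketch of the standard FIPS 180-4 functions being built, assuming the kernel's u64 type and ror64() helper; the names are illustrative:

    /* SHA-512 schedule helpers:
     *   s0(x) = ror64(x, 1)  ^ ror64(x, 8)  ^ (x >> 7)
     *   s1(x) = ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6)
     * giving W[t] = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16]. */
    static inline u64 sha512_sigma0(u64 x)
    {
            return ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7);
    }

    static inline u64 sha512_sigma1(u64 x)
    {
            return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6);
    }
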
sha256-ssse3-asm.S
149 ## compute W[-16] + W[-7] 4 at a time
154 palignr $4, X2, XTMP0 # XTMP0 = W[-7]
162 paddd X0, XTMP0 # XTMP0 = W[-7] + W[-16]
167 palignr $4, X0, XTMP1 # XTMP1 = W[-15]
171 movdqa XTMP1, XTMP2 # XTMP2 = W[-15]
175 movdqa XTMP1, XTMP3 # XTMP3 = W[-15]
191 movdqa XTMP3, XTMP2 # XTMP2 = W[-15]
215 pxor XTMP2, XTMP1 # XTMP1 = W[-15] ror 7 ^ W[-15] ror 18
227 paddd XTMP1, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
264 paddd XTMP4, XTMP0 # XTMP0 = {..., ..., W[1], W[0]}
[all …]
sha512-avx2-asm.S
168 vpaddq Y_0, YTMP0, YTMP0 # YTMP0 = W[-7] + W[-16]
225 vpxor YTMP4, YTMP3, YTMP3 # YTMP3 = W[-15] ror 1 ^ W[-15] >> 7
230 vpaddq YTMP1, YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0
232 vperm2f128 $0x0, YTMP0, YTMP0, Y_0 # Y_0 = W[-16] + W[-7] + s0 {BABA}
288 vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {BABA}
293 # (W[-2] ror 61) ^ (W[-2] >> 6) {BABA}
296 vpaddq YTMP4, Y_0, Y_0 # Y_0 = {W[1], W[0], W[1], W[0]}
344 vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {DC--}
349 # (W[-2] ror 61) ^ (W[-2] >> 6) {DC--}
353 vpaddq YTMP4, YTMP0, YTMP2 # YTMP2 = {W[3], W[2], --, --}
[all …]
sha256-avx-asm.S
155 ## compute W[-16] + W[-7] 4 at a time
160 vpalignr $4, X2, X3, XTMP0 # XTMP0 = W[-7]
167 vpaddd X0, XTMP0, XTMP0 # XTMP0 = W[-7] + W[-16]
172 vpalignr $4, X0, X1, XTMP1 # XTMP1 = W[-15]
189 vpor XTMP2, XTMP3, XTMP3 # XTMP1 = W[-15] MY_ROR 7
203 vpsrld $3, XTMP1, XTMP4 # XTMP4 = W[-15] >> 3
215 vpxor XTMP2, XTMP3, XTMP3 # XTMP1 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR
224 vpshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA}
227 vpaddd XTMP1, XTMP0, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
261 vpaddd XTMP4, XTMP0, XTMP0 # XTMP0 = {..., ..., W[1], W[0]}
[all …]
sha256-avx2-asm.S
162 vpalignr $4, X2, X3, XTMP0 # XTMP0 = W[-7]
168 vpaddd X0, XTMP0, XTMP0 # XTMP0 = W[-7] + W[-16]# y1 = (e >> 6)# S1
177 vpalignr $4, X0, X1, XTMP1 # XTMP1 = W[-15]
193 vpor XTMP2, XTMP3, XTMP3 # XTMP3 = W[-15] ror 7
212 vpsrld $3, XTMP1, XTMP4 # XTMP4 = W[-15] >> 3
233 vpxor XTMP2, XTMP3, XTMP3 # XTMP3 = W[-15] ror 7 ^ W[-15] ror 18
240 vpshufd $0b11111010, X3, XTMP2 # XTMP2 = W[-2] {BBAA}
244 vpaddd XTMP1, XTMP0, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
249 vpsrld $10, XTMP2, XTMP4 # XTMP4 = W[-2] >> 10 {BBAA}
285 vpaddd XTMP4, XTMP0, XTMP0 # XTMP0 = {..., ..., W[1], W[0]}
[all …]
/lib/crypto/
sha256.c
63 W[I] = s1(W[I - 2]) + W[I - 7] + s0(W[I - 15]) + W[I - 16]; in BLEND_OP()
95 BLEND_OP(i + 0, W); in sha256_block_generic()
96 BLEND_OP(i + 1, W); in sha256_block_generic()
97 BLEND_OP(i + 2, W); in sha256_block_generic()
98 BLEND_OP(i + 3, W); in sha256_block_generic()
99 BLEND_OP(i + 4, W); in sha256_block_generic()
100 BLEND_OP(i + 5, W); in sha256_block_generic()
101 BLEND_OP(i + 6, W); in sha256_block_generic()
102 BLEND_OP(i + 7, W); in sha256_block_generic()
141 u32 W[64]; in sha256_blocks_generic() local
[all …]
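
BLEND_OP() above uses the SHA-256 small sigma helpers s0 and s1, which the hits do not show. A sketch of the standard FIPS 180-4 definitions they conventionally denote, assuming the kernel's u32 type and ror32() helper; function names are illustrative:

    /* SHA-256 schedule helpers:
     *   s0(x) = ror32(x, 7)  ^ ror32(x, 18) ^ (x >> 3)
     *   s1(x) = ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10)
     * so W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16] as in BLEND_OP(). */
    static inline u32 sha256_sigma0(u32 x)
    {
            return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3);
    }

    static inline u32 sha256_sigma1(u32 x)
    {
            return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10);
    }
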
sha512.c
84 u64 W[16]; in sha512_block_generic() local
87 W[j] = get_unaligned_be64(data + j * sizeof(u64)); in sha512_block_generic()
92 W[j & 15] += s1(W[(j - 2) & 15]) + in sha512_block_generic()
93 W[(j - 7) & 15] + in sha512_block_generic()
94 s0(W[(j - 15) & 15]); in sha512_block_generic()
97 t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[(i & 15)]; in sha512_block_generic()
99 t1 = g + e1(d) + Ch(d, e, f) + sha512_K[i+1] + W[(i & 15) + 1]; in sha512_block_generic()
101 t1 = f + e1(c) + Ch(c, d, e) + sha512_K[i+2] + W[(i & 15) + 2]; in sha512_block_generic()
103 t1 = e + e1(b) + Ch(b, c, d) + sha512_K[i+3] + W[(i & 15) + 3]; in sha512_block_generic()
105 t1 = d + e1(a) + Ch(a, b, c) + sha512_K[i+4] + W[(i & 15) + 4]; in sha512_block_generic()
[all …]
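
The t1 computations above also rely on helpers not shown in the hits: e1 (applied to the rotating e word) and Ch. Assuming these follow the usual FIPS 180-4 big Sigma-1 and choice functions, a sketch with illustrative names:

    /* SHA-512 round helpers (assumed standard definitions):
     *   e1(x)       = ror64(x, 14) ^ ror64(x, 18) ^ ror64(x, 41)   (big Sigma-1)
     *   Ch(x, y, z) = (x & y) ^ (~x & z), written branch-free below. */
    static inline u64 sha512_big_sigma1(u64 x)
    {
            return ror64(x, 14) ^ ror64(x, 18) ^ ror64(x, 41);
    }

    static inline u64 sha512_ch(u64 x, u64 y, u64 z)
    {
            return z ^ (x & (y ^ z));
    }
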
sm3.c
66 #define I(i) (W[i] = get_unaligned_be32(data + i * 4))
67 #define W1(i) (W[i & 0x0f])
68 #define W2(i) (W[i & 0x0f] = \
69 P1(W[i & 0x0f] \
70 ^ W[(i-9) & 0x0f] \
71 ^ rol32(W[(i-3) & 0x0f], 15)) \
72 ^ rol32(W[(i-13) & 0x0f], 7) \
73 ^ W[(i-6) & 0x0f])
174 u32 W[16]; in sm3_block_generic() local
177 sm3_transform(sctx, data, W); in sm3_block_generic()
[all …]
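
The W2() expansion macro above passes its XOR through the permutation P1, which the hits do not show. A sketch of the standard SM3 (GB/T 32905-2016) definition it refers to, assuming the kernel's rol32() helper; the function name is illustrative:

    /* SM3 expansion permutation: P1(x) = x ^ rol32(x, 15) ^ rol32(x, 23) */
    static inline u32 sm3_p1(u32 x)
    {
            return x ^ rol32(x, 15) ^ rol32(x, 23);
    }
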
sha1.c
43 #define setW(x, val) (*(volatile __u32 *)&W(x) = (val))
45 #define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0)
47 #define setW(x, val) (W(x) = (val))
51 #define W(x) (array[(x)&15]) macro
58 #define SHA_MIX(t) rol32(W(t+13) ^ W(t+8) ^ W(t+2) ^ W(t), 1)
/lib/crypto/arm/
sha1-armv7-neon.S
218 veor.32 W, W, W_m08; \
222 veor W, W, tmp0; \
225 vshl.u32 tmp0, W, #1; \
229 vshr.u32 W, W, #31; \
254 veor W, W_m28; \
260 veor W, W_m16; \
263 veor W, tmp0; \
269 vshl.u32 tmp1, W, #2; \
272 vshr.u32 tmp0, W, #30; \
275 vorr W, tmp0, tmp1; \
[all …]
/lib/crypto/mips/
chacha-core.S
182 #define AXR(A, B, C, D, K, L, M, N, V, W, Y, Z, S) \ argument
188 xor X(W), X(B); \
192 rotr X(W), 32 - S; \
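
The AXR macro above performs the add/xor/rotate steps of the ChaCha quarter round across several state words at a time (W names one of the XOR destinations, and rotr X(W), 32 - S is a left rotate by S). A plain-C sketch of the quarter round those steps compose, assuming the kernel's u32 type and rol32() helper; the function name is illustrative:

    /* ChaCha quarter round: four add/xor/rotate-left steps */
    static inline void chacha_quarterround(u32 *a, u32 *b, u32 *c, u32 *d)
    {
            *a += *b;  *d = rol32(*d ^ *a, 16);
            *c += *d;  *b = rol32(*b ^ *c, 12);
            *a += *b;  *d = rol32(*d ^ *a, 8);
            *c += *d;  *b = rol32(*b ^ *c, 7);
    }
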
