/lib/crypto/arm/

sha1-armv4-large.S
    102  add r7,r7,r9 @ E+=X[i]
    411  add r7,r7,r11,ror#2
    428  add r6,r6,r11,ror#2
    445  add r5,r5,r11,ror#2
    462  add r4,r4,r11,ror#2
    479  add r3,r3,r11,ror#2
    490  add r3,r8,r3
    491  add r4,r9,r4
    492  add r5,r10,r5,ror#2
    493  add r6,r11,r6,ror#2
    [all …]
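The E+=X[i] adds and the ror#2 operands above are the classic SHA-1 round step. As a reference point, here is a minimal C sketch of the generic FIPS 180-4 round this assembly implements (not the kernel's code; rol32 is hand-rolled here):

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t x, int n) /* n in 1..31 */
    {
        return (x << n) | (x >> (32 - n));
    }

    /* One generic SHA-1 round: E += rol(A,5) + f + K + W[i], then the
     * state rotates; B's rol(30) is the "ror#2" folded into later adds. */
    static void sha1_round(uint32_t s[5], uint32_t f, uint32_t k, uint32_t w)
    {
        uint32_t t = rol32(s[0], 5) + f + s[4] + k + w;
        s[4] = s[3];
        s[3] = s[2];
        s[2] = rol32(s[1], 30);
        s[1] = s[0];
        s[0] = t;
    }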
|
chacha-scalar-core.S
    149  add sp, #8
    182  add X0, X0, r8
    183  add X1, X1, r9
    184  add X2, X2, r10
    185  add X3, X3, r11
    247  add r8, #1
    283  add X0, X0, r8
    284  add X1, X1, r9
    362  add sp, #96
    402  0: add sp, #76
    [all …]
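ChaCha mixes with add-xor-rotate quarter-rounds and then adds the input state back into each output block, so the X0 += r8 style adds above could be either step. For orientation, a C sketch of the canonical quarter-round (a reference, not the kernel's scalar code):

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t x, int n) /* n in 1..31 */
    {
        return (x << n) | (x >> (32 - n));
    }

    /* Canonical ChaCha quarter-round: the "a += b" / "c += d" halves of
     * these four ARX steps are the adds seen in the listing. */
    static void chacha_quarter_round(uint32_t *a, uint32_t *b,
                                     uint32_t *c, uint32_t *d)
    {
        *a += *b; *d = rol32(*d ^ *a, 16);
        *c += *d; *b = rol32(*b ^ *c, 12);
        *a += *b; *d = rol32(*d ^ *a, 8);
        *c += *d; *b = rol32(*b ^ *c, 7);
    }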
|
blake2s-core.S
    76   add \a0, \a0, M_0
    77   add \a1, \a1, M_1
    84   add \c0, \c0, \d0, ror #16
    85   add \c1, \c1, \d1, ror #16
    95   add \a0, \a0, \b0, ror #12
    96   add \a1, \a1, \b1, ror #12
    97   add \a0, \a0, M_0
    98   add \a1, \a1, M_1
    105  add \c0, \c0, \d0, ror#8
    106  add \c1, \c1, \d1, ror#8
    [all …]
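The rotate amounts 16, 12 and 8 paired with those adds are BLAKE2s's G function; the ARM code defers each rotation into the shifted operand of the following add instead of rotating eagerly. The canonical G in C, as a reference sketch:

    #include <stdint.h>

    static inline uint32_t ror32(uint32_t x, int n) /* n in 1..31 */
    {
        return (x >> n) | (x << (32 - n));
    }

    /* BLAKE2s G: "add a0, a0, M_0" lines are the message-word
     * injections; "add c0, c0, d0, ror #16" is "c += d" with d's
     * pending rotation folded into the operand. */
    static void blake2s_g(uint32_t *a, uint32_t *b, uint32_t *c,
                          uint32_t *d, uint32_t m0, uint32_t m1)
    {
        *a += *b + m0;
        *d = ror32(*d ^ *a, 16);
        *c += *d;
        *b = ror32(*b ^ *c, 12);
        *a += *b + m1;
        *d = ror32(*d ^ *a, 8);
        *c += *d;
        *b = ror32(*b ^ *c, 7);
    }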
|
sha1-armv7-neon.S
    96   add e, e, RT1; \
    99   add e, e, RT0;
    109  add e, e, RT3; \
    112  add e, e, RT0; \
    124  add e, e, RT0; \
    127  add e, e, RT1;
    587  add _a, RT0;
    589  add _b, RT1;
    590  add _c, RT2;
    591  add _d, RT3;
    [all …]
|
/lib/crypto/arm64/

chacha-neon-core.S
    197  add x10, x10, x5
    214  add x8, x0, #16
    246  add a0, a0, a4
    248  add a1, a1, a5
    250  add a2, a2, a6
    252  add a3, a3, a7
    647  add x3, x2, x4
    735  add x5, x5, x1
    751  add x5, x5, x1
    763  add x6, x6, x1
    [all …]
|
sha512-ce-core.S
    78   add v5.2d, v\rc0\().2d, v\in0\().2d
    82   add v\i3\().2d, v\i3\().2d, v5.2d
    91   add v\i4\().2d, v\i1\().2d, v\i3\().2d
    184  add v8.2d, v8.2d, v0.2d
    185  add v9.2d, v9.2d, v1.2d
    186  add v10.2d, v10.2d, v2.2d
    187  add v11.2d, v11.2d, v3.2d
|
sha256-ce.S
    32   add t1.4s, v\s0\().4s, \rc\().4s
    37   add t0.4s, v\s0\().4s, \rc\().4s
    98   add t0.4s, v16.4s, v0.4s
    123  add dgav.4s, dgav.4s, dg0v.4s
    124  add dgbv.4s, dgbv.4s, dg1v.4s
|
sha1-ce-core.S
    36   add t1.4s, v\s0\().4s, \rc\().4s
    45   add t0.4s, v\s0\().4s, \rc\().4s
    88   add t0.4s, v8.4s, k0.4s
    116  add dgbv.2s, dgbv.2s, dg1v.2s
    117  add dgav.4s, dgav.4s, dg0v.4s
|
/lib/crypto/powerpc/

sha1-powerpc-asm.S
    44   add r0,RE(t),r15; \
    45   add RT(t),RT(t),r6; \
    46   add r14,r0,W(t); \
    49   add RT(t),RT(t),r14
    57   add r0,RE(t),r15; \
    61   add r0,r0,W(t); \
    73   add r0,r0,W(t); \
    74   add RT(t),RT(t),r0
    85   add r0,r0,W(t); \
    102  add r0,r0,W(t); \
    [all …]
|
sha256-spe-asm.S
    120  add d,d,h; /* 1: d = d + temp1 */ \
    151  add c,c,g; /* 2: d = d + temp1 */ \
    198  add d,d,h; /* 1: d = d + temp1 */ \
    278  add rH0,rH0,rW0
    280  add rH1,rH1,rW1
    282  add rH2,rH2,rW2
    284  add rH3,rH3,rW3
    286  add rH4,rH4,rW4
    288  add rH5,rH5,rW5
    290  add rH6,rH6,rW6
    [all …]
|
sha1-spe-asm.S
    113  add e,e,rT0; /* 1: E = E + A' */ \
    115  add e,e,w0; /* 1: E = E + W */ \
    128  add d,d,rT2 /* 2: E = E + F */
    154  add d,d,rT1 /* 2: E = E + F */
    178  add d,d,rT0 /* 2: E = E + A' */
    206  add d,d,rT0 /* 2: E = E + A' */
    275  add rH0,rH0,rT3
    277  add rH1,rH1,rW1
    279  add rH2,rH2,rW2
    281  add rH3,rH3,rW3
    [all …]
|
/lib/crypto/x86/

sha512-avx2-asm.S
    117  # Add reg to mem using reg-mem add and store
    119  add \p1, \p2
    184  add frame_XFER(%rsp),h  # h = k + w + h  # --
    196  add h, d                # d = k + w + h + d  # --
    209  add y1, h               # h = k + w + h + S0  # --
    260  add h, d                # d = k + w + h + d  # --
    422  add frame_XFER(%rsp), h # h = k + w + h  # --
    585  add INP, NUM_BLKS       # pointer to end of data
    633  add $(4*32), TBL
    646  add $(2*32), TBL
    [all …]
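The "h = k + w + h" and "d = k + w + h + d" comments track the standard SHA-512 round sums. A plain C sketch of that round in its generic FIPS 180-4 form (not the AVX2 scheduling; same structure applies to the SHA-256 files below with 32-bit words and different rotate counts):

    #include <stdint.h>

    static inline uint64_t ror64(uint64_t x, int n) /* n in 1..63 */
    {
        return (x >> n) | (x << (64 - n));
    }

    /* One SHA-512 round: t1 is the "k + w + h + S1 + CH" running sum the
     * comments above track; d += t1 and h = t1 + t2 are the big adds. */
    static void sha512_round(uint64_t s[8], uint64_t k, uint64_t w)
    {
        uint64_t e = s[4], a = s[0];
        uint64_t S1 = ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41);
        uint64_t ch = (e & s[5]) ^ (~e & s[6]);
        uint64_t t1 = s[7] + S1 + ch + k + w;
        uint64_t S0 = ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39);
        uint64_t maj = (a & s[1]) ^ (a & s[2]) ^ (s[1] & s[2]);
        uint64_t t2 = S0 + maj;
        s[7] = s[6]; s[6] = s[5]; s[5] = s[4];
        s[4] = s[3] + t1;               /* d = d + t1 */
        s[3] = s[2]; s[2] = s[1]; s[1] = s[0];
        s[0] = t1 + t2;                 /* new a = t1 + S0 + Maj */
    }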
|
sha256-avx2-asm.S
    59   # Add reg to mem using reg-mem add and store
    61   add \p1, \p2
    174  add h, d       # d = k + w + h + d  # --
    190  add y1, h      # h = k + w + h + S0  # --
    223  add h, d       # d = k + w + h + d  # --
    274  add h, d       # d = k + w + h + d  # --
    580  add $64, INP
    608  add $4*32, SRND
    623  add $2*32, SRND
    652  add $2*32, SRND
    [all …]
|
sha256-avx-asm.S
    58   # Add reg to mem using reg-mem add and store
    60   add \p1, \p2
    177  add y0, y2             # y2 = S1 + CH
    178  add _XFER(%rsp), y2    # y2 = k + w + S1 + CH
    212  add y0, y2             # y2 = S1 + CH
    250  add y0, y2             # y2 = S1 + CH
    289  add y0, y2             # y2 = S1 + CH
    360  add INP, NUM_BLKS      # pointer to end of data
    405  add $4*16, TBL
    422  add $2*16, TBL
    [all …]
|
sha256-ssse3-asm.S
    57   # Add reg to mem using reg-mem add and store
    59   add \p1, \p2
    173  add y0, y2       # y2 = S1 + CH
    212  add y0, y2       # y2 = S1 + CH
    253  add y0, y2       # y2 = S1 + CH
    295  add y0, y2       # y2 = S1 + CH
    332  add y0, y2       # y2 = S1 + CH
    367  add INP, NUM_BLKS
    417  add $4*16, TBL
    433  add $2*16, TBL
    [all …]
|
sha512-avx-asm.S
    187  add WK_2(idx), T1
    193  add h_64, T1
    196  add tmp0, T1
    210  add T1, d_64
    218  add tmp0, h_64
    236  add WK_2(idx), T1
    244  add h_64, T1
    246  add tmp0, T1
    259  add T1, d_64
    264  add tmp0, h_64
    [all …]
|
sha512-ssse3-asm.S
    177  add WK_2(idx), T1
    189  add tmp0, T1
    190  add h_64, T1
    210  add tmp0, T2
    211  add T1, d_64
    223  add WK_2(idx), T1
    233  add tmp0, T1
    235  add h_64, T1
    261  add tmp0, T2
    262  add T1, d_64
    [all …]
|
sha1-ssse3-and-avx.S
    84   add BUF, CNT
    163  add $64, BUFFER_PTR   # move to the next 64-byte block
    244  add \hash, \val
    260  add WK(\round), \e
    264  add T1, \e
    265  add WK(\round + 1), \d
    270  add \a, \e
    271  add T1, \d
    278  add T1, \d
|
sha1-avx2-asm.S
    152  add \hash, \val
    380  add WK(\r), E
    401  add WK(\r), E
    415  add TA, E      /* E += A >>> 5 */
    423  add WK(\r), E
    441  add TA, E      /* E += A >>> 5 */
    450  add $\d, RTA
|
/lib/zstd/common/

compiler.h
    246  unsigned char const* ZSTD_wrappedPtrAdd(unsigned char const* ptr, ptrdiff_t add)
    248      return ptr + add;
    271  unsigned char* ZSTD_maybeNullPtrAdd(unsigned char* ptr, ptrdiff_t add)
    273      return add > 0 ? ptr + add : ptr;
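Both helpers read as undefined-behavior guards around pointer arithmetic: ZSTD_wrappedPtrAdd isolates an intentionally wrapping add in one sanitizer-visible place, and ZSTD_maybeNullPtrAdd's "add > 0 ? ptr + add : ptr" avoids ever evaluating NULL + 0, which is undefined in C even though the result looks harmless. (That is a reading of the snippets above, not something this listing states.)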
|
/lib/crc/arm64/

crc32-core.S
    67   add x8, x8, x1
    68   add x1, x1, x7
    161  add x4, x3, x3, lsl #1   // x4 := 3 * x3
    162  add x7, in, x3, lsl #4   // x7 := in + 16 * x3
    163  add x8, in, x3, lsl #5   // x8 := in + 32 * x3
    164  add x9, in, x4, lsl #4   // x9 := in + 16 * x4
    168  add x5, x5, x4, lsl #2   // x5 += 12 * x3
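The shifted-operand adds at 161-168 are plain address arithmetic that splits the input into parallel lanes. The same computation in C, with the lane layout inferred from the comments (so treat the names and the meaning of x3 as assumptions):

    #include <stddef.h>
    #include <stdint.h>

    /* Pointer math from lines 161-164: with x3 as a per-lane chunk count
     * (an assumption), derive three pointers 16*x3, 32*x3 and 48*x3
     * bytes into the input. */
    static void crc32_lane_ptrs(const uint8_t *in, ptrdiff_t x3,
                                const uint8_t **p0, const uint8_t **p1,
                                const uint8_t **p2)
    {
        ptrdiff_t x4 = 3 * x3;   /* add x4, x3, x3, lsl #1 */
        *p0 = in + 16 * x3;      /* add x7, in, x3, lsl #4 */
        *p1 = in + 32 * x3;      /* add x8, in, x3, lsl #5 */
        *p2 = in + 16 * x4;      /* add x9, in, x4, lsl #4 (= 48 * x3) */
    }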
|
/lib/crc/x86/

crc32c-3way.S
    88   add n_misaligned_q, bufp
    141  add $32, bufp
    146  add $4, %eax
    153  add $8, bufp
|
crc-pclmul-template.S
    179  add $\vl, BUF
    306  add $32, BUF
    311  add $16, BUF
    321  add $VL, BUF
    333  add $VL, BUF
    342  add $-4*VL, LEN   // Shorter than 'sub 4*VL' when VL=32
    355  add $-4*VL, LEN
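The "add $-4*VL" trick at 342 and 355 works because x86 sign-extends 8-bit immediates: with VL=32, the constant -128 fits in an imm8 encoding, whereas "sub $128" would need a full 32-bit immediate and a longer instruction.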
|
/lib/crypto/sparc/

sha1_asm.S
    31   add %o1, 0x40, %o1
    69   add %o1, 0x40, %o1
|
sha256_asm.S
    34   add %o1, 0x40, %o1
    75   add %o1, 0x40, %o1
|