| /lib/raid6/ |
| A D | avx2.c | 38 int d, z, z0; in raid6_avx21_gen_syndrome() local 55 for (z = z0-2; z >= 0; z--) { in raid6_avx21_gen_syndrome() 87 int d, z, z0; in raid6_avx21_xor_syndrome() local 102 for (z = z0-1 ; z >= start ; z--) { in raid6_avx21_xor_syndrome() 113 for (z = start-1 ; z >= 0 ; z--) { in raid6_avx21_xor_syndrome() 164 for (z = z0-1; z >= 0; z--) { in raid6_avx22_gen_syndrome() 215 for (z = z0-1 ; z >= start ; z--) { in raid6_avx22_xor_syndrome() 235 for (z = start-1 ; z >= 0 ; z--) { in raid6_avx22_xor_syndrome() 297 for (z = z0; z >= 0; z--) { in raid6_avx24_gen_syndrome() 382 for (z = z0-1 ; z >= start ; z--) { in raid6_avx24_xor_syndrome() [all …]
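
The gen_syndrome loops indexed above (and in the other SIMD backends below) all have the same shape: walk the data disks from the highest index z0 down to 0, accumulating P as a plain XOR and Q by Horner's rule in GF(2^8) with the RAID-6 polynomial 0x11d, i.e. each step multiplies the running Q by x and XORs in the next disk's bytes. The SIMD code expresses the multiply-by-x with a per-byte sign mask and an AND against 0x1d replicated across the register. A byte-at-a-time C sketch of that shape, with hypothetical names (the kernel variants do the same thing 16 to 64 bytes per step):

```c
#include <stddef.h>
#include <stdint.h>

/* Multiply a GF(2^8) element by x, reducing by the RAID-6 polynomial 0x11d. */
static uint8_t gf_mul_x(uint8_t v)
{
	return (uint8_t)((v << 1) ^ ((v & 0x80) ? 0x1d : 0));
}

/* Sketch of a gen_syndrome: dptr[0..disks-3] are data pages, dptr[disks-2]
 * is P, dptr[disks-1] is Q.  Hypothetical scalar version, one byte at a time. */
static void gen_syndrome_sketch(int disks, size_t bytes, uint8_t **dptr)
{
	uint8_t *p = dptr[disks - 2];
	uint8_t *q = dptr[disks - 1];
	int z0 = disks - 3;			/* highest data disk */

	for (size_t d = 0; d < bytes; d++) {
		uint8_t wp = dptr[z0][d];	/* running P (XOR)    */
		uint8_t wq = dptr[z0][d];	/* running Q (Horner) */

		for (int z = z0 - 1; z >= 0; z--) {
			wq = gf_mul_x(wq) ^ dptr[z][d];
			wp ^= dptr[z][d];
		}
		p[d] = wp;
		q[d] = wq;
	}
}
```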
|
| A D | sse2.c | 40 int d, z, z0; in raid6_sse21_gen_syndrome() local 57 for ( z = z0-2 ; z >= 0 ; z-- ) { in raid6_sse21_gen_syndrome() 92 int d, z, z0; in raid6_sse21_xor_syndrome() local 107 for ( z = z0-1 ; z >= start ; z-- ) { in raid6_sse21_xor_syndrome() 118 for ( z = start-1 ; z >= 0 ; z-- ) { in raid6_sse21_xor_syndrome() 169 for ( z = z0-1 ; z >= 0 ; z-- ) { in raid6_sse22_gen_syndrome() 221 for ( z = z0-1 ; z >= start ; z-- ) { in raid6_sse22_xor_syndrome() 240 for ( z = start-1 ; z >= 0 ; z-- ) { in raid6_sse22_xor_syndrome() 305 for ( z = z0 ; z >= 0 ; z-- ) { in raid6_sse24_gen_syndrome() 393 for ( z = z0-1 ; z >= start ; z-- ) { in raid6_sse24_xor_syndrome() [all …]
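
The xor_syndrome variants indexed above are the read-modify-write path: only the blocks for disks start..stop are folded into fresh partial P/Q values, then the partial Q keeps being multiplied by x for the untouched low-numbered disks (the second for (z = start-1; ...) loop) before both results are XORed into the existing P and Q pages. A hedged scalar sketch of that structure, reusing gf_mul_x() from the previous block:

```c
/* Sketch of an xor_syndrome: fold disks start..stop into P/Q in place.
 * Same dptr layout and hypothetical names as the previous sketch. */
static void xor_syndrome_sketch(int disks, int start, int stop,
				size_t bytes, uint8_t **dptr)
{
	uint8_t *p = dptr[disks - 2];
	uint8_t *q = dptr[disks - 1];
	int z0 = stop;				/* highest disk to fold in */

	for (size_t d = 0; d < bytes; d++) {
		uint8_t wp = dptr[z0][d];
		uint8_t wq = dptr[z0][d];

		for (int z = z0 - 1; z >= start; z--) {	/* folded disks    */
			wq = gf_mul_x(wq) ^ dptr[z][d];
			wp ^= dptr[z][d];
		}
		for (int z = start - 1; z >= 0; z--)	/* shift-only tail */
			wq = gf_mul_x(wq);

		p[d] ^= wp;			/* merge into existing P/Q */
		q[d] ^= wq;
	}
}
```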
|
| A D | avx512.c | 67 for (z = z0-2; z >= 0; z--) { in raid6_avx5121_gen_syndrome() 122 for (z = z0-1 ; z >= start ; z--) { in raid6_avx5121_xor_syndrome() 136 for (z = start-1 ; z >= 0 ; z--) { in raid6_avx5121_xor_syndrome() 196 for (z = z0-1; z >= 0; z--) { in raid6_avx5122_gen_syndrome() 216 : "m" (dptr[z][d]), "m" (dptr[z][d+64])); in raid6_avx5122_gen_syndrome() 258 for (z = z0-1 ; z >= start ; z--) { in raid6_avx5122_xor_syndrome() 281 for (z = start-1 ; z >= 0 ; z--) { in raid6_avx5122_xor_syndrome() 354 for (z = z0; z >= 0; z--) { in raid6_avx5124_gen_syndrome() 392 : "m" (dptr[z][d]), "m" (dptr[z][d+64]), in raid6_avx5124_gen_syndrome() 456 for (z = z0-1 ; z >= start ; z--) { in raid6_avx5124_xor_syndrome() [all …]
|
| A D | rvv.c | 56 for (z = z0 - 1 ; z >= 0 ; z--) { in raid6_rvv1_gen_syndrome_real() 130 for (z = z0 - 1; z >= start; z--) { in raid6_rvv1_xor_syndrome_real() 157 for (z = start - 1; z >= 0; z--) { in raid6_rvv1_xor_syndrome_real() 233 for (z = z0 - 1; z >= 0; z--) { in raid6_rvv2_gen_syndrome_real() 326 for (z = z0 - 1; z >= start; z--) { in raid6_rvv2_xor_syndrome_real() 362 for (z = start - 1; z >= 0; z--) { in raid6_rvv2_xor_syndrome_real() 461 for (z = z0 - 1; z >= 0; z--) { in raid6_rvv4_gen_syndrome_real() 588 for (z = z0 - 1; z >= start; z--) { in raid6_rvv4_xor_syndrome_real() 642 for (z = start - 1; z >= 0; z--) { in raid6_rvv4_xor_syndrome_real() 787 for (z = z0 - 1; z >= 0; z--) { in raid6_rvv8_gen_syndrome_real() [all …]
|
| A D | loongarch_simd.c | 37 int d, z, z0; in raid6_lsx_gen_syndrome() local 62 for (z = z0-1; z >= 0; z--) { in raid6_lsx_gen_syndrome() 64 asm volatile("vld $vr8, %0" : : "m"(dptr[z][d+0*NSIZE])); in raid6_lsx_gen_syndrome() 119 int d, z, z0; in raid6_lsx_xor_syndrome() local 145 for (z = z0-1; z >= start; z--) { in raid6_lsx_xor_syndrome() 184 for (z = start-1; z >= 0; z--) { in raid6_lsx_xor_syndrome() 268 int d, z, z0; in raid6_lasx_gen_syndrome() local 289 for (z = z0-1; z >= 0; z--) { in raid6_lasx_gen_syndrome() 328 int d, z, z0; in raid6_lasx_xor_syndrome() local 350 for (z = z0-1; z >= start; z--) { in raid6_lasx_xor_syndrome() [all …]
|
| A D | sse1.c | 44 int d, z, z0; in raid6_sse11_gen_syndrome() local 61 for ( z = z0-2 ; z >= 0 ; z-- ) { in raid6_sse11_gen_syndrome() 62 asm volatile("prefetchnta %0" : : "m" (dptr[z][d])); in raid6_sse11_gen_syndrome() 70 asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d])); in raid6_sse11_gen_syndrome() 103 int d, z, z0; in raid6_sse12_gen_syndrome() local 122 for ( z = z0-1 ; z >= 0 ; z-- ) { in raid6_sse12_gen_syndrome() 123 asm volatile("prefetchnta %0" : : "m" (dptr[z][d])); in raid6_sse12_gen_syndrome() 132 asm volatile("movq %0,%%mm5" : : "m" (dptr[z][d])); in raid6_sse12_gen_syndrome() 133 asm volatile("movq %0,%%mm7" : : "m" (dptr[z][d+8])); in raid6_sse12_gen_syndrome()
|
| A D | mmx.c | 39 int d, z, z0; in raid6_mmx1_gen_syndrome() local 53 for ( z = z0-1 ; z >= 0 ; z-- ) { in raid6_mmx1_gen_syndrome() 54 asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d])); in raid6_mmx1_gen_syndrome() 87 int d, z, z0; in raid6_mmx2_gen_syndrome() local 104 for ( z = z0-1 ; z >= 0 ; z-- ) { in raid6_mmx2_gen_syndrome() 113 asm volatile("movq %0,%%mm5" : : "m" (dptr[z][d])); in raid6_mmx2_gen_syndrome() 114 asm volatile("movq %0,%%mm7" : : "m" (dptr[z][d+8])); in raid6_mmx2_gen_syndrome()
|
| A D | s390vx.uc | 41 #define AND(x, y, z) fpu_vn(x, y, z) 42 #define XOR(x, y, z) fpu_vx(x, y, z) 51 int d, z, z0; 64 for (z = z0 - 1; z >= 0; z--) { 69 LOAD_DATA(16,&dptr[z][d]); 84 int d, z, z0; 98 for (z = z0 - 1; z >= start; z--) { 103 LOAD_DATA(16,&dptr[z][d]); 108 for (z = start - 1; z >= 0; z--) {
|
| A D | neon.uc | 61 int d, z, z0; 72 for ( z = z0-1 ; z >= 0 ; z-- ) { 73 wd$$ = vld1q_u8(&dptr[z][d+$$*NSIZE]); 92 int d, z, z0; 106 for ( z = z0-1 ; z >= start ; z-- ) { 107 wd$$ = vld1q_u8(&dptr[z][d+$$*NSIZE]); 117 for ( z = start-1 ; z >= 3 ; z -= 4 ) { 125 switch (z) {
|
| A D | int.uc | 79 int d, z, z0; 89 for ( z = z0-1 ; z >= 0 ; z-- ) { 90 wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; 108 int d, z, z0; 119 for ( z = z0-1 ; z >= start ; z-- ) { 120 wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; 129 for ( z = start-1 ; z >= 0 ; z-- ) {
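
The .uc sources here (int.uc above, plus neon.uc, s390vx.uc and the vpermxor.uc/altivec.uc entries nearby) are unrolled-C templates rather than plain C: at build time an awk script expands every line containing $$ once per unroll index to produce the 1-, 2-, 4- and 8-way variants. Roughly, the indexed template line becomes, for a 2-way build (a sketch of the expansion, not the literal generated source):

```c
/* template line from int.uc:
 *	wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
 *
 * 2-way expansion ($$ -> 0, then 1): */
wd0 = *(unative_t *)&dptr[z][d+0*NSIZE];
wd1 = *(unative_t *)&dptr[z][d+1*NSIZE];
```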
|
| A D | vpermxor.uc | 48 int d, z, z0; 58 for (z = z0-1; z>=0; z--) { 59 wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
|
| A D | altivec.uc | 75 int d, z, z0; 86 for ( z = z0-1 ; z >= 0 ; z-- ) { 87 wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
|
| /lib/zlib_inflate/ |
| A D | inflate.c | 795 z->avail_out = 0; in zlib_inflateIncomp() 796 z->next_out = (unsigned char*)z->next_in + z->avail_in; in zlib_inflateIncomp() 798 zlib_updatewindow(z, z->avail_in); in zlib_inflateIncomp() 801 z->avail_out = saved_ao; in zlib_inflateIncomp() 802 z->next_out = saved_no; in zlib_inflateIncomp() 804 z->adler = state->check = in zlib_inflateIncomp() 805 UPDATE(state->check, z->next_in, z->avail_in); in zlib_inflateIncomp() 807 z->total_out += z->avail_in; in zlib_inflateIncomp() 808 z->total_in += z->avail_in; in zlib_inflateIncomp() 809 z->next_in += z->avail_in; in zlib_inflateIncomp() [all …]
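
zlib_inflateIncomp(), whose hits are listed above, injects data that was transmitted uncompressed into an inflate stream: it temporarily points next_out at the input so zlib_updatewindow() copies those bytes into the history window, restores the saved output pointers, then advances the checksum and the byte counters by hand. The UPDATE() call in the hits is a running Adler-32 for zlib-wrapped streams; a standalone sketch of that piece (naive per-byte reduction, real implementations defer the modulo):

```c
#include <stddef.h>
#include <stdint.h>

#define ADLER_MOD 65521u	/* largest prime below 2^16 */

/* Advance a running Adler-32 over a buffer that bypassed decompression --
 * a sketch of the UPDATE() step above, with a hypothetical name. */
static uint32_t adler32_update(uint32_t adler, const uint8_t *buf, size_t len)
{
	uint32_t a = adler & 0xffff;
	uint32_t b = (adler >> 16) & 0xffff;

	while (len--) {
		a = (a + *buf++) % ADLER_MOD;
		b = (b + a) % ADLER_MOD;
	}
	return (b << 16) | a;
}
```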
|
| /lib/crypto/ |
| A D | curve25519-hacl64.c | 133 output[ctr] = z; in fmul_shift_reduce() 138 output[ctr] = z; in fmul_shift_reduce() 143 output[ctr] = z; in fmul_shift_reduce() 148 output[ctr] = z; in fmul_shift_reduce() 311 fmul_fmul(b0, t00, z); in crecip_crecip() 486 u64 *z = p + 5; in addanddouble_fmonty() local 498 fsum(x, z); in addanddouble_fmonty() 748 u64 *z = point + 5; in format_scalar_of_point() local 752 crecip(zmone, z); in format_scalar_of_point() 763 u64 *z = buf0 + 5; in curve25519_generic() local [all …]
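
In the curve25519-hacl64.c hits above, z points at the projective Z coordinate (limbs 5..9 of a point), and format_scalar_of_point() recovers the affine x by computing crecip(zmone, z) -- a field inversion via Fermat's little theorem, z^(p-2) mod p with p = 2^255 - 19 -- and multiplying. A toy illustration of that step over a small prime (square-and-multiply here; the real code uses a fixed addition chain over 51-bit limbs):

```c
#include <stdint.h>
#include <stdio.h>

/* b^e mod p by square-and-multiply; all values stay well below 2^32 here,
 * so the 64-bit products cannot overflow. */
static uint64_t powmod(uint64_t b, uint64_t e, uint64_t p)
{
	uint64_t r = 1;

	b %= p;
	while (e) {
		if (e & 1)
			r = r * b % p;
		b = b * b % p;
		e >>= 1;
	}
	return r;
}

int main(void)
{
	const uint64_t p = 65537, x = 1234, z = 5678;
	uint64_t zinv = powmod(z, p - 2, p);	/* Fermat: z^-1 = z^(p-2) */

	printf("affine x  = %llu\n", (unsigned long long)(x * zinv % p));
	printf("z * z^-1  = %llu\n", (unsigned long long)(z * zinv % p));	/* 1 */
	return 0;
}
```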
|
| A D | sm3.c | 57 #define FF1(x, y, z) (x ^ y ^ z) argument 58 #define FF2(x, y, z) ((x & y) | (x & z) | (y & z)) argument 60 #define GG1(x, y, z) FF1(x, y, z) argument 61 #define GG2(x, y, z) ((x & y) | (~x & z)) argument
|
| A D | sha256.c | 49 #define Ch(x, y, z) ((z) ^ ((x) & ((y) ^ (z)))) argument 50 #define Maj(x, y, z) (((x) & (y)) | ((z) & ((x) | (y)))) argument
|
| A D | sha512.c | 65 #define Ch(x, y, z) ((z) ^ ((x) & ((y) ^ (z)))) argument 66 #define Maj(x, y, z) (((x) & (y)) | ((z) & ((x) | (y)))) argument
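
sha256.c and sha512.c above define Ch and Maj in a rewritten form that needs fewer boolean operations than the textbook definitions (sm3.c's GG2 and FF2 are the textbook choose and majority, respectively). A quick exhaustive self-check that the rewritten forms agree with the textbook ones, assuming nothing beyond the macros quoted above:

```c
#include <assert.h>
#include <stdint.h>

/* Textbook SHA-2 "choose" and "majority". */
#define CH_REF(x, y, z)		(((x) & (y)) ^ (~(x) & (z)))
#define MAJ_REF(x, y, z)	(((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))

/* Rewritten forms used by sha256.c / sha512.c above. */
#define CH_OPT(x, y, z)		((z) ^ ((x) & ((y) ^ (z))))
#define MAJ_OPT(x, y, z)	(((x) & (y)) | ((z) & ((x) | (y))))

int main(void)
{
	/* The operators act bit-by-bit, so checking single-bit inputs suffices. */
	for (uint32_t x = 0; x < 2; x++)
		for (uint32_t y = 0; y < 2; y++)
			for (uint32_t z = 0; z < 2; z++) {
				assert(CH_REF(x, y, z) == CH_OPT(x, y, z));
				assert(MAJ_REF(x, y, z) == MAJ_OPT(x, y, z));
			}
	return 0;
}
```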
|
| A D | aes.c | 105 u32 z = w & 0x40404040; in mul_by_x2() local 108 return (x << 2) ^ (y >> 7) * 0x36 ^ (z >> 6) * 0x1b; in mul_by_x2()
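
mul_by_x2() in aes.c multiplies four field elements at once (packed in one u32) by x^2 in GF(2^8) with the AES polynomial 0x11b: the bits that would overflow are folded back in as 0x36 (bit 7, i.e. x^9 mod 0x11b) and 0x1b (bit 6, i.e. x^8 mod 0x11b). The single-byte equivalent, written out as a sketch:

```c
#include <stdint.h>

/* Multiply one AES field element by x, reducing by x^8 + x^4 + x^3 + x + 1. */
static uint8_t mul_by_x(uint8_t b)
{
	return (uint8_t)((b << 1) ^ ((b >> 7) * 0x1b));
}

/* Single-byte version of the mul_by_x2() expression above: x^9 reduces to
 * 0x36 and x^8 to 0x1b, which is where those constants come from. */
static uint8_t mul_by_x2(uint8_t b)
{
	return (uint8_t)(((b & 0x3f) << 2) ^
			 ((b >> 7) * 0x36) ^
			 (((b >> 6) & 1) * 0x1b));
}
```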
|
| A D | curve25519-fiat32.c | 102 static __always_inline u32 cmovznz32(u32 t, u32 z, u32 nz) in cmovznz32() argument 105 return (t&nz) | ((~t)&z); in cmovznz32() 560 static __always_inline void fe_loose_invert(fe *out, const fe_loose *z) in fe_loose_invert() argument 568 fe_sq_tl(&t0, z); in fe_loose_invert() 572 fe_mul_tlt(&t1, z, &t1); in fe_loose_invert() 610 static __always_inline void fe_invert(fe *out, const fe *z) in fe_invert() argument 613 fe_copy_lt(&l, z); in fe_invert()
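
cmovznz32() above is a constant-time select: the condition is turned into an all-zeros or all-ones mask and the result is picked with AND/OR rather than a branch, so secret-dependent conditions do not leak through timing. A minimal sketch of the pattern (real constant-time code takes extra care that the compiler cannot reintroduce a branch):

```c
#include <stdint.h>

/* Return nz when t is nonzero, z otherwise, without a data-dependent branch.
 * Sketch of the cmovznz32() pattern above; the name is mine. */
static uint32_t ct_select_u32(uint32_t t, uint32_t z, uint32_t nz)
{
	uint32_t mask = (uint32_t)0 - (t != 0);	/* 0x00000000 or 0xffffffff */

	return (mask & nz) | (~mask & z);
}
```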
|
| /lib/tests/ |
| A D | test_bits.c | 122 int z, w; in genmask_input_check_test() local 129 KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(z, 0)); in genmask_input_check_test() 130 KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(0, z)); in genmask_input_check_test() 131 KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(z, w)); in genmask_input_check_test()
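
genmask_input_check_test() above pins down that GENMASK_INPUT_CHECK() quietly evaluates to 0 when its arguments are not compile-time constants (the uninitialized locals z and w), since the high-below-low sanity check can only fire at build time. What GENMASK(h, l) itself produces is simply bits l..h set; a standalone sketch of the expansion:

```c
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))

/* Conceptual expansion of GENMASK(h, l): bits l..h set, all others clear.
 * (The kernel macro additionally folds in GENMASK_INPUT_CHECK().) */
static unsigned long genmask(unsigned int h, unsigned int l)
{
	return (~0UL << l) & (~0UL >> (BITS_PER_LONG - 1 - h));
}

int main(void)
{
	printf("%#lx\n", genmask(7, 4));	/* 0xf0 */
	printf("%#lx\n", genmask(31, 0));	/* 0xffffffff */
	return 0;
}
```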
|
| /lib/ |
| A D | inflate.c | 351 unsigned z; /* number of entries in current table */ in huft_build() local 450 z = 0; /* ditto */ in huft_build() 470 z = (z = g - w) > (unsigned)l ? l : z; /* upper limit on table size */ in huft_build() 476 if (j < z) in huft_build() 477 while (++j < z) /* try smaller tables up to z bits */ in huft_build() 485 z = 1 << j; /* table entries for j-bit table */ in huft_build() 488 if ((q = (struct huft *)malloc((z + 1)*sizeof(struct huft))) == in huft_build() 497 hufts += z + 1; /* track memory usage */ in huft_build() 536 for (j = i >> w; j < z; j += f) in huft_build()
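
huft_build() above turns DEFLATE code lengths into linked decode tables: it counts how many codes exist at each bit length, picks a root table of at most l bits, and allocates z-entry sub-tables for the longer codes (the malloc of z + 1 entries in the hits). The codes it walks are the canonical ones defined by RFC 1951; deriving them from the lengths looks like this (a standalone sketch with hypothetical names, not the kernel's table builder):

```c
#include <stdio.h>

#define MAXBITS 15

/* RFC 1951 canonical code assignment: count codes per length, derive each
 * length's first code, then hand codes out in symbol order. */
static void lengths_to_codes(const unsigned char *len, int n, unsigned *code)
{
	unsigned count[MAXBITS + 1] = { 0 }, next[MAXBITS + 1];
	unsigned c = 0;
	int bits, i;

	for (i = 0; i < n; i++)
		count[len[i]]++;
	count[0] = 0;
	for (bits = 1; bits <= MAXBITS; bits++) {
		c = (c + count[bits - 1]) << 1;
		next[bits] = c;
	}
	for (i = 0; i < n; i++)
		if (len[i])
			code[i] = next[len[i]]++;
}

int main(void)
{
	unsigned char len[4] = { 2, 1, 3, 3 };	/* example code lengths */
	unsigned code[4];

	lengths_to_codes(len, 4, code);
	for (int i = 0; i < 4; i++)
		printf("symbol %d: len %d code %#x\n", i, len[i], code[i]);
	return 0;
}
```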
|
| A D | bch.c | 826 z->deg = 1; in compute_trace_bk_mod() 827 z->c[0] = 0; in compute_trace_bk_mod() 828 z->c[1] = bch->a_pow_tab[k]; in compute_trace_bk_mod() 838 for (j = z->deg; j >= 0; j--) { in compute_trace_bk_mod() 839 out->c[j] ^= z->c[j]; in compute_trace_bk_mod() 840 z->c[2*j] = gf_sqr(bch, z->c[j]); in compute_trace_bk_mod() 841 z->c[2*j+1] = 0; in compute_trace_bk_mod() 843 if (z->deg > out->deg) in compute_trace_bk_mod() 844 out->deg = z->deg; in compute_trace_bk_mod() 847 z->deg *= 2; in compute_trace_bk_mod() [all …]
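
The bch.c hits show compute_trace_bk_mod() building a trace polynomial by repeated squaring: each round XORs the current polynomial z into out, then squares it coefficient-wise (gf_sqr() landing at position 2*j) and doubles its degree, which is exactly how a polynomial over GF(2) is squared. For a single field element the trace is likewise just the sum of repeated squarings; a toy GF(2^4) illustration (my own helper names):

```c
#include <stdint.h>
#include <stdio.h>

/* Carry-less multiply in GF(2^4), reducing by x^4 + x + 1 (0x13). */
static uint8_t gf16_mul(uint8_t a, uint8_t b)
{
	uint8_t r = 0;

	while (b) {
		if (b & 1)
			r ^= a;
		a <<= 1;
		if (a & 0x10)
			a ^= 0x13;
		b >>= 1;
	}
	return r;
}

int main(void)
{
	for (uint8_t a = 0; a < 16; a++) {
		uint8_t s = a, tr = 0;

		for (int i = 0; i < 4; i++) {	/* Tr(a) = a + a^2 + a^4 + a^8 */
			tr ^= s;
			s = gf16_mul(s, s);
		}
		printf("Tr(%2u) = %u\n", a, tr);	/* always 0 or 1 */
	}
	return 0;
}
```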
|
| /lib/math/ |
| A D | div64.c | 202 u64 x, y, z; in mul_u64_u64_div_u64() local 206 z = (u64)a_hi * b_hi + (u32)(y >> 32); in mul_u64_u64_div_u64() 208 z += (u32)(y >> 32); in mul_u64_u64_div_u64() 211 u64 n_lo = x, n_hi = z; in mul_u64_u64_div_u64()
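
mul_u64_u64_div_u64() above needs the full 128-bit product of a and b before it can divide, so it assembles it from 32-bit halves: x, y and z in the hits are the partial products, ordered so that no intermediate sum can overflow 64 bits. The same splitting as a standalone helper (a sketch; the kernel uses native 128-bit arithmetic where the architecture provides it):

```c
#include <stdint.h>

/* Portable 64x64 -> 128-bit multiply from 32-bit halves, mirroring the
 * n_hi:n_lo construction shown in the hits above. */
static void mul_u64_u64(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo)
{
	uint32_t a_lo = (uint32_t)a, a_hi = (uint32_t)(a >> 32);
	uint32_t b_lo = (uint32_t)b, b_hi = (uint32_t)(b >> 32);
	uint64_t x, y, z;

	x = (uint64_t)a_lo * b_lo;				/* low  x low     */
	y = (uint64_t)a_lo * b_hi + (uint32_t)(x >> 32);	/* cross + carry  */
	z = (uint64_t)a_hi * b_hi + (uint32_t)(y >> 32);	/* high x high    */
	y = (uint64_t)a_hi * b_lo + (uint32_t)y;		/* other cross    */
	z += (uint32_t)(y >> 32);

	*hi = z;
	*lo = (y << 32) + (uint32_t)x;
}
```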
|
| /lib/crypto/mpi/ |
| A D | mpicoder.c | 335 int x, j, z, lzeros, ents; in mpi_read_raw_from_sgl() local 396 z = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB; in mpi_read_raw_from_sgl() 397 z %= BYTES_PER_MPI_LIMB; in mpi_read_raw_from_sgl() 407 if (((z + x + 1) % BYTES_PER_MPI_LIMB) == 0) { in mpi_read_raw_from_sgl() 412 z += x; in mpi_read_raw_from_sgl()
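
The z computed in mpi_read_raw_from_sgl() above measures how far nbytes falls short of a whole number of limbs, i.e. how many leading zero bytes are needed so the big-endian scatterlist data fills complete BYTES_PER_MPI_LIMB-sized limbs. The same round-up arithmetic as a standalone helper (hypothetical name):

```c
#include <stddef.h>
#include <stdio.h>

/* Leading zero bytes needed to pad nbytes of big-endian data out to whole
 * limbs -- the "z" arithmetic quoted above. */
static size_t limb_pad_bytes(size_t nbytes, size_t limb_size)
{
	return (limb_size - nbytes % limb_size) % limb_size;
}

int main(void)
{
	printf("%zu\n", limb_pad_bytes(5, 8));	/* 3 */
	printf("%zu\n", limb_pad_bytes(16, 8));	/* 0 */
	return 0;
}
```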
|
| /lib/crypto/x86/ |
| A D | poly1305-x86_64-cryptogams.pl | 2225 map(s/%y/%z/,($D0,$D1,$D2,$D3,$D4)); 2226 map(s/%y/%z/,($H0,$H1,$H2,$H3,$H4)); 2227 map(s/%y/%z/,($MASK)); 2358 vmovdqu64 16*0($inp),%z#$T3 2359 vmovdqu64 16*4($inp),%z#$T4 2724 map(s/%z/%y/,($T0,$T1,$T2,$T3,$T4, $PADBIT)); 3580 map(s/%y/%z/, $H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2); 3581 map(s/%y/%z/, $D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi); 3583 map(s/%y/%z/, $RR0,$RR1,$RR2,$SS1,$SS2); 3780 map(s/%z/%y/, $H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2); [all …]
|