
Searched refs:d (Results 1 – 25 of 84) sorted by relevance


/lib/raid6/
avx2.c
38 int d, z, z0; in raid6_avx21_gen_syndrome() local
49 for (d = 0; d < bytes; d += 32) { in raid6_avx21_gen_syndrome()
87 int d, z, z0; in raid6_avx21_xor_syndrome() local
97 for (d = 0 ; d < bytes ; d += 32) { in raid6_avx21_xor_syndrome()
145 int d, z, z0; in raid6_avx22_gen_syndrome() local
157 for (d = 0; d < bytes; d += 64) { in raid6_avx22_gen_syndrome()
197 int d, z, z0; in raid6_avx22_xor_syndrome() local
207 for (d = 0 ; d < bytes ; d += 64) { in raid6_avx22_xor_syndrome()
277 int d, z, z0; in raid6_avx24_gen_syndrome() local
296 for (d = 0; d < bytes; d += 128) { in raid6_avx24_gen_syndrome()
[all …]
sse2.c
40 int d, z, z0; in raid6_sse21_gen_syndrome() local
51 for ( d = 0 ; d < bytes ; d += 16 ) { in raid6_sse21_gen_syndrome()
92 int d, z, z0; in raid6_sse21_xor_syndrome() local
102 for ( d = 0 ; d < bytes ; d += 16 ) { in raid6_sse21_xor_syndrome()
150 int d, z, z0; in raid6_sse22_gen_syndrome() local
163 for ( d = 0 ; d < bytes ; d += 32 ) { in raid6_sse22_gen_syndrome()
203 int d, z, z0; in raid6_sse22_xor_syndrome() local
213 for ( d = 0 ; d < bytes ; d += 32 ) { in raid6_sse22_xor_syndrome()
282 int d, z, z0; in raid6_sse24_gen_syndrome() local
304 for ( d = 0 ; d < bytes ; d += 64 ) { in raid6_sse24_gen_syndrome()
[all …]
rvv.c
30 unsigned long vl, d; in raid6_rvv1_gen_syndrome_real() local
45 for (d = 0; d < bytes; d += NSIZE * 1) { in raid6_rvv1_gen_syndrome_real()
103 unsigned long vl, d; in raid6_rvv1_xor_syndrome_real() local
118 for (d = 0 ; d < bytes ; d += NSIZE * 1) { in raid6_rvv1_xor_syndrome_real()
201 unsigned long vl, d; in raid6_rvv2_gen_syndrome_real() local
219 for (d = 0; d < bytes; d += NSIZE * 2) { in raid6_rvv2_gen_syndrome_real()
311 for (d = 0; d < bytes; d += NSIZE * 2) { in raid6_rvv2_xor_syndrome_real()
441 for (d = 0; d < bytes; d += NSIZE * 4) { in raid6_rvv4_gen_syndrome_real()
567 for (d = 0; d < bytes; d += NSIZE * 4) { in raid6_rvv4_xor_syndrome_real()
755 for (d = 0; d < bytes; d += NSIZE * 8) { in raid6_rvv8_gen_syndrome_real()
[all …]
avx512.c
59 for (d = 0; d < bytes; d += 64) { in raid6_avx5121_gen_syndrome()
115 for (d = 0 ; d < bytes ; d += 64) { in raid6_avx5121_xor_syndrome()
187 for (d = 0; d < bytes; d += 128) { in raid6_avx5122_gen_syndrome()
223 : "m" (p[d]), "m" (p[d+64]), "m" (q[d]), in raid6_avx5122_gen_syndrome()
247 for (d = 0 ; d < bytes ; d += 128) { in raid6_avx5122_xor_syndrome()
307 : "m" (q[d]), "m" (q[d+64]), "m" (p[d]), in raid6_avx5122_xor_syndrome()
353 for (d = 0; d < bytes; d += 256) { in raid6_avx5124_gen_syndrome()
412 : "m" (p[d]), "m" (p[d+64]), "m" (p[d+128]), in raid6_avx5124_gen_syndrome()
413 "m" (p[d+192]), "m" (q[d]), "m" (q[d+64]), in raid6_avx5124_gen_syndrome()
437 for (d = 0 ; d < bytes ; d += 256) { in raid6_avx5124_xor_syndrome()
[all …]
loongarch_simd.c
37 int d, z, z0; in raid6_lsx_gen_syndrome() local
52 for (d = 0; d < bytes; d += NSIZE*4) { in raid6_lsx_gen_syndrome()
134 for (d = 0; d < bytes; d += NSIZE*4) { in raid6_lsx_xor_syndrome()
235 : "+m"(p[d+NSIZE*0]), "+m"(p[d+NSIZE*1]), in raid6_lsx_xor_syndrome()
236 "+m"(p[d+NSIZE*2]), "+m"(p[d+NSIZE*3]), in raid6_lsx_xor_syndrome()
237 "+m"(q[d+NSIZE*0]), "+m"(q[d+NSIZE*1]), in raid6_lsx_xor_syndrome()
238 "+m"(q[d+NSIZE*2]), "+m"(q[d+NSIZE*3]) in raid6_lsx_xor_syndrome()
283 for (d = 0; d < bytes; d += NSIZE*2) { in raid6_lasx_gen_syndrome()
343 for (d = 0; d < bytes; d += NSIZE*2) { in raid6_lasx_xor_syndrome()
406 : "+m"(p[d+NSIZE*0]), "+m"(p[d+NSIZE*1]), in raid6_lasx_xor_syndrome()
[all …]
sse1.c
44 int d, z, z0; in raid6_sse11_gen_syndrome() local
55 for ( d = 0 ; d < bytes ; d += 8 ) { in raid6_sse11_gen_syndrome()
70 asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d])); in raid6_sse11_gen_syndrome()
80 asm volatile("movntq %%mm2,%0" : "=m" (p[d])); in raid6_sse11_gen_syndrome()
81 asm volatile("movntq %%mm4,%0" : "=m" (q[d])); in raid6_sse11_gen_syndrome()
103 int d, z, z0; in raid6_sse12_gen_syndrome() local
116 for ( d = 0 ; d < bytes ; d += 16 ) { in raid6_sse12_gen_syndrome()
141 asm volatile("movntq %%mm2,%0" : "=m" (p[d])); in raid6_sse12_gen_syndrome()
142 asm volatile("movntq %%mm3,%0" : "=m" (p[d+8])); in raid6_sse12_gen_syndrome()
143 asm volatile("movntq %%mm4,%0" : "=m" (q[d])); in raid6_sse12_gen_syndrome()
[all …]
mmx.c
39 int d, z, z0; in raid6_mmx1_gen_syndrome() local
50 for ( d = 0 ; d < bytes ; d += 8 ) { in raid6_mmx1_gen_syndrome()
54 asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d])); in raid6_mmx1_gen_syndrome()
63 asm volatile("movq %%mm2,%0" : "=m" (p[d])); in raid6_mmx1_gen_syndrome()
65 asm volatile("movq %%mm4,%0" : "=m" (q[d])); in raid6_mmx1_gen_syndrome()
87 int d, z, z0; in raid6_mmx2_gen_syndrome() local
99 for ( d = 0 ; d < bytes ; d += 16 ) { in raid6_mmx2_gen_syndrome()
122 asm volatile("movq %%mm2,%0" : "=m" (p[d])); in raid6_mmx2_gen_syndrome()
123 asm volatile("movq %%mm3,%0" : "=m" (p[d+8])); in raid6_mmx2_gen_syndrome()
124 asm volatile("movq %%mm4,%0" : "=m" (q[d])); in raid6_mmx2_gen_syndrome()
[all …]
s390vx.uc
51 int d, z, z0;
61 for (d = 0; d < bytes; d += $#*NSIZE) {
62 LOAD_DATA(0,&dptr[z0][d]);
73 STORE_DATA(0,&p[d]);
74 STORE_DATA(8,&q[d]);
84 int d, z, z0;
94 for (d = 0; d < bytes; d += $#*NSIZE) {
114 LOAD_DATA(16,&p[d]);
116 STORE_DATA(16,&p[d]);
117 LOAD_DATA(16,&q[d]);
[all …]
neon.uc
61 int d, z, z0;
70 for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
73 wd$$ = vld1q_u8(&dptr[z][d+$$*NSIZE]);
82 vst1q_u8(&p[d+NSIZE*$$], wp$$);
83 vst1q_u8(&q[d+NSIZE*$$], wq$$);
92 int d, z, z0;
101 for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
102 wq$$ = vld1q_u8(&dptr[z0][d+$$*NSIZE]);
147 w1$$ = vld1q_u8(&q[d+NSIZE*$$]);
150 vst1q_u8(&p[d+NSIZE*$$], wp$$);
[all …]
int.uc
79 int d, z, z0;
87 for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
88 wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
90 wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
98 *(unative_t *)&p[d+NSIZE*$$] = wp$$;
99 *(unative_t *)&q[d+NSIZE*$$] = wq$$;
108 int d, z, z0;
116 for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
120 wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
135 *(unative_t *)&p[d+NSIZE*$$] ^= wp$$;
[all …]
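
Every /lib/raid6/ hit above is the same inner loop at a different vector width: for each stripe offset d, P accumulates the plain XOR of the data blocks while Q accumulates the GF(2^8) weighted sum, multiplying the running value by the generator x (0x02) before folding in each next block. A minimal scalar sketch of that recurrence, modeled on the int.uc hits (the names gf_mul2() and gen_syndrome() are illustrative, not kernel symbols):

    #include <stddef.h>
    #include <stdint.h>

    /* Multiply a GF(2^8) element by x (0x02); field polynomial 0x11d. */
    static uint8_t gf_mul2(uint8_t v)
    {
        return (uint8_t)((v << 1) ^ ((v & 0x80) ? 0x1d : 0));
    }

    /*
     * P/Q syndrome over 'disks' data blocks of 'bytes' bytes each:
     * P is the XOR of all blocks, Q = sum of g^z * D_z in GF(2^8),
     * evaluated by Horner's rule from the highest block down --
     * the recurrence every gen_syndrome() hit above unrolls.
     */
    static void gen_syndrome(int disks, size_t bytes, uint8_t **dptr,
                             uint8_t *p, uint8_t *q)
    {
        int z, z0 = disks - 1;          /* highest data block */
        size_t d;

        for (d = 0; d < bytes; d++) {
            uint8_t wp = dptr[z0][d];   /* running P */
            uint8_t wq = wp;            /* running Q */

            for (z = z0 - 1; z >= 0; z--) {
                wp ^= dptr[z][d];                 /* P ^= D_z */
                wq = gf_mul2(wq) ^ dptr[z][d];    /* Q = Q*x ^ D_z */
            }
            p[d] = wp;
            q[d] = wq;
        }
    }

The SIMD variants (mmx.c, sse1.c, sse2.c, avx2.c, avx512.c, neon.uc, rvv.c, loongarch_simd.c, s390vx.uc) vectorize exactly this, which is why the stride on d differs per file: it is the number of bytes each unrolled iteration covers.
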
/lib/crypto/
des.c
628 d = k[4]; d &= 0x0e; d <<= 4; d |= k[0] & 0x1e; d = pc1[d]; in des_ekey()
633 pe[15 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; in des_ekey()
656 d = k[0]; d &= 0xe0; d >>= 4; d |= k[4] & 0xf0; d = pc1[d + 1]; in des_ekey()
664 pe[15 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; in des_ekey()
682 for (d = 0; d < 16; ++d) { in des_ekey()
719 d = k[4]; d &= 0x0e; d <<= 4; d |= k[0] & 0x1e; d = pc1[d]; in dkey()
724 pe[ 0 * 2] = DES_PC2(a, b, c, d); d = rs[d]; in dkey()
726 pe[ 2 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; in dkey()
744 d = k[0]; d &= 0xe0; d >>= 4; d |= k[4] & 0xf0; d = pc1[d + 1]; in dkey()
749 pe[ 0 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; in dkey()
[all …]
sm3.c
52 #define R1(a, b, c, d, e, f, g, h, t, w1, w2) \ argument
53 R(1, a, b, c, d, e, f, g, h, t, w1, w2)
54 #define R2(a, b, c, d, e, f, g, h, t, w1, w2) \ argument
55 R(2, a, b, c, d, e, f, g, h, t, w1, w2)
77 u32 a, b, c, d, e, f, g, h, ss1, ss2; in sm3_transform() local
82 d = sctx->state[3]; in sm3_transform()
88 R1(a, b, c, d, e, f, g, h, K[0], I(0), I(4)); in sm3_transform()
89 R1(d, a, b, c, h, e, f, g, K[1], I(1), I(5)); in sm3_transform()
90 R1(c, d, a, b, g, h, e, f, K[2], I(2), I(6)); in sm3_transform()
91 R1(b, c, d, a, f, g, h, e, K[3], I(3), I(7)); in sm3_transform()
[all …]
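
The sm3.c hits show a standard unrolled-hash idiom rather than anything SM3-specific: each R1() invocation rotates its argument order (R1(a, b, c, d, ...), then R1(d, a, b, c, ...), then R1(c, d, a, b, ...)), so the working variables are renamed from round to round instead of being shuffled at the end of every round. A two-variable toy showing the same idiom (not SM3; R() here is invented for illustration):

    #include <stdint.h>

    /* Toy round: fold w into (u, v), then rotate u. Not SM3. */
    #define R(u, v, w) do {                     \
            (v) ^= (u) + (w);                   \
            (u) = ((u) << 7) | ((u) >> 25);     \
    } while (0)

    static void toy_rounds(uint32_t s[2], const uint32_t w[4])
    {
        uint32_t a = s[0], b = s[1];

        /* The argument order rotates; no variable is ever moved. */
        R(a, b, w[0]);
        R(b, a, w[1]);
        R(a, b, w[2]);
        R(b, a, w[3]);

        s[0] = a; s[1] = b;
    }

The compiler keeps a and b pinned in registers across all rounds, which is the point of the idiom in sm3_transform().
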
poly1305-donna64.c
43 u128 d0, d1, d2, d; in poly1305_core_blocks() local
74 d = (u128)h1 * s2; in poly1305_core_blocks()
75 d0 += d; in poly1305_core_blocks()
76 d = (u128)h2 * s1; in poly1305_core_blocks()
77 d0 += d; in poly1305_core_blocks()
79 d = (u128)h1 * r0; in poly1305_core_blocks()
80 d1 += d; in poly1305_core_blocks()
81 d = (u128)h2 * s2; in poly1305_core_blocks()
82 d1 += d; in poly1305_core_blocks()
85 d2 += d; in poly1305_core_blocks()
[all …]
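
The poly1305-donna64.c hits are the partial products of one h = h * r (mod 2^130 - 5) step: h and r live in three limbs, and s1/s2 are pre-scaled copies of r1/r2 that fold the modular reduction into the schoolbook multiply. A sketch of that limb arithmetic, assuming the donna-style 44/44/42-bit limb split; the function name and exact carry order are a sketch, not a verbatim copy of poly1305_core_blocks():

    #include <stdint.h>

    typedef unsigned __int128 u128;

    /*
     * One h = h * r (mod 2^130 - 5) step with 44/44/42-bit limbs.
     * s1 = 20*r1 and s2 = 20*r2 fold the reduction into the multiply:
     * a product spilling past limb 2 weighs 2^132 = 4 * 2^130, and
     * 2^130 == 5 (mod p), hence the factor 20. Partial reduction only.
     */
    static void poly1305_mul(uint64_t h[3], const uint64_t r[3])
    {
        const uint64_t m44 = 0xfffffffffffULL, m42 = 0x3ffffffffffULL;
        uint64_t r0 = r[0], r1 = r[1], r2 = r[2];
        uint64_t s1 = r1 * 20, s2 = r2 * 20;
        uint64_t h0 = h[0], h1 = h[1], h2 = h[2], c;
        u128 d0, d1, d2;

        d0 = (u128)h0 * r0 + (u128)h1 * s2 + (u128)h2 * s1;
        d1 = (u128)h0 * r1 + (u128)h1 * r0 + (u128)h2 * s2;
        d2 = (u128)h0 * r2 + (u128)h1 * r1 + (u128)h2 * r0;

        c = (uint64_t)(d0 >> 44); h0 = (uint64_t)d0 & m44;
        d1 += c;
        c = (uint64_t)(d1 >> 44); h1 = (uint64_t)d1 & m44;
        d2 += c;
        c = (uint64_t)(d2 >> 42); h2 = (uint64_t)d2 & m42;
        h0 += c * 5;            /* fold the 2^130 overflow back in */
        c = h0 >> 44; h0 &= m44;
        h1 += c;

        h[0] = h0; h[1] = h1; h[2] = h2;
    }
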
/lib/crypto/mpi/
mpi-bit.c
73 limb = a->d[limbno]; in mpi_test_bit()
91 a->d[i] = 0; in mpi_set_bit()
122 x->d[i] = x->d[i+nlimbs]; in mpi_rshift()
123 x->d[i] = 0; in mpi_rshift()
127 mpihelp_rshift(x->d, x->d, x->nlimbs, nbits); in mpi_rshift()
137 x->d[i] = a->d[i]; in mpi_rshift()
146 x->d[i] = x->d[i+nlimbs]; in mpi_rshift()
147 x->d[i] = 0; in mpi_rshift()
151 mpihelp_rshift(x->d, x->d, x->nlimbs, nbits); in mpi_rshift()
163 mpihelp_rshift(x->d, a->d, x->nlimbs, nbits); in mpi_rshift()
[all …]
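
The mpi_rshift() hits show the two-phase shape of a multiprecision right shift: whole limbs are dropped first (x->d[i] = x->d[i+nlimbs], vacated top limbs zeroed), then mpihelp_rshift() moves the remaining sub-limb bits across limb boundaries. A self-contained sketch of the same two phases on a little-endian limb array (limbs_rshift() and limb_t are illustrative names):

    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t limb_t;
    #define LIMB_BITS 64

    /*
     * Right-shift an nlimbs-long little-endian limb array by count
     * bits: drop whole limbs first, then move the remaining bits
     * across limb boundaries -- the two phases of mpi_rshift().
     */
    static void limbs_rshift(limb_t *x, size_t nlimbs, unsigned int count)
    {
        size_t drop = count / LIMB_BITS, i;
        unsigned int bits = count % LIMB_BITS;

        for (i = 0; i + drop < nlimbs; i++)
            x[i] = x[i + drop];
        for (; i < nlimbs; i++)
            x[i] = 0;               /* vacated high limbs */

        if (bits) {                 /* the mpihelp_rshift() phase */
            for (i = 0; i < nlimbs; i++) {
                x[i] >>= bits;
                if (i + 1 < nlimbs)
                    x[i] |= x[i + 1] << (LIMB_BITS - bits);
            }
        }
    }
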
mpiutil.c
42 if (!a->d) { in mpi_alloc()
47 a->d = NULL; in mpi_alloc()
79 mpi_free_limb_space(a->d); in mpi_assign_limb_space()
80 a->d = ap; in mpi_assign_limb_space()
95 if (a->d) { in mpi_resize()
100 kfree_sensitive(a->d); in mpi_resize()
101 a->d = p; in mpi_resize()
104 if (!a->d) in mpi_resize()
117 kfree_sensitive(a->d); in mpi_free()
119 mpi_free_limb_space(a->d); in mpi_free()
[all …]
mpi-sub-ui.c
44 w->d[0] = vval; in mpi_sub_ui()
57 cy = mpihelp_add_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval); in mpi_sub_ui()
58 w->d[u->nlimbs] = cy; in mpi_sub_ui()
65 if (u->nlimbs == 1 && u->d[0] < vval) { in mpi_sub_ui()
66 w->d[0] = vval - u->d[0]; in mpi_sub_ui()
70 mpihelp_sub_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval); in mpi_sub_ui()
72 w->nlimbs = (u->nlimbs - (w->d[u->nlimbs - 1] == 0)); in mpi_sub_ui()
mpi-internal.h
63 #define MPN_COPY(d, s, n) \ argument
67 (d)[_i] = (s)[_i]; \
70 #define MPN_COPY_DECR(d, s, n) \ argument
74 (d)[_i] = (s)[_i]; \
78 #define MPN_ZERO(d, n) \ argument
82 (d)[_i] = 0; \
85 #define MPN_NORMALIZE(d, n) \ argument
88 if ((d)[(n)-1]) \
115 umul_ppmm(_xh, _xl, _q, (d)); \
125 if (_r >= (d)) { \
[all …]
mpi-pow.c
48 rp = res->d; in mpi_powm()
49 ep = exp->d; in mpi_powm()
61 rp = res->d; in mpi_powm()
77 mpihelp_lshift(mp, mod->d, msize, mod_shift_cnt); in mpi_powm()
79 MPN_COPY(mp, mod->d, msize); in mpi_powm()
89 MPN_COPY(bp, base->d, bsize); in mpi_powm()
98 bp = base->d; in mpi_powm()
118 rp = res->d; in mpi_powm()
261 rp = res->d; in mpi_powm()
267 MPN_COPY(res->d, rp, rsize); in mpi_powm()
[all …]
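
mpi_powm() is modular exponentiation over limb vectors; the hits show its setup, normalizing the modulus with mpihelp_lshift() (the mod_shift_cnt logic) and copying base and exponent limbs before the main loop. The exponent scan itself is left-to-right binary square-and-multiply, which fits in a few lines on single-word operands (a sketch of the control flow only, not the kernel's limb-level code; assumes mod > 0):

    #include <stdint.h>

    typedef unsigned __int128 u128;

    /* Left-to-right binary square-and-multiply: r = base^exp mod mod. */
    static uint64_t modpow(uint64_t base, uint64_t exp, uint64_t mod)
    {
        uint64_t r = 1 % mod;
        int i;

        base %= mod;
        for (i = 63; i >= 0; i--) {
            r = (uint64_t)((u128)r * r % mod);          /* square */
            if ((exp >> i) & 1)
                r = (uint64_t)((u128)r * base % mod);   /* multiply */
        }
        return r;
    }
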
/lib/math/
reciprocal_div.c
17 struct reciprocal_value reciprocal_value(u32 d) in reciprocal_value() argument
23 l = fls(d - 1); in reciprocal_value()
24 m = ((1ULL << 32) * ((1ULL << l) - d)); in reciprocal_value()
25 do_div(m, d); in reciprocal_value()
35 struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec) in reciprocal_value_adv() argument
42 l = fls(d - 1); in reciprocal_value_adv()
49 d, __func__); in reciprocal_value_adv()
52 do_div(mlow, d); in reciprocal_value_adv()
54 do_div(mhigh, d); in reciprocal_value_adv()
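
reciprocal_value() precomputes a magic multiplier so that later divisions by a runtime-invariant u32 d become one widening multiply plus shifts (the Granlund-Montgomery method). The matched lines compute l = ceil(log2(d)) via fls(d - 1) and m = floor(2^32 * (2^l - d) / d) + 1. A freestanding sketch of both halves, with field names modeled on the kernel's struct reciprocal_value (recip_value()/recip_divide() are illustrative names):

    #include <stdint.h>

    struct recip {
        uint32_t m;          /* magic multiplier */
        uint8_t sh1, sh2;    /* post-multiply shifts */
    };

    /* smallest l with 2^l >= d (what fls(d - 1) computes in the hits) */
    static int ceil_log2(uint32_t d)
    {
        int l = 0;

        while ((1ULL << l) < d)
            l++;
        return l;
    }

    /* Precompute m = floor(2^32 * (2^l - d) / d) + 1, as matched above. */
    static struct recip recip_value(uint32_t d)
    {
        struct recip R;
        int l = ceil_log2(d);
        uint64_t m = ((1ULL << 32) * ((1ULL << l) - d)) / d;

        R.m = (uint32_t)(m + 1);
        R.sh1 = l > 1 ? 1 : (uint8_t)l;       /* min(l, 1) */
        R.sh2 = l > 0 ? (uint8_t)(l - 1) : 0; /* max(l - 1, 0) */
        return R;
    }

    /* a / d with no divide: one widening multiply plus two shifts. */
    static uint32_t recip_divide(uint32_t a, struct recip R)
    {
        uint32_t t = (uint32_t)(((uint64_t)a * R.m) >> 32);

        return (t + ((a - t) >> R.sh1)) >> R.sh2;
    }

For example, recip_divide(100, recip_value(7)) evaluates to 14, matching 100 / 7.
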
rational.c
51 unsigned long n, d, n0, d0, n1, d1, n2, d2; in rational_best_approximation() local
53 d = given_denominator; in rational_best_approximation()
60 if (d == 0) in rational_best_approximation()
65 dp = d; in rational_best_approximation()
66 a = n / d; in rational_best_approximation()
67 d = n % d; in rational_best_approximation()
94 if (!d1 || 2u * t > a || (2u * t == a && d0 * dp > d1 * d)) { in rational_best_approximation()
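
rational_best_approximation() walks the continued-fraction expansion of the given fraction with Euclid steps (a = n / d; d = n % d) and accumulates convergents n2/d2 = (n0 + a*n1)/(d0 + a*d1) until the next one would exceed the bounds; the matched 2u * t comparison then decides whether a final semiconvergent beats the last full convergent. A stripped-down sketch using full convergents only (best_ratio() is an illustrative name; the semiconvergent refinement is omitted, and d > 0, max_d >= 1 are assumed):

    /* Best approximation of n/d with denominator <= max_d. */
    static void best_ratio(unsigned long n, unsigned long d,
                           unsigned long max_d,
                           unsigned long *best_n, unsigned long *best_d)
    {
        unsigned long n0 = 0, d0 = 1;   /* convergent before last */
        unsigned long n1 = 1, d1 = 0;   /* last convergent */

        while (d) {
            unsigned long a = n / d;            /* Euclid quotient */
            unsigned long t = n % d;
            unsigned long n2 = n0 + a * n1;     /* next convergent */
            unsigned long d2 = d0 + a * d1;

            if (d2 > max_d)
                break;
            n0 = n1; d0 = d1;
            n1 = n2; d1 = d2;
            n = d;
            d = t;
        }
        *best_n = n1;
        *best_d = d1;
    }
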
/lib/crypto/powerpc/
sha1-spe-asm.S
121 add d,d,rK; /* 2: E = E + K */ \
124 add d,d,w1; /* 2: E = E + W */ \
126 add d,d,rT0; /* 2: E = E + A' */ \
128 add d,d,rT2 /* 2: E = E + F */
147 add d,d,rT1; /* 2: E = E + WK */ \
152 add d,d,rT0; /* 2: E = E + A' */ \
154 add d,d,rT1 /* 2: E = E + F */
173 add d,d,rT1; /* 2: E = E + WK */ \
176 add d,d,rT2; /* 2: E = E + F */ \
178 add d,d,rT0 /* 2: E = E + A' */
[all …]
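
The sha1-spe-asm.S hits are two interleaved SHA-1 rounds; the /* 2: E = E + ... */ comments spell out the per-round update E += K + W + rotl(A, 5) + F(B, C, D), followed by B = rotl(B, 30). One round in scalar C for reference (the five variable roles then rotate, which the unrolled asm does by renaming registers):

    #include <stdint.h>

    static uint32_t rol32(uint32_t v, int n)
    {
        return (v << n) | (v >> (32 - n));
    }

    /*
     * One SHA-1 round, matching the asm comments above:
     * E = E + K + W + rotl(A,5) + F(B,C,D), then B = rotl(B,30).
     * F, and the K the caller supplies, depend on the round index t.
     */
    static void sha1_round(uint32_t a, uint32_t *b, uint32_t c,
                           uint32_t d, uint32_t *e, uint32_t k,
                           uint32_t w, int t)
    {
        uint32_t f;

        if (t < 20)
            f = (*b & c) | (~*b & d);             /* Ch */
        else if (t < 40 || t >= 60)
            f = *b ^ c ^ d;                       /* Parity */
        else
            f = (*b & c) | (*b & d) | (c & d);    /* Maj */

        *e += rol32(a, 5) + f + k + w;
        *b = rol32(*b, 30);
    }
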
/lib/zstd/common/
zstd_deps.h
32 #define ZSTD_memcpy(d,s,n) __builtin_memcpy((d),(s),(n)) argument
33 #define ZSTD_memmove(d,s,n) __builtin_memmove((d),(s),(n)) argument
34 #define ZSTD_memset(d,s,n) __builtin_memset((d),(s),(n)) argument
/lib/842/
842_decompress.c
59 #define beN_to_cpu(d, s) \ argument
60 ((s) == 2 ? be16_to_cpu(get_unaligned((__be16 *)d)) : \
61 (s) == 4 ? be32_to_cpu(get_unaligned((__be32 *)d)) : \
62 (s) == 8 ? be64_to_cpu(get_unaligned((__be64 *)d)) : \
80 ret = next_bits(p, d, s); in __split_next_bits()
83 *d |= tmp << s; in __split_next_bits()
100 return __split_next_bits(p, d, n, 32); in next_bits()
102 return __split_next_bits(p, d, n, 16); in next_bits()
104 return __split_next_bits(p, d, n, 8); in next_bits()
110 *d = *in >> (8 - bits); in next_bits()
[all …]
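
The 842 hits are the decompressor's big-endian bit reader: beN_to_cpu() loads 2/4/8-byte chunks, next_bits() serves reads from the current chunk, and __split_next_bits() handles reads that straddle a chunk boundary by fetching the high n - s bits first, then the low s bits, and recombining them with *d |= tmp << s. A compact byte-at-a-time reader with the same MSB-first contract (struct bitstream and this next_bits() are simplified stand-ins, not the kernel's sw842_param interface):

    #include <stddef.h>
    #include <stdint.h>

    struct bitstream {
        const uint8_t *in;   /* current byte */
        size_t len;          /* bytes remaining, including *in */
        int bit;             /* bits of *in already consumed, 0..7 */
    };

    /* Pull n (1..64) bits MSB-first; returns 0, or -1 on exhaustion. */
    static int next_bits(struct bitstream *p, uint64_t *d, int n)
    {
        uint64_t v = 0;

        while (n > 0) {
            int avail = 8 - p->bit;
            int take = n < avail ? n : avail;

            if (!p->len)
                return -1;      /* input exhausted */

            /* grab the top 'take' unread bits of the current byte */
            v = (v << take) |
                ((uint64_t)(*p->in >> (avail - take)) & ((1u << take) - 1));
            p->bit += take;
            if (p->bit == 8) {
                p->bit = 0;
                p->in++;
                p->len--;
            }
            n -= take;
        }
        *d = v;
        return 0;
    }
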
/lib/crypto/x86/
sha256-avx2-asm.S
174 add h, d # d = k + w + h + d # --
192 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
223 add h, d # d = k + w + h + d # --
245 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
274 add h, d # d = k + w + h + d # --
295 add y2,d # d = k + w + h + d + S1 + CH = d + t1 # --
336 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
388 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
428 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
468 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
[all …]
sha512-avx2-asm.S
196 add h, d # d = k + w + h + d # --
211 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
260 add h, d # d = k + w + h + d # --
276 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
315 add h, d # d = k + w + h + d # --
332 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
383 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
435 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
473 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
511 add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
[all …]
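
Both AVX2 SHA files interleave message-schedule vector code with scalar round code, and the recurring comment "d = k + w + h + d + S1 + CH = d + t1" is the standard compression step: t1 is accumulated directly into d, and the outgoing h becomes t1 + t2. One SHA-256 round in plain C for reference (SHA-512 is the same dataflow with 64-bit words and different rotate amounts):

    #include <stdint.h>

    static uint32_t ror32(uint32_t v, int n)
    {
        return (v >> n) | (v << (32 - n));
    }

    /*
     * One SHA-256 round. "d = d + t1" in the asm comments is the d
     * update below; h = t1 + t2 becomes the next round's 'a' as the
     * eight variable roles rotate.
     */
    static void sha256_round(uint32_t a, uint32_t b, uint32_t c,
                             uint32_t *d, uint32_t e, uint32_t f,
                             uint32_t g, uint32_t *h,
                             uint32_t k, uint32_t w)
    {
        uint32_t s1 = ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25);
        uint32_t ch = (e & f) ^ (~e & g);
        uint32_t t1 = *h + s1 + ch + k + w;
        uint32_t s0 = ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22);
        uint32_t maj = (a & b) ^ (a & c) ^ (b & c);
        uint32_t t2 = s0 + maj;

        *d += t1;       /* "d = k + w + h + d + S1 + CH = d + t1" */
        *h = t1 + t2;
    }
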

Completed in 28 milliseconds
