/lib/ |
hweight.c |
  16  w -= (w >> 1) & 0x55555555;  in __sw_hweight32()
  17  w = (w & 0x33333333) + ((w >> 2) & 0x33333333);  in __sw_hweight32()
  18  w = (w + (w >> 4)) & 0x0f0f0f0f;  in __sw_hweight32()
  19  return (w * 0x01010101) >> 24;  in __sw_hweight32()
  21  unsigned int res = w - ((w >> 1) & 0x55555555);  in __sw_hweight32()
  32  unsigned int res = w - ((w >> 1) & 0x5555);  in __sw_hweight16()
  41  unsigned int res = w - ((w >> 1) & 0x55);  in __sw_hweight8()
  54  w -= (w >> 1) & 0x5555555555555555ul;  in __sw_hweight64()
  55  w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);  in __sw_hweight64()
  56  w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;  in __sw_hweight64()
  [all …]
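These are the classic SWAR ("SIMD within a register") population-count steps: each line folds adjacent bit fields into wider partial sums, and the final multiply by 0x01010101 accumulates the four byte counts into the top byte (the kernel gates that multiply path on CONFIG_ARCH_HAS_FAST_MULTIPLIER; line 21 begins the fallback that adds the partial sums without multiplying). A minimal standalone sketch of the 32-bit routine:

```c
#include <stdint.h>

/* SWAR popcount, mirroring the __sw_hweight32() steps above. */
static uint32_t popcount32(uint32_t w)
{
	w -= (w >> 1) & 0x55555555;                     /* 2-bit partial sums */
	w = (w & 0x33333333) + ((w >> 2) & 0x33333333); /* 4-bit partial sums */
	w = (w + (w >> 4)) & 0x0f0f0f0f;                /* 8-bit partial sums */
	return (w * 0x01010101) >> 24;                  /* sum bytes into top byte */
}
```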
|
once.c |
  14  static void once_deferred(struct work_struct *w)  in once_deferred() argument
  18  work = container_of(w, struct once_work, work);  in once_deferred()
  27  struct once_work *w;  in once_disable_jump() local
  29  w = kmalloc(sizeof(*w), GFP_ATOMIC);  in once_disable_jump()
  30  if (!w)  in once_disable_jump()
  33  INIT_WORK(&w->work, once_deferred);  in once_disable_jump()
  34  w->key = key;  in once_disable_jump()
  35  w->module = mod;  in once_disable_jump()
  37  schedule_work(&w->work);  in once_disable_jump()
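once_disable_jump() may be called from atomic context where it cannot patch the static branch directly, so it defers: allocate a small struct that embeds a work_struct, stash the data beside it, and recover the container in the handler with container_of(). A hedged sketch of that embed-and-recover pattern (my_work, my_handler, and defer_it are illustrative names, not kernel API):

```c
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/errno.h>

struct my_work {
	struct work_struct work;  /* must be embedded, not pointed to */
	int payload;
};

static void my_handler(struct work_struct *w)
{
	/* Map the work_struct pointer back to its container. */
	struct my_work *mw = container_of(w, struct my_work, work);

	pr_info("deferred payload: %d\n", mw->payload);
	kfree(mw);
}

static int defer_it(int payload)
{
	struct my_work *mw = kmalloc(sizeof(*mw), GFP_ATOMIC);

	if (!mw)
		return -ENOMEM;
	mw->payload = payload;
	INIT_WORK(&mw->work, my_handler);
	schedule_work(&mw->work);
	return 0;
}
```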
|
inflate.c |
  171  #define flush_output(w) (wp=(w),flush_window())  argument
  463  while (k > w + l)  in huft_build()
  548  w -= l;  in huft_build()
  631  if (w == WSIZE)  in inflate_codes()
  634  w = 0;  in inflate_codes()
  666  n -= (e = (e = WSIZE - ((d &= WSIZE-1) > w ? d : w)) > n ? n : e);  in inflate_codes()
  671  w += e;  in inflate_codes()
  683  w = 0;  in inflate_codes()
  740  if (w == WSIZE)  in inflate_stored()
  742  flush_output(w);  in inflate_stored()
  [all …]
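inflate decompresses into a WSIZE-byte sliding window: w is the write index, flush_output() drains the window when w reaches WSIZE, and LZ77 matches copy from earlier window contents with indices wrapped modulo WSIZE (the dense expression at line 666 computes how many bytes fit before either index reaches the window end, clipped to the remaining length). A hedged sketch of the wraparound match copy, masking instead of flushing (WSIZE a power of two; names illustrative):

```c
#define WSIZE 0x8000u  /* window size, power of two */

static unsigned char window[WSIZE];

/* Copy a length/distance match from earlier window contents.
 * Byte-at-a-time on purpose: source and destination may overlap
 * when the match reaches into bytes it is itself producing. */
static unsigned copy_match(unsigned w, unsigned dist, unsigned len)
{
	unsigned d = (w - dist) & (WSIZE - 1);  /* source index, wrapped */

	while (len--) {
		window[w++] = window[d++];
		w &= WSIZE - 1;                 /* wrap write index */
		d &= WSIZE - 1;                 /* wrap read index  */
	}
	return w;                               /* new write position */
}
```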
|
cmpxchg-emu.c |
  21  u32 w;  member
  35  old32.w = ret;  in cmpxchg_emu_u8()
  38  new32.w = old32.w;  in cmpxchg_emu_u8()
  41  ret = data_race(cmpxchg(p32, old32.w, new32.w)); // Overridden above.  in cmpxchg_emu_u8()
  42  } while (ret != old32.w);  in cmpxchg_emu_u8()
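cmpxchg_emu_u8() emulates a one-byte compare-and-exchange on machines that only provide a 32-bit one: read the aligned word containing the byte (the `w` union member above is that full-word view), splice the new byte in, and retry the word-wide cmpxchg until no other byte changed underneath. A hedged portable sketch using C11 atomics, not the kernel's implementation (little-endian byte placement assumed; the real code derives the shift from the byte's offset within the word):

```c
#include <stdatomic.h>
#include <stdint.h>

static uint8_t cmpxchg_u8_emu(uint8_t *p, uint8_t old, uint8_t new)
{
	uintptr_t a = (uintptr_t)p;
	/* Aligned word containing the target byte. */
	_Atomic uint32_t *p32 = (_Atomic uint32_t *)(a & ~(uintptr_t)3);
	unsigned shift = (a & 3) * 8;              /* little-endian */
	uint32_t mask = 0xffu << shift;
	uint32_t w = atomic_load(p32);

	for (;;) {
		if (((w >> shift) & 0xff) != old)
			return (w >> shift) & 0xff; /* byte already differs */
		uint32_t neww = (w & ~mask) | ((uint32_t)new << shift);
		/* On failure, w is reloaded with the current value. */
		if (atomic_compare_exchange_weak(p32, &w, neww))
			return old;
	}
}
```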
|
lshrdi3.c |
  11  DWunion uu, w;  in __lshrdi3() local
  21  w.s.high = 0;  in __lshrdi3()
  22  w.s.low = (unsigned int) uu.s.high >> -bm;  in __lshrdi3()
  26  w.s.high = (unsigned int) uu.s.high >> b;  in __lshrdi3()
  27  w.s.low = ((unsigned int) uu.s.low >> b) | carries;  in __lshrdi3()
  30  return w.ll;  in __lshrdi3()
|
ashldi3.c |
  11  DWunion uu, w;  in __ashldi3() local
  21  w.s.low = 0;  in __ashldi3()
  22  w.s.high = (unsigned int) uu.s.low << -bm;  in __ashldi3()
  26  w.s.low = (unsigned int) uu.s.low << b;  in __ashldi3()
  27  w.s.high = ((unsigned int) uu.s.high << b) | carries;  in __ashldi3()
  30  return w.ll;  in __ashldi3()
|
ashrdi3.c |
  11  DWunion uu, w;  in __ashrdi3() local
  22  w.s.high =  in __ashrdi3()
  24  w.s.low = uu.s.high >> -bm;  in __ashrdi3()
  28  w.s.high = uu.s.high >> b;  in __ashrdi3()
  29  w.s.low = ((unsigned int) uu.s.low >> b) | carries;  in __ashrdi3()
  32  return w.ll;  in __ashrdi3()
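All three libgcc helpers decompose a 64-bit shift into 32-bit halves via the DWunion high/low overlay: for shift counts under 32, the bits crossing the half boundary are saved as "carries"; for counts of 32 or more (bm <= 0), one half is filled with zero or sign bits and the other takes the cross-shift by -bm. A hedged sketch of the logical-right case, mirroring __lshrdi3() (little-endian DWunion layout assumed):

```c
#include <stdint.h>

typedef union {
	uint64_t ll;
	struct { uint32_t low, high; } s;  /* little-endian layout */
} dwunion;

/* 64-bit logical shift right composed from 32-bit operations. */
static uint64_t lshr64(uint64_t u, unsigned b)
{
	dwunion uu = { .ll = u }, w;
	int bm = 32 - (int)b;

	if (b == 0)
		return u;                   /* avoid shift-by-32 UB below */
	if (bm <= 0) {                      /* shift by 32..63 bits */
		w.s.high = 0;
		w.s.low = uu.s.high >> -bm;
	} else {                            /* shift by 1..31 bits */
		uint32_t carries = uu.s.high << bm; /* bits crossing halves */
		w.s.high = uu.s.high >> b;
		w.s.low = (uu.s.low >> b) | carries;
	}
	return w.ll;
}
```

__ashldi3() is the mirror image shifting left, and __ashrdi3() differs only in using arithmetic (sign-propagating) right shifts on the high half.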
|
find_bit.c |
  74  unsigned long sz = (size), nr = (num), idx, w, tmp; \
  81  w = hweight_long(tmp); \
  82  if (w > nr) \
  85  nr -= w; \
  305  int w = bitmap_weight(addr, size);  in find_random_bit() local
  307  switch (w) {  in find_random_bit()
  314  return find_nth_bit(addr, size, get_random_u32_below(w));  in find_random_bit()
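find_nth_bit() skips whole words by subtracting each word's popcount from the target ordinal until the containing word is found (lines 81-85), and find_random_bit() builds on it to return a uniformly random set bit by drawing an index below the total weight. A hedged self-contained sketch of the word-skipping search (the kernel macro additionally masks a partial last word):

```c
#include <stdint.h>
#include <stddef.h>

/* Return the bit index of the n-th (0-based) set bit, or size if
 * fewer than n+1 bits are set.  Assumes size is a multiple of 64. */
static size_t find_nth_bit64(const uint64_t *addr, size_t size, size_t n)
{
	for (size_t idx = 0; idx * 64 < size; idx++) {
		uint64_t tmp = addr[idx];
		unsigned w = __builtin_popcountll(tmp);

		if (w > n) {
			/* Target word: clear the n lowest set bits,
			 * then the answer is the lowest remaining one. */
			while (n--)
				tmp &= tmp - 1;
			return idx * 64 + __builtin_ctzll(tmp);
		}
		n -= w;    /* skip this word's set bits entirely */
	}
	return size;
}
```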
|
bitmap.c |
  327  unsigned int __bits = (bits), idx, w = 0; \
  330  w += hweight_long(FETCH); \
  333  w += hweight_long((FETCH) & BITMAP_LAST_WORD_MASK(__bits)); \
  335  w; \
  501  unsigned int oldbit, w;  in bitmap_remap() local
  507  w = bitmap_weight(new, nbits);  in bitmap_remap()
  511  if (n < 0 || w == 0)  in bitmap_remap()
  514  set_bit(find_nth_bit(new, nbits, n % w), dst);  in bitmap_remap()
  548  int w = bitmap_weight(new, bits);  in bitmap_bitremap() local
  550  if (n < 0 || w == 0)  in bitmap_bitremap()
  [all …]
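bitmap_remap() treats the old and new masks as ordered lists of set bits: each src bit is located by its ordinal n within old, then mapped to the set bit of ordinal n % w in new (wrapping when new has fewer set bits than old, w being new's weight), which is what the `find_nth_bit(new, nbits, n % w)` call performs. A hedged sketch of the ordinal half of that mapping (the kernel uses bitmap_pos_to_ord() for this; 64-bit words assumed):

```c
#include <stdint.h>
#include <stddef.h>

/* Ordinal of 'bit' among the set bits of mask[], i.e. how many set
 * bits precede it; -1 if 'bit' itself is clear.  Illustrative only. */
static long bit_ordinal(const uint64_t *mask, size_t bit)
{
	size_t word = bit / 64;
	uint64_t me = 1ull << (bit % 64);
	long n = 0;

	if (!(mask[word] & me))
		return -1;
	for (size_t i = 0; i < word; i++)
		n += __builtin_popcountll(mask[i]);
	/* Count set bits below 'bit' within its own word. */
	return n + __builtin_popcountll(mask[word] & (me - 1));
}
```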
|
muldi3.c |
  53  DWunion w = {.ll = __umulsidi3(uu.s.low, vv.s.low)};  in __muldi3() local
  55  w.s.high += ((unsigned long) uu.s.low * (unsigned long) vv.s.high  in __muldi3()
  58  return w.ll;  in __muldi3()
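__muldi3() forms the 64-bit product from 32-bit halves: the low×low widening multiply supplies the full 64-bit base, and the two cross products only affect the high word, because the high×high term would land entirely above bit 63 and is dropped by modular arithmetic. A hedged sketch:

```c
#include <stdint.h>

typedef union {
	uint64_t ll;
	struct { uint32_t low, high; } s;  /* little-endian layout */
} dwunion;

/* 32x32 -> 64 widening multiply; stands in for __umulsidi3(). */
static uint64_t umul32x32(uint32_t a, uint32_t b)
{
	return (uint64_t)a * b;
}

static uint64_t mul64(uint64_t u, uint64_t v)
{
	dwunion uu = { .ll = u }, vv = { .ll = v };
	dwunion w = { .ll = umul32x32(uu.s.low, vv.s.low) };

	/* Cross products are shifted up 32 bits, so only the high
	 * word moves; their own high parts overflow past bit 63. */
	w.s.high += uu.s.low * vv.s.high + uu.s.high * vv.s.low;
	return w.ll;
}
```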
|
checksum.c |
  64  unsigned int w = *(unsigned int *) buff;  in do_csum() local
  67  result += w;  in do_csum()
  68  carry = (w > result);  in do_csum()
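do_csum() accumulates 32-bit words into the Internet checksum; when the addition wraps, `w > result` detects the overflow and the carry is folded back in (end-around carry), which is what makes the sum ones' complement. A hedged sketch of the word loop plus the final fold to 16 bits (alignment and byte-order handling omitted):

```c
#include <stdint.h>
#include <stddef.h>

/* Ones' complement sum over 32-bit words with end-around carry.
 * Assumes len is a multiple of 4 and buff is 4-byte aligned. */
static uint16_t csum_words(const uint8_t *buff, size_t len)
{
	uint32_t result = 0;

	for (; len >= 4; len -= 4, buff += 4) {
		uint32_t w = *(const uint32_t *)buff;

		result += w;
		result += (w > result);   /* fold the carry back in */
	}
	/* Fold 32 bits down to 16; twice, to absorb the new carry. */
	result = (result & 0xffff) + (result >> 16);
	result = (result & 0xffff) + (result >> 16);
	return (uint16_t)result;
}
```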
|
find_bit_benchmark.c |
  120  unsigned long l, n, w = bitmap_weight(bitmap, len);  in test_find_nth_bit() local
  124  for (n = 0; n < w; n++) {  in test_find_nth_bit()
  129  pr_err("find_nth_bit: %18llu ns, %6ld iterations\n", time, w);  in test_find_nth_bit()
|
sort.c |
  337  struct wrapper w = {  in sort() local
  342  return __sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w, false);  in sort()
  350  struct wrapper w = {  in sort_nonatomic() local
  355  return __sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w, true);  in sort_nonatomic()
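sort() and sort_nonatomic() share one core, __sort_r(), which takes a context pointer; the plain entry points package their context-less cmp/swap callbacks in a struct wrapper on the stack and pass its address through. A hedged sketch of that adapter pattern (names illustrative, not the kernel's):

```c
#include <stddef.h>

typedef int (*cmp_func_t)(const void *a, const void *b);
typedef int (*cmp_r_func_t)(const void *a, const void *b, const void *priv);

struct wrapper {
	cmp_func_t cmp;
};

/* Trampoline: a context-taking sort core can call a plain
 * comparator smuggled through its priv pointer. */
static int cmp_wrapper(const void *a, const void *b, const void *priv)
{
	const struct wrapper *w = priv;

	return w->cmp(a, b);
}

/* Usage shape: struct wrapper w = { .cmp = my_cmp };
 *              sort_r_core(base, num, size, cmp_wrapper, &w); */
```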
|
/lib/crypto/mpi/ |
mpi-sub-ui.c |
  42  if (mpi_resize(w, 1) < 0)  in mpi_sub_ui()
  44  w->d[0] = vval;  in mpi_sub_ui()
  45  w->nlimbs = (vval != 0);  in mpi_sub_ui()
  46  w->sign = (vval != 0);  in mpi_sub_ui()
  58  w->d[u->nlimbs] = cy;  in mpi_sub_ui()
  60  w->sign = 1;  in mpi_sub_ui()
  67  w->nlimbs = 1;  in mpi_sub_ui()
  68  w->sign = 1;  in mpi_sub_ui()
  72  w->nlimbs = (u->nlimbs - (w->d[u->nlimbs - 1] == 0));  in mpi_sub_ui()
  73  w->sign = 0;  in mpi_sub_ui()
  [all …]
|
mpi-add.c |
  18  int mpi_add(MPI w, MPI u, MPI v)  in mpi_add() argument
  31  err = RESIZE_IF_NEEDED(w, wsize);  in mpi_add()
  50  wp = w->d;  in mpi_add()
  85  w->nlimbs = wsize;  in mpi_add()
  86  w->sign = wsign;  in mpi_add()
  91  int mpi_sub(MPI w, MPI u, MPI v)  in mpi_sub() argument
  101  err = mpi_add(w, u, vv);  in mpi_sub()
  110  return mpi_add(w, u, v) ?:  in mpi_addm()
  111  mpi_mod(w, w, m);  in mpi_addm()
  117  return mpi_sub(w, u, v) ?:  in mpi_subm()
  [all …]
|
mpi-mul.c |
  18  int mpi_mul(MPI w, MPI u, MPI v)  in mpi_mul() argument
  45  wp = w->d;  in mpi_mul()
  49  if (w->alloced < wsize) {  in mpi_mul()
  56  err = mpi_resize(w, wsize);  in mpi_mul()
  59  wp = w->d;  in mpi_mul()
  95  mpi_assign_limb_space(w, wp, wsize);  in mpi_mul()
  96  w->nlimbs = wsize;  in mpi_mul()
  97  w->sign = sign_product;  in mpi_mul()
  106  int mpi_mulm(MPI w, MPI u, MPI v, MPI m)  in mpi_mulm() argument
  108  return mpi_mul(w, u, v) ?:  in mpi_mulm()
  [all …]
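The MPI routines first size the destination (mpi_resize()/RESIZE_IF_NEEDED), then run limb-wise schoolbook arithmetic; a final carry becomes one extra limb, as in mpi_sub_ui's `w->d[u->nlimbs] = cy`, and the *m variants chain the plain operation into a modular reduction with the `?:` error-propagation idiom. A hedged sketch of multi-limb addition with carry propagation (limb_t stands in for the kernel's mpi_limb_t; not the mpihelp code itself):

```c
#include <stdint.h>
#include <stddef.h>

typedef uint32_t limb_t;

/* w[] = u[] + v[] over n limbs; returns the final carry, which the
 * caller stores as one extra limb when nonzero. */
static limb_t limbs_add(limb_t *w, const limb_t *u, const limb_t *v, size_t n)
{
	limb_t cy = 0;

	for (size_t i = 0; i < n; i++) {
		limb_t s = u[i] + cy;

		cy = (s < cy);            /* carry out of adding cy   */
		s += v[i];
		cy += (s < v[i]);         /* carry out of adding v[i] */
		w[i] = s;
	}
	return cy;                        /* 0 or 1 */
}
```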
|
/lib/crypto/x86/ |
sha512-avx2-asm.S |
  167  # Calculate w[t-16] + w[t-7]
  229  # Add three components, w[t-16], w[t-7] and sigma0
  231  # Move to appropriate lanes for calculating w[16] and w[17]
  233  # Move to appropriate lanes for calculating w[18] and w[19]
  236  # Calculate w[16] and w[17] in both 128 bit lanes
  238  # Calculate sigma1 for w[16] and w[17] on both 128 bit lanes
  295  # Add sigma1 to the other components to get w[16] and w[17]
  298  # Calculate sigma1 for w[18] and w[19] for upper 128 bit lane
  351  # Add the sigma0 + w[t-7] + w[t-16] for w[18] and w[19]
  352  # to newly calculated sigma1 to get w[18] and w[19]
  [all …]
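These comments trace the SHA-512 message-schedule recurrence, which the AVX2 code evaluates two elements at a time (w[16]/w[17], then w[18]/w[19]) across 128-bit lanes: w[t] = w[t-16] + σ0(w[t-15]) + w[t-7] + σ1(w[t-2]). A hedged scalar C sketch of the same expansion:

```c
#include <stdint.h>

static uint64_t ror64(uint64_t x, unsigned r)
{
	return (x >> r) | (x << (64 - r));
}

/* Expand a SHA-512 message schedule in place: w[16..79] from
 * w[0..15].  The asm computes pairs in parallel vector lanes;
 * this sketch does one element at a time. */
static void sha512_schedule(uint64_t w[80])
{
	for (int t = 16; t < 80; t++) {
		uint64_t s0 = ror64(w[t - 15], 1) ^ ror64(w[t - 15], 8) ^
			      (w[t - 15] >> 7);
		uint64_t s1 = ror64(w[t - 2], 19) ^ ror64(w[t - 2], 61) ^
			      (w[t - 2] >> 6);

		w[t] = w[t - 16] + s0 + w[t - 7] + s1;
	}
}
```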
|
sha256-avx2-asm.S |
  160  addl \disp(%rsp, SRND), h  # h = k + w + h # --
  174  add h, d  # d = k + w + h + d # --
  190  add y1, h  # h = k + w + h + S0 # --
  192  add y2, d  # d = k + w + h + d + S1 + CH = d + t1 # --
  196  add y2, h  # h = k + w + h + S0 + S1 + CH = t1 + S0# --
  223  add h, d  # d = k + w + h + d # --
  274  add h, d  # d = k + w + h + d # --
  375  addl \disp(%rsp, SRND), h  # h = k + w + h # --
  414  addl offset(%rsp, SRND), h  # h = k + w + h # --
  454  addl offset(%rsp, SRND), h  # h = k + w + h # --
  [all …]
|
sha256-avx-asm.S |
  178  add _XFER(%rsp), y2  # y2 = k + w + S1 + CH
  180  add y2, h  # h = h + S1 + CH + k + w
  184  add h, d  # d = d + h + S1 + CH + k + w
  213  add (1*4 + _XFER)(%rsp), y2  # y2 = k + w + S1 + CH
  217  add y2, h  # h = h + S1 + CH + k + w
  221  add h, d  # d = d + h + S1 + CH + k + w
  252  add (2*4 + _XFER)(%rsp), y2  # y2 = k + w + S1 + CH
  255  add y2, h  # h = h + S1 + CH + k + w
  290  add (3*4 + _XFER)(%rsp), y2  # y2 = k + w + S1 + CH
  293  add y2, h  # h = h + S1 + CH + k + w
  [all …]
|
sha256-ssse3-asm.S |
  174  add _XFER(%rsp) , y2  # y2 = k + w + S1 + CH
  177  add y2, h  # h = h + S1 + CH + k + w
  213  add (1*4 + _XFER)(%rsp), y2  # y2 = k + w + S1 + CH
  217  add y2, h  # h = h + S1 + CH + k + w
  255  add (2*4 + _XFER)(%rsp), y2  # y2 = k + w + S1 + CH
  258  add y2, h  # h = h + S1 + CH + k + w
  296  add (3*4 + _XFER)(%rsp), y2  # y2 = k + w + S1 + CH
  299  add y2, h  # h = h + S1 + CH + k + w
  335  add offset(%rsp), y2  # y2 = k + w + S1 + CH
  337  add y2, h  # h = h + S1 + CH + k + w
  [all …]
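All three x86 SHA-256 implementations (AVX2, AVX, SSSE3) interleave the same round whose running totals the comments track: t1 = h + S1(e) + Ch(e,f,g) + k + w, then d += t1, and the new a is t1 + S0(a) + Maj(a,b,c). A hedged scalar sketch of one round (the asm keeps a..h in registers and rotates their roles each round rather than moving values; message schedule and loop structure omitted):

```c
#include <stdint.h>

static uint32_t ror32(uint32_t x, unsigned r)
{
	return (x >> r) | (x << (32 - r));
}

/* One SHA-256 round on state s[0..7] = a..h, with schedule word w
 * and round constant k. */
static void sha256_round(uint32_t s[8], uint32_t k, uint32_t w)
{
	uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
	uint32_t e = s[4], f = s[5], g = s[6], h = s[7];

	uint32_t S1 = ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25);
	uint32_t ch = (e & f) ^ (~e & g);
	uint32_t t1 = h + S1 + ch + k + w;   /* "h = k + w + h + S1 + CH" */
	uint32_t S0 = ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22);
	uint32_t maj = (a & b) ^ (a & c) ^ (b & c);
	uint32_t t2 = S0 + maj;

	s[7] = g; s[6] = f; s[5] = e;
	s[4] = d + t1;                       /* "d = d + ... + k + w"   */
	s[3] = c; s[2] = b; s[1] = a;
	s[0] = t1 + t2;                      /* "t1 + S0" plus Maj      */
}
```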
|
/lib/zstd/decompress/ |
huf_decompress.c |
  452  { U32 w;  in HUF_readDTableX1_wksp() local
  455  for (w=1; w<tableLog+1; ++w) {  in HUF_readDTableX1_wksp()
  1106  int w;  in HUF_fillDTableX2Level2() local
  1107  for (w = minWeight; w < maxWeight1; ++w) {  in HUF_fillDTableX2Level2()
  1129  int w;  in HUF_fillDTableX2() local
  1133  for (w = 1; w < wEnd; ++w) {  in HUF_fillDTableX2()
  1212  for (w=1; w<maxW+1; w++) {  in HUF_readDTableX2_wksp()
  1236  for (w=1; w<maxW+1; w++) {  in HUF_readDTableX2_wksp()
  1238  nextRankVal += wksp->rankStats[w] << (w+rescale);  in HUF_readDTableX2_wksp()
  1246  for (w = 1; w < maxW+1; w++) {  in HUF_readDTableX2_wksp()
  [all …]
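The zstd Huffman table builders work from symbol weights: rankStats[] counts symbols per weight, and a weight-w symbol occupies a number of table slots that doubles with each weight step, so the per-weight starting offsets are a shifted prefix sum (the `rankStats[w] << (w+rescale)` line, with rescale adjusting to the actual table log). A hedged, illustrative sketch of the unscaled offset computation (the real builder also rescales and handles multiple table depths):

```c
#include <stdint.h>

/* Given per-weight symbol counts, compute the starting table offset
 * for each weight class: a weight-w symbol covers 1 << (w - 1) basic
 * slots, so offsets are a weighted prefix sum.  Weight 0 means the
 * symbol is absent. */
static void rank_starts(const uint32_t count[], uint32_t start[],
			unsigned maxW)
{
	uint32_t next = 0;

	start[0] = 0;
	for (unsigned w = 1; w <= maxW; w++) {
		start[w] = next;
		next += count[w] << (w - 1);
	}
	start[maxW + 1] = next;    /* total slots used */
}
```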
|
/lib/crypto/ |
aes.c |
  92  static u32 mul_by_x(u32 w)  in mul_by_x() argument
  94  u32 x = w & 0x7f7f7f7f;  in mul_by_x()
  95  u32 y = w & 0x80808080;  in mul_by_x()
  101  static u32 mul_by_x2(u32 w)  in mul_by_x2() argument
  103  u32 x = w & 0x3f3f3f3f;  in mul_by_x2()
  104  u32 y = w & 0x80808080;  in mul_by_x2()
  105  u32 z = w & 0x40404040;  in mul_by_x2()
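mul_by_x() multiplies four GF(2^8) elements packed in one 32-bit word by x simultaneously: the masked low bits (0x7f7f7f7f) shift left within each byte, and each byte whose top bit was set gets the AES reduction constant 0x1b folded in, broadcast per byte by multiplying the isolated top bits shifted down; mul_by_x2() does the same for x² with two reduction terms. A hedged single-byte sketch of the underlying "xtime" step:

```c
#include <stdint.h>

/* Multiply one GF(2^8) element by x (the AES "xtime" step): shift
 * left, and if the top bit fell off, reduce by the AES polynomial
 * x^8 + x^4 + x^3 + x + 1, i.e. XOR with 0x1b. */
static uint8_t xtime(uint8_t b)
{
	return (uint8_t)((b << 1) ^ ((b & 0x80) ? 0x1b : 0x00));
}
```

The packed-word form works because the masks above prevent a byte's shifted-out bit from carrying into its neighbor, letting all four reductions happen in parallel.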
|
sha1.c |
  244  unsigned long w[SHA1_BLOCK_SIZE / sizeof(unsigned long)];  in __hmac_sha1_preparekey() member
  252  for (size_t i = 0; i < ARRAY_SIZE(derived_key.w); i++)  in __hmac_sha1_preparekey()
  253  derived_key.w[i] ^= REPEAT_BYTE(HMAC_IPAD_VALUE);  in __hmac_sha1_preparekey()
  257  for (size_t i = 0; i < ARRAY_SIZE(derived_key.w); i++)  in __hmac_sha1_preparekey()
  258  derived_key.w[i] ^= REPEAT_BYTE(HMAC_OPAD_VALUE ^  in __hmac_sha1_preparekey()
  [all …]
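__hmac_sha1_preparekey() derives HMAC's inner and outer states by XORing the padded key block with the ipad byte (0x36) and opad byte (0x5c), one machine word at a time via REPEAT_BYTE(); the second loop at line 258 XORs with (opad ^ ipad) because the block already carries the ipad. A hedged sketch of the word-at-a-time pad XOR (names illustrative; the kernel's REPEAT_BYTE() is word-size generic):

```c
#include <stdint.h>
#include <stddef.h>

#define HMAC_IPAD 0x36
#define HMAC_OPAD 0x5c

/* Broadcast one byte across a 64-bit word. */
#define REPEAT_BYTE64(b) ((uint64_t)(b) * 0x0101010101010101ull)

/* XOR a 64-byte key block with an HMAC pad constant, one word at a
 * time.  To go from the ipad-ed block to the opad-ed one, call with
 * pad = HMAC_OPAD ^ HMAC_IPAD, as the kernel does. */
static void apply_pad(uint64_t block[8], uint8_t pad)
{
	for (size_t i = 0; i < 8; i++)
		block[i] ^= REPEAT_BYTE64(pad);
}
```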
|
/lib/crc/mips/ |
crc32.h |
  78  CRC32(crc, value, w);  in crc32_le_arch()
  85  CRC32(crc, value, w);  in crc32_le_arch()
  121  CRC32C(crc, value, w);  in crc32c_arch()
  128  CRC32C(crc, value, w);  in crc32c_arch()
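The CRC32/CRC32C macros wrap the MIPS hardware CRC instructions, folding one 32-bit value (the `w` size suffix) into the running CRC per step; the surrounding loops consume the buffer word-at-a-time with a byte-wise tail. A hedged software sketch of the same reflected CRC32 the word-wide instruction computes, shown byte-at-a-time with the IEEE polynomial 0xEDB88320 (CRC32C uses 0x82F63B78 instead):

```c
#include <stdint.h>
#include <stddef.h>

/* Bitwise reflected CRC32; the hardware instruction produces the
 * same result for four bytes in a single step. */
static uint32_t crc32_le_soft(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			/* Conditionally XOR the polynomial when the
			 * low bit is set, branch-free. */
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
	}
	return crc;
}
```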
|
/lib/crypto/powerpc/ |
sha256-spe-asm.S |
  101  #define R_LOAD_W(a, b, c, d, e, f, g, h, w, off) \  argument
  102  LOAD_DATA(w, off) /* 1: W */ \
  113  add rT3,rT3,w; /* 1: temp1' = ch + w */ \
  122  evmergelo w,w,w; /* shift W */ \
  126  LOAD_DATA(w, off+4) /* 2: W */ \
  140  add rT3,rT3,w; /* 2: temp1' = ch + w */ \
|