| /linux/arch/arm64/crypto/ |
| sm4-ce-asm.h | 46 rev64 b1.4s, b1.4s; \ 48 ext b1.16b, b1.16b, b1.16b, #8; \ 50 rev32 b1.16b, b1.16b; \ 54 rev32 b1.16b, b1.16b; \ 91 rev64 b1.4s, b1.4s; \ 95 ext b1.16b, b1.16b, b1.16b, #8; \ 99 rev32 b1.16b, b1.16b; \ 105 rev32 b1.16b, b1.16b; \ 176 rev64 b1.4s, b1.4s; \ 184 ext b1.16b, b1.16b, b1.16b, #8; \ [all …]
|
| sm4-neon-core.S | 137 ROUND4(0, b0, b1, b2, b3); \ 138 ROUND4(1, b1, b2, b3, b0); \ 139 ROUND4(2, b2, b3, b0, b1); \ 140 ROUND4(3, b3, b0, b1, b2); \ 145 rev32 b1.16b, b1.16b; \ 149 rotate_clockwise_4x4(b0, b1, b2, b3); \ 156 rev32 b1.16b, b1.16b; \ 159 SM4_CRYPT_BLK4_BE(b0, b1, b2, b3); 222 rev32 b1.16b, b1.16b; \ 235 ROUND8(0, b0, b1, b2, b3, b4, b5, b6, b7); \ [all …]
|
| aes-neonbs-core.S | 27 eor \b2, \b2, \b1 38 eor \b3, \b3, \b1 39 eor \b1, \b1, \b5 44 eor \b1, \b1, \b4 47 eor \b6, \b6, \b1 48 eor \b1, \b1, \b5 57 eor \b1, \b1, \b7 60 eor \b1, \b1, \b3 68 eor \b1, \b1, \b4 72 eor \b1, \b1, \b5 [all …]
|
| /linux/crypto/ |
| aes_generic.c | 1179 u32 b0[4], b1[4]; in crypto_aes_encrypt() local 1198 f_nround(b1, b0, kp); in crypto_aes_encrypt() 1199 f_nround(b0, b1, kp); in crypto_aes_encrypt() 1200 f_nround(b1, b0, kp); in crypto_aes_encrypt() 1201 f_nround(b0, b1, kp); in crypto_aes_encrypt() 1202 f_nround(b1, b0, kp); in crypto_aes_encrypt() 1203 f_nround(b0, b1, kp); in crypto_aes_encrypt() 1204 f_nround(b1, b0, kp); in crypto_aes_encrypt() 1205 f_nround(b0, b1, kp); in crypto_aes_encrypt() 1206 f_nround(b1, b0, kp); in crypto_aes_encrypt() [all …]
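The crypto_aes_encrypt() hits above show the two round buffers b0 and b1 alternating, so each round writes into the buffer the next round reads and no per-round copy is needed. A minimal C sketch of that ping-pong pattern; round_fn() and NROUNDS are illustrative stand-ins, not the kernel's f_nround():

```c
#include <stdint.h>

#define NROUNDS 10	/* AES-128 round count; rk needs 4 * NROUNDS words */

/* Placeholder round: real AES also substitutes and mixes columns. */
static void round_fn(uint32_t dst[4], const uint32_t src[4],
		     const uint32_t *rk)
{
	for (int i = 0; i < 4; i++)
		dst[i] = src[i] ^ rk[i];
}

static void encrypt_pingpong(uint32_t state[4], const uint32_t *rk)
{
	uint32_t b0[4], b1[4];

	for (int i = 0; i < 4; i++)
		b0[i] = state[i];

	for (int r = 0; r < NROUNDS; r += 2) {
		round_fn(b1, b0, rk + 4 * r);		/* even round: b0 -> b1 */
		round_fn(b0, b1, rk + 4 * (r + 1));	/* odd round:  b1 -> b0 */
	}

	for (int i = 0; i < 4; i++)
		state[i] = b0[i];
}
```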
|
| xor.c | 83 do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2) in do_xor_speed() argument 101 tmpl->do_2(BENCH_SIZE, b1, b2); in do_xor_speed() 118 void *b1, *b2; in calibrate_xor_blocks() local 130 b1 = (void *) __get_free_pages(GFP_KERNEL, 2); in calibrate_xor_blocks() 131 if (!b1) { in calibrate_xor_blocks() 135 b2 = b1 + 2*PAGE_SIZE + BENCH_SIZE; in calibrate_xor_blocks() 142 #define xor_speed(templ) do_xor_speed((templ), b1, b2) in calibrate_xor_blocks() 157 free_pages((unsigned long)b1, 2); in calibrate_xor_blocks()
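do_xor_speed() times one template's do_2() over a fixed buffer pair, and calibrate_xor_blocks() runs every template over the same buffers to keep the fastest. A rough userspace sketch of the measurement loop; the clock()-based window and the simplified template struct are stand-ins for the kernel's timing and xor_block_template:

```c
#include <time.h>

#define BENCH_SIZE 4096

struct xor_tmpl {
	const char *name;
	void (*do_2)(unsigned long bytes, void *p1, void *p2);
	unsigned long speed;	/* iterations completed in the window */
};

static void bench_one(struct xor_tmpl *t, void *b1, void *b2)
{
	clock_t end = clock() + CLOCKS_PER_SEC / 8;	/* ~125 ms window */
	unsigned long n = 0;

	while (clock() < end) {
		t->do_2(BENCH_SIZE, b1, b2);
		n++;
	}
	t->speed = n;	/* higher iteration count == faster template */
}
```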
|
| /linux/drivers/atm/ |
| fore200e.h | 71 #define BITFIELD2(b1, b2) b1; b2; argument 72 #define BITFIELD3(b1, b2, b3) b1; b2; b3; argument 73 #define BITFIELD4(b1, b2, b3, b4) b1; b2; b3; b4; argument 74 #define BITFIELD5(b1, b2, b3, b4, b5) b1; b2; b3; b4; b5; argument 75 #define BITFIELD6(b1, b2, b3, b4, b5, b6) b1; b2; b3; b4; b5; b6; argument 77 #define BITFIELD2(b1, b2) b2; b1; argument 78 #define BITFIELD3(b1, b2, b3) b3; b2; b1; argument 79 #define BITFIELD4(b1, b2, b3, b4) b4; b3; b2; b1; argument 80 #define BITFIELD5(b1, b2, b3, b4, b5) b5; b4; b3; b2; b1; argument 81 #define BITFIELD6(b1, b2, b3, b4, b5, b6) b6; b5; b4; b3; b2; b1; argument
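The BITFIELDn macros emit the same member list in declaration order on one bitfield endianness and reversed on the other, so a hardware-defined register layout maps correctly on either kind of host. A usage sketch; the hw_reg layout is invented for illustration, and the kernel keys the #if off __BIG_ENDIAN_BITFIELD from <asm/byteorder.h>:

```c
#include <stdint.h>

#if defined(__BIG_ENDIAN_BITFIELD)
#define BITFIELD3(b1, b2, b3)	b1; b2; b3;
#else
#define BITFIELD3(b1, b2, b3)	b3; b2; b1;
#endif

/* An 8-bit device register whose fields are specified MSB-first. */
struct hw_reg {
	BITFIELD3(uint8_t opcode : 4,
		  uint8_t flag   : 1,
		  uint8_t count  : 3)
};
```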
|
| /linux/Documentation/arch/arm64/ |
| elf_hwcaps.rst | 252 Functionality implied by ID_AA64SMFR0_EL1.F64F64 == 0b1. 267 Functionality implied by ID_AA64SMFR0_EL1.FA64 == 0b1. 300 Functionality implied by ID_AA64SMFR0_EL1.B16B16 == 0b1 303 Functionality implied by ID_AA64SMFR0_EL1.F16F16 == 0b1 330 Functionality implied by ID_AA64FPFR0_EL1.F8CVT == 0b1. 333 Functionality implied by ID_AA64FPFR0_EL1.F8FMA == 0b1. 336 Functionality implied by ID_AA64FPFR0_EL1.F8DP4 == 0b1. 339 Functionality implied by ID_AA64FPFR0_EL1.F8DP2 == 0b1. 348 Functionality implied by ID_AA64SMFR0_EL1.LUTv2 == 0b1. 351 Functionality implied by ID_AA64SMFR0_EL1.F8F16 == 0b1. [all …]
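Userspace normally tests these capabilities through the ELF auxiliary vector rather than reading ID_AA64SMFR0_EL1 or ID_AA64FPFR0_EL1 directly. A small check using getauxval(), assuming an arm64 toolchain that defines HWCAP2_SME in <asm/hwcap.h>:

```c
#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>	/* arm64 HWCAP/HWCAP2 bit definitions */

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

#ifdef HWCAP2_SME
	if (hwcap2 & HWCAP2_SME)
		puts("SME supported");
#endif
	return 0;
}
```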
|
| booting.rst | 221 - SCR_EL3.HCE (bit 8) must be initialised to 0b1. 256 - SCR_EL3.APK (bit 16) must be initialised to 0b1 257 - SCR_EL3.API (bit 17) must be initialised to 0b1 261 - HCR_EL2.APK (bit 40) must be initialised to 0b1 262 - HCR_EL2.API (bit 41) must be initialised to 0b1 292 - SCR_EL3.HXEn (bit 38) must be initialised to 0b1. 308 - CPTR_EL3.EZ (bit 8) must be initialised to 0b1. 326 - CPTR_EL3.ESM (bit 12) must be initialised to 0b1. 366 - SCR_EL3.ATA (bit 26) must be initialised to 0b1. 370 - HCR_EL2.ATA (bit 56) must be initialised to 0b1. [all …]
|
| /linux/drivers/isdn/mISDN/ |
| dsp_biquad.h | 19 int32_t b1; member 27 int32_t gain, int32_t a1, int32_t a2, int32_t b1, int32_t b2) in biquad2_init() argument 32 bq->b1 = b1; in biquad2_init() 45 y = z0 + bq->z1 * bq->b1 + bq->z2 * bq->b2; in biquad2()
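biquad2() evaluates a two-pole, two-zero IIR section: the a taps feed the delayed states back into the new internal state z0, and the b taps combine the delayed states into the output y, as line 45 shows. A standalone fixed-point sketch of the same structure; the Q15 >>15 scaling is an assumption for illustration, not necessarily mISDN's convention:

```c
#include <stdint.h>

struct biquad2_state {
	int32_t gain, a1, a2, b1, b2;	/* Q15 coefficients (assumed) */
	int32_t z1, z2;			/* delay line */
};

static int16_t biquad2_step(struct biquad2_state *bq, int16_t sample)
{
	int32_t z0, y;

	/* feedback path: new state from input and delayed states */
	z0 = (int32_t)(((int64_t)sample * bq->gain +
			(int64_t)bq->z1 * bq->a1 +
			(int64_t)bq->z2 * bq->a2) >> 15);
	/* feedforward path: output from new and delayed states */
	y = z0 + (int32_t)(((int64_t)bq->z1 * bq->b1 +
			    (int64_t)bq->z2 * bq->b2) >> 15);

	bq->z2 = bq->z1;	/* shift the delay line */
	bq->z1 = z0;
	return (int16_t)y;
}
```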
|
| /linux/fs/f2fs/ |
| hash.c | 28 __u32 b0 = buf[0], b1 = buf[1]; in TEA_transform() local 34 b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b); in TEA_transform() 35 b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d); in TEA_transform() 39 buf[1] += b1; in TEA_transform()
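The hit lines at 34-35 are the complete TEA-style round used by the f2fs directory hash. Written out as a standalone function; the 16-round count and the 0x9E3779B9 delta follow the f2fs/ext4 dirhash variant, with a, b, c, d as the four key words:

```c
#include <stdint.h>

#define TEA_DELTA 0x9E3779B9u

static void tea_transform(uint32_t buf[2], const uint32_t in[4])
{
	uint32_t sum = 0;
	uint32_t b0 = buf[0], b1 = buf[1];
	uint32_t a = in[0], b = in[1], c = in[2], d = in[3];
	int n = 16;

	do {
		sum += TEA_DELTA;
		b0 += ((b1 << 4) + a) ^ (b1 + sum) ^ ((b1 >> 5) + b);
		b1 += ((b0 << 4) + c) ^ (b0 + sum) ^ ((b0 >> 5) + d);
	} while (--n);

	buf[0] += b0;
	buf[1] += b1;
}
```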
|
| /linux/arch/arm/include/asm/ |
| xor.h | 26 : "=r" (src), "=r" (b1), "=r" (b2) \ 28 __XOR(a1, b1); __XOR(a2, b2); 32 : "=r" (src), "=r" (b1), "=r" (b2), "=r" (b3), "=r" (b4) \ 34 __XOR(a1, b1); __XOR(a2, b2); __XOR(a3, b3); __XOR(a4, b4) 55 register unsigned int b1 __asm__("r8"); in xor_arm4regs_2() 77 register unsigned int b1 __asm__("r8"); in xor_arm4regs_3() 99 register unsigned int b1 __asm__("ip"); in xor_arm4regs_4() 121 register unsigned int b1 __asm__("ip"); in xor_arm4regs_5()
|
| /linux/fs/reiserfs/ |
| hashes.c | 28 u32 b0, b1; \ 31 b1 = h1; \ 36 b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b); \ 37 b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d); \ 41 h1 += b1; \
|
| /linux/drivers/crypto/nx/ |
| nx-aes-ccm.c | 164 u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL; in generate_pat() local 192 b1 = nx_ctx->priv.ccm.iauth_tag; in generate_pat() 199 b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1; in generate_pat() 203 b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1; in generate_pat() 216 if (b1) { in generate_pat() 217 memset(b1, 0, 16); in generate_pat() 219 *(u16 *)b1 = assoclen; in generate_pat() 220 scatterwalk_map_and_copy(b1 + 2, req->src, 0, in generate_pat() 223 *(u16 *)b1 = (u16)(0xfffe); in generate_pat() 224 *(u32 *)&b1[2] = assoclen; in generate_pat() [all …]
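generate_pat() builds CCM's B1 block: associated-data lengths below 0xff00 are stored as two bytes, larger ones behind the 0xfffe marker with a four-byte length, which is the encoding CCM defines (RFC 3610 / NIST SP 800-38C). A byte-wise sketch of that encoding; the driver above performs the same layout with direct u16/u32 stores into the coprocessor block:

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Returns the offset in b1 where the AAD bytes start. */
static size_t ccm_encode_adata_len(uint8_t b1[16], uint32_t assoclen)
{
	memset(b1, 0, 16);

	if (assoclen < 0xFF00) {
		/* short form: two big-endian length bytes */
		b1[0] = assoclen >> 8;
		b1[1] = assoclen & 0xFF;
		return 2;
	}

	/* long form: 0xff 0xfe marker, then four big-endian bytes */
	b1[0] = 0xFF;
	b1[1] = 0xFE;
	b1[2] = assoclen >> 24;
	b1[3] = assoclen >> 16;
	b1[4] = assoclen >> 8;
	b1[5] = assoclen & 0xFF;
	return 6;
}
```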
|
| /linux/arch/arm/nwfpe/ |
| softfloat-macros | 350 z1 = a1 + b1; 371 bits64 b1, 383 z1 = a1 + b1; 409 *z1Ptr = a1 - b1; 410 *z0Ptr = a0 - b0 - ( a1 < b1 ); 429 bits64 b1, 441 z1 = a1 - b1; 442 borrow0 = ( a1 < b1 ); 525 bits64 b1, 562 bits64 b0, b1; [all …]
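These macros are the classic double-word arithmetic idiom: after adding the low halves, the carry out is (z1 < a1); before subtracting them, the borrow is (a1 < b1), exactly as lines 409-410 and 442 show. In plain C:

```c
#include <stdint.h>

static void add128(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1,
		   uint64_t *z0, uint64_t *z1)
{
	*z1 = a1 + b1;
	*z0 = a0 + b0 + (*z1 < a1);	/* low-half wraparound => carry in */
}

static void sub128(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1,
		   uint64_t *z0, uint64_t *z1)
{
	*z1 = a1 - b1;
	*z0 = a0 - b0 - (a1 < b1);	/* low-half underflow => borrow */
}
```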
|
| /linux/arch/riscv/crypto/ |
| chacha-riscv64-zvkb.S | 76 .macro chacha_round a0, b0, c0, d0, a1, b1, c1, d1, \ 80 vadd.vv \a1, \a1, \b1 98 vxor.vv \b1, \b1, \c1 102 vror.vi \b1, \b1, 32 - 12 108 vadd.vv \a1, \a1, \b1 126 vxor.vv \b1, \b1, \c1 130 vror.vi \b1, \b1, 32 - 7
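The vector macro applies the standard ChaCha quarter-round to several states at once; the `vror.vi ..., 32 - n` immediates are right-rotates standing in for the left-rotations by 16, 12, 8 and 7. One lane in scalar C:

```c
#include <stdint.h>

static inline uint32_t rotl32(uint32_t x, int n)
{
	return (x << n) | (x >> (32 - n));
}

static void chacha_quarterround(uint32_t *a, uint32_t *b,
				uint32_t *c, uint32_t *d)
{
	*a += *b; *d ^= *a; *d = rotl32(*d, 16);
	*c += *d; *b ^= *c; *b = rotl32(*b, 12);
	*a += *b; *d ^= *a; *d = rotl32(*d, 8);
	*c += *d; *b ^= *c; *b = rotl32(*b, 7);
}
```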
|
| /linux/tools/mm/ |
| slabinfo.c | 999 b1, b2, b3, b4); in totals() 1004 b1, b2, b3, b4); in totals() 1009 b1, b2, b3, b4); in totals() 1014 b1, b2, b3, b4); in totals() 1020 b1, b2, b3, b4); in totals() 1026 b1, b2, b3, b4); in totals() 1031 b1, b2, b3, b4); in totals() 1036 b1, b2, b3, b4); in totals() 1052 b1, b2, b3); in totals() 1056 b1, b2, b3); in totals() [all …]
|
| /linux/arch/powerpc/kernel/vdso/ |
| vgetrandom-chacha.S | 52 .macro quarterround4 a1 b1 c1 d1 a2 b2 c2 d2 a3 b3 c3 d3 a4 b4 c4 d4 53 add \a1, \a1, \b1 69 xor \b1, \b1, \c1 73 rotlwi \b1, \b1, 12 77 add \a1, \a1, \b1 93 xor \b1, \b1, \c1 97 rotlwi \b1, \b1, 7 103 #define QUARTERROUND4(a1,b1,c1,d1,a2,b2,c2,d2,a3,b3,c3,d3,a4,b4,c4,d4) \ argument 104 quarterround4 state##a1 state##b1 state##c1 state##d1 \
|
| /linux/arch/arm/crypto/ |
| aes-neonbs-core.S | 81 veor \b2, \b2, \b1 92 veor \b3, \b3, \b1 93 veor \b1, \b1, \b5 98 veor \b1, \b1, \b4 101 veor \b6, \b6, \b1 102 veor \b1, \b1, \b5 111 veor \b1, \b1, \b7 114 veor \b1, \b1, \b3 122 veor \b1, \b1, \b4 126 veor \b1, \b1, \b5 [all …]
|
| blake2s-core.S | 68 .macro _blake2s_quarterround a0, b0, c0, d0, a1, b1, c1, d1, s0, s1, s2, s3 75 add \a1, \a1, \b1, ror #brot 89 eor \b1, \c1, \b1, ror #brot 96 add \a1, \a1, \b1, ror #12 110 eor \b1, \c1, \b1, ror#12
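_blake2s_quarterround interleaves two columns of the BLAKE2s G function and folds the rotations into ARM's flexible second operand (`ror #brot`). The scalar G function it computes per column, with m0/m1 as the two message words selected by s0/s1 and right-rotations by 16, 12, 8 and 7:

```c
#include <stdint.h>

static inline uint32_t rotr32(uint32_t x, int n)
{
	return (x >> n) | (x << (32 - n));
}

static void blake2s_g(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d,
		      uint32_t m0, uint32_t m1)
{
	*a += *b + m0; *d = rotr32(*d ^ *a, 16);
	*c += *d;      *b = rotr32(*b ^ *c, 12);
	*a += *b + m1; *d = rotr32(*d ^ *a, 8);
	*c += *d;      *b = rotr32(*b ^ *c, 7);
}
```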
|
| /linux/arch/xtensa/platforms/iss/include/platform/ |
| simcall-iss.h | 61 register int b1 asm("a3") = b; in __simc() 66 : "+r"(a1), "+r"(b1) in __simc() 69 errno = b1; in __simc()
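__simc() relies on GNU C's register-pinning extension: `register int b1 asm("a3")` binds a local to a specific machine register, so the trap instruction inside the asm statement finds its arguments exactly where the simulator ABI expects them. A sketch of the idiom; Xtensa register names, compiles only for that target, and the argument list is trimmed relative to the real __simc():

```c
/* Hypothetical two-argument simulator call using pinned registers. */
static inline int simc_call(int nr, int a, int b)
{
	register int a1 asm("a2") = nr;	/* request number in a2 */
	register int b1 asm("a3") = a;	/* first argument in a3 */
	register int c1 asm("a4") = b;	/* second argument in a4 */

	asm volatile("simcall"
		     : "+r"(a1), "+r"(b1), "+r"(c1)
		     :
		     : "memory");

	return a1;	/* result comes back in a2 */
}
```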
|
| /linux/scripts/ |
| parse-maintainers.pl | 79 my $b1 = uc(substr($b, 0, 1)); 82 my $b_index = index($preferred_order, $b1); 87 if (($a1 =~ /^F$/ && $b1 =~ /^F$/) || 88 ($a1 =~ /^X$/ && $b1 =~ /^X$/)) {
|
| /linux/arch/s390/net/ |
| bpf_jit_comp.c | 124 u32 r1 = reg2hex[b1]; in reg_set_seen() 149 REG_SET_SEEN(b1); \ 163 REG_SET_SEEN(b1); \ 170 REG_SET_SEEN(b1); \ 185 REG_SET_SEEN(b1); \ 193 REG_SET_SEEN(b1); \ 235 REG_SET_SEEN(b1); \ 245 REG_SET_SEEN(b1); \ 254 REG_SET_SEEN(b1); \ 262 REG_SET_SEEN(b1); \ [all …]
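REG_SET_SEEN() maps a BPF register operand to its s390 host register and records the use, so the JIT prologue and epilogue only save and restore registers the program actually touches. A simplified sketch of that bookkeeping; reg2hex here is an illustrative identity map, not the real s390 mapping table:

```c
#include <stdint.h>

static const uint8_t reg2hex[16] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};

struct jit_ctx {
	uint16_t seen_regs;	/* bit n set => host register n is used */
};

static inline void reg_set_seen(struct jit_ctx *jit, uint8_t b1)
{
	uint32_t r1 = reg2hex[b1];	/* BPF reg -> host reg */

	jit->seen_regs |= 1u << r1;
}
```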
|
| /linux/drivers/mtd/nand/ |
| ecc-sw-hamming.c | 378 unsigned char b0, b1, b2, bit_addr; in ecc_sw_hamming_correct() local 388 b1 = read_ecc[1] ^ calc_ecc[1]; in ecc_sw_hamming_correct() 391 b1 = read_ecc[0] ^ calc_ecc[0]; in ecc_sw_hamming_correct() 401 if ((b0 | b1 | b2) == 0) in ecc_sw_hamming_correct() 405 (((b1 ^ (b1 >> 1)) & 0x55) == 0x55) && in ecc_sw_hamming_correct() 426 byte_addr = (addressbits[b1] << 4) + addressbits[b0]; in ecc_sw_hamming_correct() 429 (addressbits[b1] << 4) + addressbits[b0]; in ecc_sw_hamming_correct() 437 if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1) in ecc_sw_hamming_correct()
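ecc_sw_hamming_correct() classifies the XOR of the stored and recomputed ECC bytes: all zero means no error; a syndrome made entirely of 01/10 bit pairs (the 0x55-style checks) pinpoints a single correctable flipped bit whose address is interleaved into the syndrome; exactly one set syndrome bit means the ECC bytes themselves took the hit. A sketch of that decision tree, with the addressbits[]/bitsperbyte[] lookups replaced by a popcount and the bit/byte address extraction elided:

```c
#include <stdint.h>

enum ecc_result { ECC_OK, ECC_CORRECTABLE, ECC_ECC_ERR, ECC_UNCORRECTABLE };

static enum ecc_result classify_syndrome(const uint8_t read_ecc[3],
					 const uint8_t calc_ecc[3])
{
	uint8_t b0 = read_ecc[0] ^ calc_ecc[0];
	uint8_t b1 = read_ecc[1] ^ calc_ecc[1];
	uint8_t b2 = read_ecc[2] ^ calc_ecc[2];

	if ((b0 | b1 | b2) == 0)
		return ECC_OK;		/* data and ECC agree */

	/*
	 * Every bit pair must be 01 or 10 for a single flipped data bit;
	 * b2 uses the 0x54 mask because it carries fewer address bits.
	 */
	if ((((b0 ^ (b0 >> 1)) & 0x55) == 0x55) &&
	    (((b1 ^ (b1 >> 1)) & 0x55) == 0x55) &&
	    (((b2 ^ (b2 >> 1)) & 0x54) == 0x54))
		return ECC_CORRECTABLE;

	/* exactly one syndrome bit set: the error is in the ECC itself */
	if (__builtin_popcount(b0) + __builtin_popcount(b1) +
	    __builtin_popcount(b2) == 1)
		return ECC_ECC_ERR;

	return ECC_UNCORRECTABLE;
}
```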
|
| /linux/arch/x86/crypto/ |
| cast6-avx-x86_64-asm_64.S | 129 #define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \ argument 130 F_head(b1, RX, RGI1, RGI2, op0); \ 133 F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \ 139 #define F1_2(a1, b1, a2, b2) \ argument 140 F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl) 141 #define F2_2(a1, b1, a2, b2) \ argument 142 F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl) 143 #define F3_2(a1, b1, a2, b2) \ argument 144 F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
|
| cast5-avx-x86_64-asm_64.S | 129 #define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \ argument 130 F_head(b1, RX, RGI1, RGI2, op0); \ 133 F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \ 139 #define F1_2(a1, b1, a2, b2) \ argument 140 F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl) 141 #define F2_2(a1, b1, a2, b2) \ argument 142 F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl) 143 #define F3_2(a1, b1, a2, b2) \ argument 144 F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl) 146 #define subround(a1, b1, a2, b2, f) \ argument [all …]
|