| /arch/arm64/crypto/ |
| sm4-ce-asm.h | 60 sm4e b2.4s, v24.4s; \ 64 sm4e b2.4s, v25.4s; \ 68 sm4e b2.4s, v26.4s; \ 92 rev64 b2.4s, b2.4s; \ 96 ext b2.16b, b2.16b, b2.16b, #8; \ 100 rev32 b2.16b, b2.16b; \ 106 rev32 b2.16b, b2.16b; \ 177 rev64 b2.4s, b2.4s; \ 185 ext b2.16b, b2.16b, b2.16b, #8; \ 193 rev32 b2.16b, b2.16b; \ [all …]
|
| sm4-neon-core.S | 137 ROUND4(0, b0, b1, b2, b3); \ 138 ROUND4(1, b1, b2, b3, b0); \ 139 ROUND4(2, b2, b3, b0, b1); \ 140 ROUND4(3, b3, b0, b1, b2); \ 146 rev32 b2.16b, b2.16b; \ 149 rotate_clockwise_4x4(b0, b1, b2, b3); \ 157 rev32 b2.16b, b2.16b; \ 159 SM4_CRYPT_BLK4_BE(b0, b1, b2, b3); 223 rev32 b2.16b, b2.16b; \ 235 ROUND8(0, b0, b1, b2, b3, b4, b5, b6, b7); \ [all …]
|
| aes-neonbs-core.S | 26 .macro in_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7 27 eor \b2, \b2, \b1 30 eor \b6, \b6, \b2 37 eor \b2, \b2, \b7 46 eor \b2, \b2, \b0 52 eor \b2, \b2, \b5 61 eor \b2, \b2, \b5 64 eor \b2, \b2, \b0 73 eor \b2, \b2, \b7 80 eor \b6, \b6, \b2 [all …]
|
| sm4-ce-gcm-core.S | 139 rev32 b2.16b, b2.16b; \ 145 sm4e b2.4s, v24.4s; \ 151 sm4e b2.4s, v25.4s; \ 157 sm4e b2.4s, v26.4s; \ 163 sm4e b2.4s, v27.4s; \ 169 sm4e b2.4s, v28.4s; \ 175 sm4e b2.4s, v29.4s; \ 181 sm4e b2.4s, v30.4s; \ 193 rev64 b2.4s, b2.4s; \ 199 ext b2.16b, b2.16b, b2.16b, #8; \ [all …]
|
| /arch/arm/include/asm/ |
| xor.h | 26 : "=r" (src), "=r" (b1), "=r" (b2) \ 28 __XOR(a1, b1); __XOR(a2, b2); 32 : "=r" (src), "=r" (b1), "=r" (b2), "=r" (b3), "=r" (b4) \ 34 __XOR(a1, b1); __XOR(a2, b2); __XOR(a3, b3); __XOR(a4, b4) 56 register unsigned int b2 __asm__("r9"); in xor_arm4regs_2() 78 register unsigned int b2 __asm__("r9"); in xor_arm4regs_3() 100 register unsigned int b2 __asm__("lr"); in xor_arm4regs_4() 122 register unsigned int b2 __asm__("lr"); in xor_arm4regs_5()
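The xor_arm4regs_*() hits above rely on GCC explicit register variables: locals pinned to fixed ARM registers so a single load-multiple instruction can fill them, after which the XORs stay in plain C. A minimal sketch of that pattern, simplified to two words per iteration; function and variable names here are illustrative, not the kernel's exact helpers:

```c
static void xor_blocks_sketch(unsigned long bytes, unsigned int *p1,
			      const unsigned int *p2)
{
	unsigned int lines = bytes / sizeof(unsigned int) / 2;

	do {
		/* Pinned to fixed registers, as the kernel pins its temporaries. */
		register unsigned int b1 __asm__("r8");
		register unsigned int b2 __asm__("r9");
		const unsigned int *src = p2;

		/* One ldmia fills both pinned registers from the source block. */
		__asm__("ldmia	%0, {%1, %2}"
			: "=r" (src), "=r" (b1), "=r" (b2)
			: "0" (src));

		/* XOR into the destination block in ordinary C. */
		p1[0] ^= b1;
		p1[1] ^= b2;

		p1 += 2;
		p2 += 2;
	} while (--lines);
}
```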
|
| /arch/s390/include/asm/ |
| fpu-insn-asm.h | 288 GR_NUM b2, "%r0" 320 GR_NUM b2, \base 330 GR_NUM b2, \base 371 GR_NUM b2, \base 394 GR_NUM b2, \base 404 GR_NUM b2, \base 414 GR_NUM b2, \base 436 GR_NUM b2, \base 549 GR_NUM b2, \base 559 GR_NUM b2, \base [all …]
|
| /arch/powerpc/kernel/vdso/ |
| vgetrandom-chacha.S | 52 .macro quarterround4 a1 b1 c1 d1 a2 b2 c2 d2 a3 b3 c3 d3 a4 b4 c4 d4 54 add \a2, \a2, \b2 70 xor \b2, \b2, \c2 74 rotlwi \b2, \b2, 12 78 add \a2, \a2, \b2 94 xor \b2, \b2, \c2 98 rotlwi \b2, \b2, 7 103 #define QUARTERROUND4(a1,b1,c1,d1,a2,b2,c2,d2,a3,b3,c3,d3,a4,b4,c4,d4) \ argument 105 state##a2 state##b2 state##c2 state##d2 \
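The quarterround4 macro above interleaves four ChaCha quarter-rounds, each built from the same add / xor / rotate-left steps (rotations by 16, 12, 8 and 7). For orientation, the scalar form of one quarter-round per RFC 8439, as a minimal C sketch:

```c
#include <stdint.h>

#define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))

/* One ChaCha quarter-round on four words of the 16-word state. */
static inline void chacha_quarterround(uint32_t x[16], int a, int b, int c, int d)
{
	x[a] += x[b]; x[d] ^= x[a]; x[d] = ROTL32(x[d], 16);
	x[c] += x[d]; x[b] ^= x[c]; x[b] = ROTL32(x[b], 12);
	x[a] += x[b]; x[d] ^= x[a]; x[d] = ROTL32(x[d], 8);
	x[c] += x[d]; x[b] ^= x[c]; x[b] = ROTL32(x[b], 7);
}
```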
|
| /arch/x86/lib/ |
| insn.c | 214 if (X86_MODRM_MOD(b2) != 3) in insn_get_prefixes() 218 insn_set_byte(&insn->vex_prefix, 1, b2); in insn_get_prefixes() 220 b2 = peek_nbyte_next(insn_byte_t, insn, 2); in insn_get_prefixes() 221 insn_set_byte(&insn->vex_prefix, 2, b2); in insn_get_prefixes() 222 b2 = peek_nbyte_next(insn_byte_t, insn, 3); in insn_get_prefixes() 223 insn_set_byte(&insn->vex_prefix, 3, b2); in insn_get_prefixes() 226 if (insn->x86_64 && X86_VEX_W(b2)) in insn_get_prefixes() 230 b2 = peek_nbyte_next(insn_byte_t, insn, 2); in insn_get_prefixes() 231 insn_set_byte(&insn->vex_prefix, 2, b2); in insn_get_prefixes() 234 if (insn->x86_64 && X86_VEX_W(b2)) in insn_get_prefixes() [all …]
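The insn_get_prefixes() hits above peek at the bytes of a VEX prefix and test X86_VEX_W(). For a 3-byte VEX prefix (0xC4), the byte being tested carries the W/vvvv/L/pp fields. A rough sketch of pulling those fields apart; the helper and struct names are illustrative, not the decoder's real API:

```c
#include <stdbool.h>
#include <stdint.h>

struct vex3_fields {
	uint8_t w;	/* VEX.W: operand-size / opcode-extension bit */
	uint8_t vvvv;	/* extra source register, stored one's-complemented */
	uint8_t l;	/* vector length: 0 = 128-bit, 1 = 256-bit */
	uint8_t pp;	/* implied legacy prefix: none / 66 / F3 / F2 */
};

static bool vex_w_promotes(uint8_t b2, bool x86_64, struct vex3_fields *out)
{
	out->w    = (b2 >> 7) & 1;
	out->vvvv = (~b2 >> 3) & 0xf;
	out->l    = (b2 >> 2) & 1;
	out->pp   = b2 & 0x3;

	/* As in the decoder above, VEX.W only matters when decoding 64-bit code. */
	return x86_64 && out->w;
}
```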
|
| /arch/x86/crypto/ |
| cast5-avx-x86_64-asm_64.S | 129 #define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \ argument 131 F_head(b2, RX, RGI3, RGI4, op0); \ 134 F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \ 139 #define F1_2(a1, b1, a2, b2) \ argument 140 F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl) 141 #define F2_2(a1, b1, a2, b2) \ argument 142 F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl) 143 #define F3_2(a1, b1, a2, b2) \ argument 144 F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl) 146 #define subround(a1, b1, a2, b2, f) \ argument [all …]
|
| cast6-avx-x86_64-asm_64.S | 129 #define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \ argument 131 F_head(b2, RX, RGI3, RGI4, op0); \ 134 F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \ 139 #define F1_2(a1, b1, a2, b2) \ argument 140 F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl) 141 #define F2_2(a1, b1, a2, b2) \ argument 142 F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl) 143 #define F3_2(a1, b1, a2, b2) \ argument 144 F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
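In both CAST files above, F1_2/F2_2/F3_2 instantiate one shared macro with different add/xor/subtract combinations, mirroring the three CAST round-function types from RFC 2144, which differ only in which of +, ^ and - is used at each step. A scalar sketch of that parameterisation; the S-box arguments and names are placeholders:

```c
#include <stdint.h>

#define ROTL32(v, n) ((n) ? (((v) << (n)) | ((v) >> (32 - (n)))) : (v))

typedef uint32_t (*op_t)(uint32_t, uint32_t);

static uint32_t op_add(uint32_t a, uint32_t b) { return a + b; }
static uint32_t op_xor(uint32_t a, uint32_t b) { return a ^ b; }
static uint32_t op_sub(uint32_t a, uint32_t b) { return a - b; }

/*
 * One CAST-style round function, parameterised like F_2(..., op0..op3):
 * F1 = (+, ^, -, +), F2 = (^, -, +, ^), F3 = (-, +, ^, -).
 */
static uint32_t cast_f(const uint32_t s[4][256], uint32_t data,
		       uint32_t km, uint8_t kr,
		       op_t op0, op_t op1, op_t op2, op_t op3)
{
	uint32_t i = ROTL32(op0(km, data), kr & 31);

	return op3(op2(op1(s[0][i >> 24],
			   s[1][(i >> 16) & 0xff]),
		       s[2][(i >> 8) & 0xff]),
		   s[3][i & 0xff]);
}

/* Example: the type-1 round function, matching F1_2's (add, xor, sub, add). */
static uint32_t cast_f1(const uint32_t s[4][256], uint32_t data,
			uint32_t km, uint8_t kr)
{
	return cast_f(s, data, km, kr, op_add, op_xor, op_sub, op_add);
}
```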
|
| aria-gfni-avx512-asm_64.S | 68 a2, b2, c2, d2, \ argument 74 transpose_4x4(b0, b1, b2, b3, d2, d3); \ 89 vpshufb a0, b2, b2; \ 112 transpose_4x4(a2, b2, c2, d2, b0, b1); \ 120 a2, b2, c2, d2, \ argument 126 transpose_4x4(b0, b1, b2, b3, d2, d3); \ 141 vpshufb a0, b2, b2; \ 164 transpose_4x4(c2, d2, a2, b2, b0, b1); \
|
| aria-aesni-avx-asm_64.S | 68 a2, b2, c2, d2, \ argument 74 transpose_4x4(b0, b1, b2, b3, d2, d3); \ 89 vpshufb a0, b2, b2; \ 112 transpose_4x4(a2, b2, c2, d2, b0, b1); \ 120 a2, b2, c2, d2, \ argument 126 transpose_4x4(b0, b1, b2, b3, d2, d3); \ 141 vpshufb a0, b2, b2; \ 164 transpose_4x4(c2, d2, a2, b2, b0, b1); \
|
| aria-aesni-avx2-asm_64.S | 84 a2, b2, c2, d2, \ argument 90 transpose_4x4(b0, b1, b2, b3, d2, d3); \ 105 vpshufb a0, b2, b2; \ 128 transpose_4x4(a2, b2, c2, d2, b0, b1); \ 136 a2, b2, c2, d2, \ argument 142 transpose_4x4(b0, b1, b2, b3, d2, d3); \ 157 vpshufb a0, b2, b2; \ 180 transpose_4x4(c2, d2, a2, b2, b0, b1); \
|
| camellia-aesni-avx-asm_64.S | 433 #define byteslice_16x16b(a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, a3, \ argument 438 transpose_4x4(b0, b1, b2, b3, d2, d3); \ 453 vpshufb a0, b2, b2; \ 476 transpose_4x4(a2, b2, c2, d2, b0, b1); \
|
| camellia-aesni-avx2-asm_64.S | 465 #define byteslice_16x16b_fast(a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, \ argument 470 transpose_4x4(b0, b1, b2, b3, d2, d3); \ 485 vpshufb a0, b2, b2; \ 508 transpose_4x4(a2, b2, c2, d2, b0, b1); \
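The transpose_4x4() used throughout the ARIA and Camellia byteslicing macros above reorders four SIMD registers, viewed as a 4x4 matrix of 32-bit lanes, into column order using unpack instructions. The scalar equivalent is an ordinary in-place 4x4 transpose, sketched here only for orientation:

```c
#include <stdint.h>

/* In-place transpose of a 4x4 matrix of 32-bit words. */
static void transpose_4x4_sketch(uint32_t m[4][4])
{
	for (int i = 0; i < 4; i++) {
		for (int j = i + 1; j < 4; j++) {
			uint32_t tmp = m[i][j];

			m[i][j] = m[j][i];
			m[j][i] = tmp;
		}
	}
}
```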
|
| /arch/arm/crypto/ |
| aes-neonbs-core.S | 80 .macro in_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7 81 veor \b2, \b2, \b1 84 veor \b6, \b6, \b2 91 veor \b2, \b2, \b7 100 veor \b2, \b2, \b0 106 veor \b2, \b2, \b5 115 veor \b2, \b2, \b5 118 veor \b2, \b2, \b0 127 veor \b2, \b2, \b7 134 veor \b6, \b6, \b2 [all …]
|
| ghash-ce-core.S | 96 .macro __pmull_p64, rd, rn, rm, b1, b2, b3, b4 110 .macro __pmull_p8, rq, ad, bd, b1=t4l, b2=t3l, b3=t4l, b4=t3l 118 .ifc \b2, t3l 123 vmull.p8 t3q, \ad, \b2 @ G = A*B2
|
| /arch/x86/net/ |
| bpf_jit_comp.c | 41 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2) argument 42 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3) argument 904 u8 b1, b2, b3; in emit_mov_imm32() local 913 b2 = 0xC7; in emit_mov_imm32() 1437 u8 b1, b2; in emit_3vex() local 1464 EMIT3(b0, b1, b2); in emit_3vex() 1735 b2 = 0x05; in do_jit() 1739 b2 = 0x2D; in do_jit() 1743 b2 = 0x25; in do_jit() 1747 b2 = 0x0D; in do_jit() [all …]
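The EMIT2()/EMIT3() macros above pack opcode bytes little-endian into one integer and append them to the JIT image. A stand-alone sketch of that scheme, assuming a little-endian host (true on x86) and illustrative names; the real macros also keep the output cursor in a local "prog" variable:

```c
#include <stdint.h>
#include <string.h>

static inline uint8_t *emit_packed(uint8_t *prog, uint32_t bytes, int len)
{
	memcpy(prog, &bytes, len);	/* copies the low "len" bytes in order */
	return prog + len;
}

#define EMIT2_SKETCH(prog, b1, b2) \
	((prog) = emit_packed((prog), (uint32_t)(b1) | ((uint32_t)(b2) << 8), 2))

#define EMIT3_SKETCH(prog, b1, b2, b3) \
	((prog) = emit_packed((prog), (uint32_t)(b1) | ((uint32_t)(b2) << 8) | \
			      ((uint32_t)(b3) << 16), 3))
```

For example, EMIT3_SKETCH(prog, 0x48, 0xC7, 0xC0) would emit the start of a `mov $imm32, %rax` (REX.W prefix, opcode 0xC7, ModRM /0 selecting RAX), which matches the b2 = 0xC7 case visible in emit_mov_imm32() above.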
|
| bpf_jit_comp32.c | 68 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2) argument 69 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3) argument 70 #define EMIT4(b1, b2, b3, b4) \ argument 75 #define EMIT2_off32(b1, b2, off) \ argument 77 #define EMIT3_off32(b1, b2, b3, off) \ argument 79 #define EMIT4_off32(b1, b2, b3, b4, off) \ argument 488 u8 b2; in emit_ia32_shift_r() local 503 b2 = 0xE0; break; in emit_ia32_shift_r() 505 b2 = 0xE8; break; in emit_ia32_shift_r() 507 b2 = 0xF8; break; in emit_ia32_shift_r() [all …]
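In emit_ia32_shift_r() above, b2 selects the ModRM byte for x86's 0xD3 shift-by-%cl group: 0xE0 (/4, SHL), 0xE8 (/5, SHR) or 0xF8 (/7, SAR), OR'ed with the register operand. A small sketch of that encoding step, with illustrative names:

```c
#include <stdint.h>

enum shift_kind { SHIFT_LEFT, SHIFT_RIGHT_LOGICAL, SHIFT_RIGHT_ARITH };

static void emit_shift_by_cl(uint8_t **prog, enum shift_kind kind, uint8_t reg)
{
	static const uint8_t modrm_base[] = { 0xE0, 0xE8, 0xF8 };	/* SHL, SHR, SAR */

	*(*prog)++ = 0xD3;				/* shift r/m32 by %cl */
	*(*prog)++ = modrm_base[kind] | (reg & 7);	/* mod=11, /4-/5-/7, rm=reg */
}
```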
|
| /arch/s390/mm/ |
| extable.c | 82 u64 b2 : 4; member 109 uaddr = regs->gprs[insn->b2] + insn->d2; in ex_handler_ua_mvcos()
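The extable.c hits above decode a base-plus-displacement operand from the faulting instruction: b2 names a base register and d2 a displacement, so the user address is gprs[b2] + d2. A sketch of that computation with an illustrative bitfield layout (not the kernel's exact struct):

```c
#include <stdint.h>

/* Illustrative operand layout: 4-bit base register, 12-bit displacement. */
struct bd_operand {
	uint64_t d2 : 12;
	uint64_t b2 : 4;
};

static uint64_t effective_address(const uint64_t gprs[16],
				  const struct bd_operand *op)
{
	/* On s390, general register 0 used as a base contributes 0. */
	uint64_t base = op->b2 ? gprs[op->b2] : 0;

	return base + op->d2;
}
```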
|
| /arch/s390/net/ |
| bpf_jit_comp.c | 156 #define EMIT2(op, b1, b2) \ argument 158 _EMIT2((op) | reg(b1, b2)); \ 160 REG_SET_SEEN(b2); \ 170 #define EMIT4(op, b1, b2) \ argument 172 _EMIT4((op) | reg(b1, b2)); \ 174 REG_SET_SEEN(b2); \ 181 REG_SET_SEEN(b2); \ 196 REG_SET_SEEN(b2); \ 246 REG_SET_SEEN(b2); \ 256 REG_SET_SEEN(b2); \ [all …]
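The REG_SET_SEEN() calls sprinkled through the s390 EMIT macros above record which registers an emitted instruction touches, so the JIT prologue and epilogue only save and restore the ones actually used. A minimal sketch of that bookkeeping, with illustrative names:

```c
struct jit_sketch {
	unsigned int seen_regs;		/* bitmap of registers used so far */
};

static inline void reg_set_seen(struct jit_sketch *jit, unsigned int reg)
{
	jit->seen_regs |= 1u << reg;
}

static inline int reg_seen(const struct jit_sketch *jit, unsigned int reg)
{
	return (jit->seen_regs >> reg) & 1;
}
```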
|
| /arch/powerpc/crypto/ |
| aes-tab-4k.S | 43 .long R(ef, fa, fa, 15), R(b2, 59, 59, eb) 64 .long R(7f, b2, b2, cd), R(ea, 75, 75, 9f) 67 .long R(36, 1b, 1b, 2d), R(dc, 6e, 6e, b2) 192 .long R(b2, eb, 28, 07), R(2f, b5, c2, 03) 194 .long R(30, 28, 87, f2), R(23, bf, a5, b2) 238 .long R(1d, 9e, 2f, 4b), R(dc, b2, 30, f3) 262 .long R(31, a4, b2, af), R(2a, 3f, 23, 31)
|
| /arch/arm/nwfpe/ |
| softfloat-macros | 359 192-bit value formed by concatenating `b0', `b1', and `b2'. Addition is 372 bits64 b2, 381 z2 = a2 + b2; 416 Subtracts the 192-bit value formed by concatenating `b0', `b1', and `b2' 430 bits64 b2, 439 z2 = a2 - b2; 440 borrow1 = ( a2 < b2 );
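The add192()/sub192() helpers referenced above do 192-bit arithmetic on three 64-bit limbs, propagating carries and borrows with unsigned compares (z2 = a2 + b2, borrow1 = (a2 < b2), and so on). A compact C sketch of the addition half, keeping the softfloat convention that limb 0 is the most significant and using uint64_t in place of bits64:

```c
#include <stdint.h>

static void add192_sketch(uint64_t a0, uint64_t a1, uint64_t a2,
			  uint64_t b0, uint64_t b1, uint64_t b2,
			  uint64_t *z0, uint64_t *z1, uint64_t *z2)
{
	uint64_t r2 = a2 + b2;
	uint64_t carry1 = r2 < a2;	/* carry out of the low limb */
	uint64_t r1 = a1 + b1;
	uint64_t carry0 = r1 < a1;	/* carry out of the middle limb */
	uint64_t r0 = a0 + b0;

	r1 += carry1;
	r0 += r1 < carry1;		/* adding the carry may ripple again */
	r0 += carry0;

	*z2 = r2;
	*z1 = r1;
	*z0 = r0;
}
```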
|
| /arch/x86/kernel/ |
| uprobes.c | 47 #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\ argument 48 (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
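The W() macro above packs sixteen 0/1 flags into a bitmap row so that checking whether an opcode is allowed becomes one shift and mask. A hedged sketch of the same packing and lookup; the table layout and the example row values are placeholders, not the kernel's actual tables:

```c
#include <stdint.h>

#define ROW16(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf) \
	((uint32_t)(b0) <<  0 | (uint32_t)(b1) <<  1 | (uint32_t)(b2) <<  2 | \
	 (uint32_t)(b3) <<  3 | (uint32_t)(b4) <<  4 | (uint32_t)(b5) <<  5 | \
	 (uint32_t)(b6) <<  6 | (uint32_t)(b7) <<  7 | (uint32_t)(b8) <<  8 | \
	 (uint32_t)(b9) <<  9 | (uint32_t)(ba) << 10 | (uint32_t)(bb) << 11 | \
	 (uint32_t)(bc) << 12 | (uint32_t)(bd) << 13 | (uint32_t)(be) << 14 | \
	 (uint32_t)(bf) << 15)

static const uint32_t opcode_ok[16] = {
	/* one row per high nibble of the opcode; example values only */
	[0x0] = ROW16(1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0),
};

static inline int opcode_is_ok(uint8_t opcode)
{
	return (opcode_ok[opcode >> 4] >> (opcode & 0xf)) & 1;
}
```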
|
| /arch/arm/boot/dts/aspeed/ |
| aspeed-bmc-opp-palmetto.dts | 254 pin-gpio-b2-hog {
|