/arch/arm64/crypto/

sm4-ce-asm.h
     61: sm4e b3.4s, v24.4s; \
     65: sm4e b3.4s, v25.4s; \
     69: sm4e b3.4s, v26.4s; \
     93: rev64 b3.4s, b3.4s; \
     97: ext b3.16b, b3.16b, b3.16b, #8; \
    101: rev32 b3.16b, b3.16b;
    107: rev32 b3.16b, b3.16b; \
    178: rev64 b3.4s, b3.4s; \
    186: ext b3.16b, b3.16b, b3.16b, #8; \
    194: rev32 b3.16b, b3.16b; \
    [all …]
|
sm4-neon-core.S
    137: ROUND4(0, b0, b1, b2, b3); \
    138: ROUND4(1, b1, b2, b3, b0); \
    139: ROUND4(2, b2, b3, b0, b1); \
    140: ROUND4(3, b3, b0, b1, b2); \
    147: rev32 b3.16b, b3.16b; \
    149: rotate_clockwise_4x4(b0, b1, b2, b3); \
    158: rev32 b3.16b, b3.16b; \
    159: SM4_CRYPT_BLK4_BE(b0, b1, b2, b3);
    224: rev32 b3.16b, b3.16b; \
    235: ROUND8(0, b0, b1, b2, b3, b4, b5, b6, b7); \
    [all …]
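
The rev32 steps in both SM4 files reverse the byte order inside each
32-bit lane; as the SM4_CRYPT_BLK4_BE name suggests, this appears to
convert between SM4's big-endian word layout and host register order.
A one-word scalar model of "rev32 vN.16b", for illustration only:

    #include <stdint.h>

    /* Byte-swap one 32-bit word, as rev32 does per vector lane. */
    static uint32_t rev32_word(uint32_t x)
    {
            return (x >> 24) | ((x >> 8) & 0xff00) |
                   ((x & 0xff00) << 8) | (x << 24);
    }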
|
aes-neonbs-core.S
     29: eor \b3, \b3, \b0
     32: eor \b6, \b6, \b3
     33: eor \b3, \b3, \b7
     35: eor \b3, \b3, \b4
     38: eor \b3, \b3, \b1
     50: eor \b3, \b3, \b7
     62: eor \b3, \b3, \b7
     74: eor \b3, \b3, \b1
     77: eor \b3, \b3, \b4
     79: eor \b3, \b3, \b7
    [all …]
|
ghash-ce-core.S
     99: .macro __pmull_p8_tail, rq, ad, bd, nb, t, b1, b2, b3, b4
    105: pmull\t t8.8h, \ad, \b3\().\nb // I = A*B3
|
/arch/powerpc/kernel/vdso/

vgetrandom-chacha.S
     52: .macro quarterround4 a1 b1 c1 d1 a2 b2 c2 d2 a3 b3 c3 d3 a4 b4 c4 d4
     55: add \a3, \a3, \b3
     71: xor \b3, \b3, \c3
     75: rotlwi \b3, \b3, 12
     79: add \a3, \a3, \b3
     95: xor \b3, \b3, \c3
     99: rotlwi \b3, \b3, 7
    103: #define QUARTERROUND4(a1,b1,c1,d1,a2,b2,c2,d2,a3,b3,c3,d3,a4,b4,c4,d4) \  [argument]
    106: state##a3 state##b3 state##c3 state##d3 \
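
The add/xor/rotlwi sequence above is the ChaCha quarter-round, unrolled
four-wide across register groups (only the b-word rotations by 12 and 7
match b3; the 16 and 8 rotations apply to the d words). A minimal scalar
sketch with the standard ChaCha rotation counts; names are illustrative:

    #include <stdint.h>

    #define ROTL32(x, n) (((x) << (n)) | ((x) >> (32 - (n))))

    /* One ChaCha quarter-round; quarterround4 applies this to four
     * (a, b, c, d) register groups at once. */
    static void quarterround(uint32_t *a, uint32_t *b,
                             uint32_t *c, uint32_t *d)
    {
            *a += *b; *d ^= *a; *d = ROTL32(*d, 16);
            *c += *d; *b ^= *c; *b = ROTL32(*b, 12);
            *a += *b; *d ^= *a; *d = ROTL32(*d, 8);
            *c += *d; *b ^= *c; *b = ROTL32(*b, 7);
    }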
|
/arch/arm/crypto/

aes-neonbs-core.S
     83: veor \b3, \b3, \b0
     86: veor \b6, \b6, \b3
     87: veor \b3, \b3, \b7
     89: veor \b3, \b3, \b4
     92: veor \b3, \b3, \b1
    104: veor \b3, \b3, \b7
    116: veor \b3, \b3, \b7
    128: veor \b3, \b3, \b1
    131: veor \b3, \b3, \b4
    133: veor \b3, \b3, \b7
    [all …]
|
ghash-ce-core.S
     96: .macro __pmull_p64, rd, rn, rm, b1, b2, b3, b4
    110: .macro __pmull_p8, rq, ad, bd, b1=t4l, b2=t3l, b3=t4l, b4=t3l
    125: .ifc \b3, t4l
    134: vmull.p8 t4q, \ad, \b3 @ I = A*B3
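
The pmull/vmull.p8 partial products in both ghash-ce-core.S files are
polynomial (carry-less) multiplications over GF(2), from which the
64x64-bit GHASH product is assembled. A scalar sketch of a single
8x8-bit carry-less multiply, for orientation only:

    #include <stdint.h>

    /* Multiply two bytes as polynomials over GF(2): shift-and-XOR
     * instead of shift-and-add, the operation one vmull.p8 lane
     * performs in hardware. */
    static uint16_t clmul8(uint8_t a, uint8_t b)
    {
            uint16_t r = 0;
            int i;

            for (i = 0; i < 8; i++)
                    if (b & (1u << i))
                            r ^= (uint16_t)a << i;
            return r;
    }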
|
/arch/arm/include/asm/

xor.h
     32: : "=r" (src), "=r" (b1), "=r" (b2), "=r" (b3), "=r" (b4) \
     34: __XOR(a1, b1); __XOR(a2, b2); __XOR(a3, b3); __XOR(a4, b4)
     57: register unsigned int b3 __asm__("ip");  [in xor_arm4regs_2()]
     79: register unsigned int b3 __asm__("ip");  [in xor_arm4regs_3()]
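
xor.h pins locals such as b3 to fixed ARM registers ("ip" above) so the
inline-asm load-multiple instructions can name them directly. The
operation underneath is a multi-word XOR pass; a plain-C sketch of the
idea, assuming word-aligned buffers (names are illustrative, not the
kernel's):

    /* XOR four words per iteration, the scalar shape of the
     * xor_arm4regs_*() helpers; the kernel binds b1..b4 to specific
     * registers so ldmia/stmia can fill and drain them. */
    static void xor_4words(unsigned long *p1, const unsigned long *p2,
                           unsigned int words)
    {
            for (; words >= 4; words -= 4, p1 += 4, p2 += 4) {
                    p1[0] ^= p2[0];
                    p1[1] ^= p2[1];
                    p1[2] ^= p2[2];
                    p1[3] ^= p2[3];
            }
    }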
|
/arch/x86/net/

bpf_jit_comp.c
     42: #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)  [argument]
     43: #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)  [argument]
    904: u8 b1, b2, b3;  [local in emit_mov_imm32()]
    914: b3 = 0xC0;  [in emit_mov_imm32()]
    927: b3 = 0xC0;  [in emit_mov_imm32()]
   1618: u8 b2 = 0, b3 = 0;  [local in do_jit()]
   1734: b3 = 0xC0;  [in do_jit()]
   1738: b3 = 0xE8;  [in do_jit()]
   1742: b3 = 0xE0;  [in do_jit()]
   1746: b3 = 0xC8;  [in do_jit()]
    [all …]
|
bpf_jit_comp32.c
     69: #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)  [argument]
     70: #define EMIT4(b1, b2, b3, b4) \  [argument]
     71: EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
     77: #define EMIT3_off32(b1, b2, b3, off) \  [argument]
     78: do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
     79: #define EMIT4_off32(b1, b2, b3, b4, off) \  [argument]
     80: do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
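
In both BPF JITs, EMIT3()/EMIT4() pack individual opcode bytes into a
single integer, lowest byte first, and append it to the instruction
stream. A self-contained sketch of that packing; the image buffer and
emit() helper are hypothetical stand-ins, and a little-endian host is
assumed:

    #include <stdint.h>
    #include <string.h>

    static uint8_t image[64];  /* hypothetical JIT output buffer */
    static size_t  pos;

    /* Append the low `len` bytes of `bytes`; on little-endian x86,
     * b1 lands first in memory, matching instruction byte order. */
    static void emit(uint32_t bytes, int len)
    {
            memcpy(image + pos, &bytes, len);
            pos += len;
    }

    #define EMIT3(b1, b2, b3) emit((b1) | ((b2) << 8) | ((b3) << 16), 3)

For example, EMIT3(0x48, 0x01, 0xC0) appends the three bytes of
"add rax, rax" (REX.W prefix, ADD opcode, ModRM byte).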
|
/arch/powerpc/crypto/

aes-tab-4k.S
     45: .long R(41, ad, ad, ec), R(b3, d4, d4, 67)
     70: .long R(b7, d6, d6, 61), R(7d, b3, b3, ce)
    146: .long R(2b, 98, 98, b3), R(22, 11, 11, 33)
    240: .long R(2b, b3, 16, 6c), R(a9, 70, b9, 99)
    253: .long R(6f, d5, 2d, a9), R(cf, 25, 12, b3)
    274: .long R(b3, 67, 1d, 5a), R(92, db, d2, 52)
    288: .long R(39, a8, 01, 71), R(08, 0c, b3, de)
|
/arch/x86/crypto/

aria-gfni-avx512-asm_64.S
     69: a3, b3, c3, d3, \  [argument]
     74: transpose_4x4(b0, b1, b2, b3, d2, d3); \
     90: vpshufb a0, b3, b3; \
    113: transpose_4x4(a3, b3, c3, d3, b0, b1); \
    121: a3, b3, c3, d3, \  [argument]
    126: transpose_4x4(b0, b1, b2, b3, d2, d3); \
    142: vpshufb a0, b3, b3; \
    165: transpose_4x4(c3, d3, a3, b3, b0, b1); \
|
aria-aesni-avx-asm_64.S
     69: a3, b3, c3, d3, \  [argument]
     74: transpose_4x4(b0, b1, b2, b3, d2, d3); \
     90: vpshufb a0, b3, b3; \
    113: transpose_4x4(a3, b3, c3, d3, b0, b1); \
    121: a3, b3, c3, d3, \  [argument]
    126: transpose_4x4(b0, b1, b2, b3, d2, d3); \
    142: vpshufb a0, b3, b3; \
    165: transpose_4x4(c3, d3, a3, b3, b0, b1); \
|
aria-aesni-avx2-asm_64.S
     85: a3, b3, c3, d3, \  [argument]
     90: transpose_4x4(b0, b1, b2, b3, d2, d3); \
    106: vpshufb a0, b3, b3; \
    129: transpose_4x4(a3, b3, c3, d3, b0, b1); \
    137: a3, b3, c3, d3, \  [argument]
    142: transpose_4x4(b0, b1, b2, b3, d2, d3); \
    158: vpshufb a0, b3, b3; \
    181: transpose_4x4(c3, d3, a3, b3, b0, b1); \
|
camellia-aesni-avx-asm_64.S
    434: b3, c3, d3, st0, st1) \  [argument]
    438: transpose_4x4(b0, b1, b2, b3, d2, d3); \
    454: vpshufb a0, b3, b3; \
    477: transpose_4x4(a3, b3, c3, d3, b0, b1); \
|
camellia-aesni-avx2-asm_64.S
    466: a3, b3, c3, d3, st0, st1) \  [argument]
    470: transpose_4x4(b0, b1, b2, b3, d2, d3); \
    486: vpshufb a0, b3, b3; \
    509: transpose_4x4(a3, b3, c3, d3, b0, b1); \
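
The transpose_4x4() steps in the ARIA and Camellia files above re-slice
the state so each vector register holds the same word position from
several blocks, a byte-slicing layout used ahead of the vpshufb-based
substitution steps. A scalar model of the 4x4 transpose itself:

    #include <stdint.h>

    /* Swap rows and columns of a 4x4 matrix of 32-bit words; the
     * assembly does the same on four vector registers with
     * unpack/shuffle instructions. */
    static void transpose_4x4(uint32_t m[4][4])
    {
            int i, j;

            for (i = 0; i < 4; i++)
                    for (j = i + 1; j < 4; j++) {
                            uint32_t t = m[i][j];

                            m[i][j] = m[j][i];
                            m[j][i] = t;
                    }
    }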
|
/arch/arm/boot/dts/marvell/

kirkwood-b3.dts
     22: compatible = "excito,b3", "marvell,kirkwood-88f6281", "marvell,kirkwood";
|
Makefile
     86: kirkwood-b3.dtb \
|
/arch/x86/kernel/

uprobes.c
     47: #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\  [argument]
     48: (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
|
/arch/s390/net/

bpf_jit_comp.c
    177: #define EMIT4_RRF(op, b1, b2, b3) \  [argument]
    179: _EMIT4((op) | reg_high(b3) << 8 | reg(b1, b2)); \
    182: REG_SET_SEEN(b3); \
    241: #define EMIT6_DISP_LH(op1, op2, b1, b2, b3, disp) \  [argument]
    244: reg_high(b3) << 8, op2, disp); \
    247: REG_SET_SEEN(b3); \
|
/arch/x86/kernel/kprobes/

core.c
     64: #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\  [argument]
     65: (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
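
uprobes.c and kprobes/core.c share this W() idiom: sixteen one-bit
flags per row are folded into a 16-bit slice of an opcode attribute
bitmap, later indexed by opcode byte. A simplified, self-contained
sketch (the row handling and names here are illustrative, not the
kernel's exact layout):

    /* Fold 16 one-bit flags into one 16-bit bitmap row. */
    #define W16(b0, b1, b2, b3, b4, b5, b6, b7, \
                b8, b9, ba, bb, bc, bd, be, bf) \
            ((b0##UL << 0x0) | (b1##UL << 0x1) | (b2##UL << 0x2) | \
             (b3##UL << 0x3) | (b4##UL << 0x4) | (b5##UL << 0x5) | \
             (b6##UL << 0x6) | (b7##UL << 0x7) | (b8##UL << 0x8) | \
             (b9##UL << 0x9) | (ba##UL << 0xa) | (bb##UL << 0xb) | \
             (bc##UL << 0xc) | (bd##UL << 0xd) | (be##UL << 0xe) | \
             (bf##UL << 0xf))

    /* One flag per opcode, 16 opcodes per row (0x00-0x1f shown). */
    static const unsigned long opcode_flags[] = {
            W16(1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1),
            W16(1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0),
    };

    static int opcode_flagged(unsigned char opcode)
    {
            return (opcode_flags[opcode >> 4] >> (opcode & 0xf)) & 1;
    }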
|
/arch/x86/lib/

x86-opcode-map.txt
    238: b3: MOV BL/R11L,Ib
    535: b3: BTR Ev,Gv
|
/arch/arm64/boot/dts/rockchip/

rk3588-base.dtsi
    326: l2_cache_b3: l2-cache-b3 {
|