| /arch/arm64/crypto/ |
| sm4-ce-asm.h |
     46  rev64 b1.4s, b1.4s; \
     48  ext b1.16b, b1.16b, b1.16b, #8; \
     50  rev32 b1.16b, b1.16b; \
     54  rev32 b1.16b, b1.16b; \
     91  rev64 b1.4s, b1.4s; \
     95  ext b1.16b, b1.16b, b1.16b, #8; \
     99  rev32 b1.16b, b1.16b; \
    105  rev32 b1.16b, b1.16b; \
    176  rev64 b1.4s, b1.4s; \
    184  ext b1.16b, b1.16b, b1.16b, #8; \
    [all …]
|
| sm4-neon-core.S |
    137  ROUND4(0, b0, b1, b2, b3); \
    138  ROUND4(1, b1, b2, b3, b0); \
    139  ROUND4(2, b2, b3, b0, b1); \
    140  ROUND4(3, b3, b0, b1, b2); \
    145  rev32 b1.16b, b1.16b; \
    149  rotate_clockwise_4x4(b0, b1, b2, b3); \
    156  rev32 b1.16b, b1.16b; \
    159  SM4_CRYPT_BLK4_BE(b0, b1, b2, b3);
    222  rev32 b1.16b, b1.16b; \
    235  ROUND8(0, b0, b1, b2, b3, b4, b5, b6, b7); \
    [all …]
|
| aes-neonbs-core.S |
     27  eor \b2, \b2, \b1
     38  eor \b3, \b3, \b1
     39  eor \b1, \b1, \b5
     44  eor \b1, \b1, \b4
     47  eor \b6, \b6, \b1
     48  eor \b1, \b1, \b5
     57  eor \b1, \b1, \b7
     60  eor \b1, \b1, \b3
     68  eor \b1, \b1, \b4
     72  eor \b1, \b1, \b5
    [all …]
|
| sm4-ce-gcm-core.S |
    138  rev32 b1.16b, b1.16b; \
    144  sm4e b1.4s, v24.4s; \
    150  sm4e b1.4s, v25.4s; \
    156  sm4e b1.4s, v26.4s; \
    162  sm4e b1.4s, v27.4s; \
    168  sm4e b1.4s, v28.4s; \
    174  sm4e b1.4s, v29.4s; \
    180  sm4e b1.4s, v30.4s; \
    192  rev64 b1.4s, b1.4s; \
    198  ext b1.16b, b1.16b, b1.16b, #8; \
    [all …]
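The rev32/rev64/ext sequences in the SM4 listings above exist because SM4 is defined over big-endian 32-bit words, while the vector registers hold little-endian data. A minimal C sketch of the same byte-order fixup for one block (helper names are mine, not from these files):

```c
#include <stdint.h>

/* SM4 treats a 128-bit block as four big-endian 32-bit words.  On a
 * little-endian CPU each word must be byte-swapped on load, which is
 * what a single rev32 does for a whole vector register at once. */
static inline uint32_t be32_load(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

static void sm4_load_block(uint32_t w[4], const uint8_t in[16])
{
	for (int i = 0; i < 4; i++)
		w[i] = be32_load(in + 4 * i);
}
```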
|
| /arch/arm/include/asm/ |
| xor.h |
     26  : "=r" (src), "=r" (b1), "=r" (b2) \
     28  __XOR(a1, b1); __XOR(a2, b2);
     32  : "=r" (src), "=r" (b1), "=r" (b2), "=r" (b3), "=r" (b4) \
     34  __XOR(a1, b1); __XOR(a2, b2); __XOR(a3, b3); __XOR(a4, b4)
     55  register unsigned int b1 __asm__("r8");    in xor_arm4regs_2()
     77  register unsigned int b1 __asm__("r8");    in xor_arm4regs_3()
     99  register unsigned int b1 __asm__("ip");    in xor_arm4regs_4()
    121  register unsigned int b1 __asm__("ip");    in xor_arm4regs_5()
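These are the ARM RAID XOR helpers: 2- to 5-source variants that XOR several words per loop iteration, pinning temporaries to specific registers (r8, ip) so the loads pair up. A portable C sketch of the 2-source case (the kernel versions are hand-scheduled inline asm):

```c
#include <stddef.h>

/* Sketch of what xor_arm4regs_2() computes: fold one source buffer
 * into a destination with XOR, four words at a time so consecutive
 * loads and stores can be scheduled back to back. */
static void xor_2(size_t bytes, unsigned long *p1, const unsigned long *p2)
{
	size_t lines = bytes / (sizeof(unsigned long) * 4);

	while (lines--) {
		p1[0] ^= p2[0];
		p1[1] ^= p2[1];
		p1[2] ^= p2[2];
		p1[3] ^= p2[3];
		p1 += 4;
		p2 += 4;
	}
}
```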
|
| /arch/xtensa/platforms/iss/include/platform/ |
| simcall-iss.h |
     61  register int b1 asm("a3") = b;    in __simc()
     66  : "+r"(a1), "+r"(b1)              in __simc()
     69  errno = b1;                       in __simc()
|
| simcall-gdbio.h |
     22  register int b1 asm("a6") = b;    in __simc()
     28  : "r"(b1), "r"(d1)                in __simc()
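Both `__simc()` variants bind C variables to the exact registers the simulator ABI expects before issuing the trap. A sketch of the pattern, following the ISS variant's a2/a3 convention shown above (compiles only for xtensa targets; treat it as illustrative, not the kernel's exact function):

```c
/* Explicit register variables pin the call number and argument to the
 * registers the simulator reads; "+r" marks both as read-write since
 * the simulator returns the result and errno through them. */
static inline int simc_sketch(int a, int b)
{
	register int a1 asm("a2") = a;   /* call number in, result out */
	register int b1 asm("a3") = b;   /* argument in, errno out */

	asm volatile ("simcall"
		      : "+r"(a1), "+r"(b1)
		      :
		      : "memory");
	return a1;
}
```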
|
| /arch/powerpc/kernel/vdso/ |
| vgetrandom-chacha.S |
     52  .macro quarterround4 a1 b1 c1 d1 a2 b2 c2 d2 a3 b3 c3 d3 a4 b4 c4 d4
     53  add \a1, \a1, \b1
     69  xor \b1, \b1, \c1
     73  rotlwi \b1, \b1, 12
     77  add \a1, \a1, \b1
     93  xor \b1, \b1, \c1
     97  rotlwi \b1, \b1, 7
    103  #define QUARTERROUND4(a1,b1,c1,d1,a2,b2,c2,d2,a3,b3,c3,d3,a4,b4,c4,d4) \    argument
    104  quarterround4 state##a1 state##b1 state##c1 state##d1 \
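The `quarterround4` macro interleaves four independent ChaCha quarter-rounds so their adds, xors, and rotates can issue in parallel. For reference, one quarter-round in plain C (the 16/12/8/7 rotation schedule is fixed by the ChaCha specification; the snippet above shows the 12 and 7 steps):

```c
#include <stdint.h>

static inline uint32_t rotl32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

/* One ChaCha quarter-round on four state words. */
static void quarterround(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
	*a += *b; *d ^= *a; *d = rotl32(*d, 16);
	*c += *d; *b ^= *c; *b = rotl32(*b, 12);
	*a += *b; *d ^= *a; *d = rotl32(*d, 8);
	*c += *d; *b ^= *c; *b = rotl32(*b, 7);
}
```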
|
| /arch/arm/nwfpe/ |
| softfloat-macros |
    350  z1 = a1 + b1;
    371  bits64 b1,
    383  z1 = a1 + b1;
    409  *z1Ptr = a1 - b1;
    410  *z0Ptr = a0 - b0 - ( a1 < b1 );
    429  bits64 b1,
    441  z1 = a1 - b1;
    442  borrow0 = ( a1 < b1 );
    525  bits64 b1,
    562  bits64 b0, b1;
    [all …]
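The hits above come from softfloat's 128-bit arithmetic helpers. The 128-bit add works exactly as the fragments at lines 350 and 383 suggest: add the low halves first, then fold the carry into the high halves. A self-contained version:

```c
typedef unsigned long long bits64;

/* 128-bit addition: (z1 < a1) after an overflowing unsigned add is
 * precisely the carry out of the low half. */
static void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1,
		   bits64 *z0Ptr, bits64 *z1Ptr)
{
	bits64 z1 = a1 + b1;

	*z1Ptr = z1;
	*z0Ptr = a0 + b0 + (z1 < a1);
}
```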
|
| /arch/arm/crypto/ |
| aes-neonbs-core.S |
     81  veor \b2, \b2, \b1
     92  veor \b3, \b3, \b1
     93  veor \b1, \b1, \b5
     98  veor \b1, \b1, \b4
    101  veor \b6, \b6, \b1
    102  veor \b1, \b1, \b5
    111  veor \b1, \b1, \b7
    114  veor \b1, \b1, \b3
    122  veor \b1, \b1, \b4
    126  veor \b1, \b1, \b5
    [all …]
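The long eor/veor runs in both aes-neonbs files are the AES S-box written out as a Boolean circuit over bitsliced state: register `\b0`..`\b7` each hold one bit plane of many blocks, so every veor is one GF(2) gate applied to all blocks at once. A toy C model of why that works, assuming 64 parallel states (the layout here is illustrative, not the kernel's):

```c
#include <stdint.h>

/* plane[i] collects bit i of the same byte position across 64 states,
 * one bit per uint64_t lane.  XOR-ing two planes then performs that
 * GF(2) addition in every state simultaneously, which is why the
 * S-box reduces to a fixed sequence of veor-style instructions. */
typedef struct {
	uint64_t plane[8];
} bitsliced_byte;

static void bs_xor(bitsliced_byte *dst, const bitsliced_byte *src)
{
	for (int i = 0; i < 8; i++)
		dst->plane[i] ^= src->plane[i];   /* 64 byte-XORs per plane */
}
```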
|
| /arch/x86/crypto/ |
| cast5-avx-x86_64-asm_64.S |
    129  #define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \    argument
    130  F_head(b1, RX, RGI1, RGI2, op0); \
    133  F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
    139  #define F1_2(a1, b1, a2, b2) \    argument
    140  F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
    141  #define F2_2(a1, b1, a2, b2) \    argument
    142  F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
    143  #define F3_2(a1, b1, a2, b2) \    argument
    144  F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
    146  #define subround(a1, b1, a2, b2, f) \    argument
    [all …]
|
| cast6-avx-x86_64-asm_64.S |
    129  #define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \    argument
    130  F_head(b1, RX, RGI1, RGI2, op0); \
    133  F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
    139  #define F1_2(a1, b1, a2, b2) \    argument
    140  F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
    141  #define F2_2(a1, b1, a2, b2) \    argument
    142  F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
    143  #define F3_2(a1, b1, a2, b2) \    argument
    144  F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
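Both CAST listings build the three round-function types from one `F_2` skeleton by rotating four operators (add/xor/sub). A scalar sketch of the type-1 function matching `F1_2` above (op0..op3 = add, xor, sub, add), with the S-box layout as described in RFC 2144; `S` here is a placeholder for the cipher's four 8x32-bit tables:

```c
#include <stdint.h>

static inline uint32_t rotl32(uint32_t v, unsigned int n)
{
	n &= 31;
	return n ? (v << n) | (v >> (32 - n)) : v;
}

/* CAST type-1 round function: key-add, data-dependent rotate, then
 * an xor/sub/add cascade through the four S-boxes.  Types 2 and 3
 * permute the same four operators, exactly as F2_2/F3_2 do above. */
static uint32_t F1(uint32_t d, uint32_t km, uint8_t kr,
		   const uint32_t S[4][256])
{
	uint32_t I = rotl32(km + d, kr);

	return ((S[0][I >> 24] ^ S[1][(I >> 16) & 0xff]) -
		S[2][(I >> 8) & 0xff]) + S[3][I & 0xff];
}
```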
|
| ghash-clmulni-intel_asm.S |
     54  pclmulqdq $0x11, SHASH, T1    # T1 = a1 * b1
     55  pclmulqdq $0x00, T3, T2       # T2 = (a1 + a0) * (b1 + b0)
     57  pxor T1, T2                   # T2 = a0 * b1 + a1 * b0
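These three lines are one Karatsuba step over GF(2): the 128x128-bit carry-less product needs only three `pclmulqdq` multiplies because the middle term a0·b1 + a1·b0 equals (a1+a0)(b1+b0) + a1·b1 + a0·b0, where + is XOR. A scalar C model (`clmul64` stands in for the hardware instruction; the result is the raw 256-bit product, before the GHASH polynomial reduction):

```c
#include <stdint.h>

/* Bitwise carry-less 64x64 -> 128 multiply, a slow stand-in for
 * pclmulqdq. */
static void clmul64(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo)
{
	uint64_t h = 0, l = 0;

	for (int i = 0; i < 64; i++)
		if ((b >> i) & 1) {
			l ^= a << i;
			h ^= i ? a >> (64 - i) : 0;
		}
	*hi = h;
	*lo = l;
}

/* a[0]/b[0] are the low 64-bit halves, a[1]/b[1] the high halves;
 * r[0..3] is the 256-bit product, least-significant limb first. */
static void gf_mul128(const uint64_t a[2], const uint64_t b[2],
		      uint64_t r[4])
{
	uint64_t lo_h, lo_l, hi_h, hi_l, mid_h, mid_l;

	clmul64(a[0], b[0], &lo_h, &lo_l);           /* a0 * b0 */
	clmul64(a[1], b[1], &hi_h, &hi_l);           /* a1 * b1 */
	clmul64(a[0] ^ a[1], b[0] ^ b[1], &mid_h, &mid_l);
	mid_l ^= lo_l ^ hi_l;                        /* a0*b1 + a1*b0 */
	mid_h ^= lo_h ^ hi_h;

	r[0] = lo_l;
	r[1] = lo_h ^ mid_l;
	r[2] = hi_l ^ mid_h;
	r[3] = hi_h;
}
```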
|
| aria-gfni-avx512-asm_64.S |
     67  a1, b1, c1, d1, \    argument
     74  transpose_4x4(b0, b1, b2, b3, d2, d3); \
     88  vpshufb a0, b1, b1; \
    106  transpose_4x4(a1, b1, c1, d1, d2, d3); \
    111  vmovdqu64 b1, st1; \
    112  transpose_4x4(a2, b2, c2, d2, b0, b1); \
    115  vmovdqu64 st1, b1; \
    119  a1, b1, c1, d1, \    argument
    140  vpshufb a0, b1, b1; \
    163  vmovdqu64 b1, st1; \
    [all …]
|
| aria-aesni-avx-asm_64.S |
     67  a1, b1, c1, d1, \    argument
     74  transpose_4x4(b0, b1, b2, b3, d2, d3); \
     88  vpshufb a0, b1, b1; \
    106  transpose_4x4(a1, b1, c1, d1, d2, d3); \
    111  vmovdqu b1, st1; \
    112  transpose_4x4(a2, b2, c2, d2, b0, b1); \
    115  vmovdqu st1, b1; \
    119  a1, b1, c1, d1, \    argument
    140  vpshufb a0, b1, b1; \
    163  vmovdqu b1, st1; \
    [all …]
|
| aria-aesni-avx2-asm_64.S |
     83  a1, b1, c1, d1, \    argument
     90  transpose_4x4(b0, b1, b2, b3, d2, d3); \
    104  vpshufb a0, b1, b1; \
    122  transpose_4x4(a1, b1, c1, d1, d2, d3); \
    127  vmovdqu b1, st1; \
    128  transpose_4x4(a2, b2, c2, d2, b0, b1); \
    131  vmovdqu st1, b1; \
    135  a1, b1, c1, d1, \    argument
    156  vmovdqu b1, st1; \
    [all …]
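All three ARIA listings lean on a `transpose_4x4` helper to rearrange state between row-major and column-major layouts before the byte shuffles. A scalar model of a 4x4 word transpose (the asm keeps each row in a vector register and transposes with shuffle/unpack instructions instead of element-by-element swaps):

```c
#include <stdint.h>

/* Swap across the diagonal: word j of row i becomes word i of row j. */
static void transpose_4x4(uint32_t m[4][4])
{
	for (int i = 0; i < 4; i++)
		for (int j = i + 1; j < 4; j++) {
			uint32_t t = m[i][j];
			m[i][j] = m[j][i];
			m[j][i] = t;
		}
}
```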
|
| /arch/s390/net/ |
| bpf_jit_comp.c |
    122  u32 r1 = reg2hex[b1];    in reg_set_seen()
    159  REG_SET_SEEN(b1); \
    173  REG_SET_SEEN(b1); \
    180  REG_SET_SEEN(b1); \
    195  REG_SET_SEEN(b1); \
    203  REG_SET_SEEN(b1); \
    245  REG_SET_SEEN(b1); \
    255  REG_SET_SEEN(b1); \
    264  REG_SET_SEEN(b1); \
    272  REG_SET_SEEN(b1); \
    [all …]
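`REG_SET_SEEN(b1)` is invoked from nearly every instruction-emitter macro in the s390 BPF JIT, so the prologue and epilogue only need to save and restore registers the program actually used. A sketch of the underlying idea (struct and field names here are illustrative, not the kernel's exact layout):

```c
/* Each emitted instruction records the hardware register it touched
 * in a bitmask; reg2hex[] in the listing maps a BPF register to its
 * s390 hardware register number first. */
struct jit_ctx {
	unsigned int seen_regs;   /* bitmask of hardware registers used */
};

static void reg_set_seen(struct jit_ctx *jit, unsigned int hwreg)
{
	jit->seen_regs |= 1U << hwreg;
}
```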
|
| /arch/arm64/boot/dts/amd/ |
| Makefile |
      3  dtb-$(CONFIG_ARCH_SEATTLE) += amd-overdrive-rev-b0.dtb amd-overdrive-rev-b1.dtb
|
| /arch/sh/kernel/cpu/sh4/ |
| softfloat.c |
     90  void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
     92  void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
    638  void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,    in add128() argument
    643  z1 = a1 + b1;    in add128()
    649  sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,    in sub128() argument
    652  *z1Ptr = a1 - b1;    in sub128()
    653  *z0Ptr = a0 - b0 - (a1 < b1);    in sub128()
    658  bits64 b0, b1;    in estimateDiv128To64() local
    672  b1 = b << 32;    in estimateDiv128To64()
    673  add128(rem0, rem1, b0, b1, &rem0, &rem1);    in estimateDiv128To64()
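The sub128() lines 652-653 show the borrow-propagation counterpart of add128: subtract the low halves, then subtract the borrow, which is simply the comparison a1 < b1. Assembled into a self-contained function:

```c
typedef unsigned long long bits64;

/* 128-bit subtraction: (a1 < b1) is exactly the borrow out of the
 * low-half subtraction, so it is subtracted from the high halves. */
static void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1,
		   bits64 *z0Ptr, bits64 *z1Ptr)
{
	*z1Ptr = a1 - b1;
	*z0Ptr = a0 - b0 - (a1 < b1);
}
```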
|
| /arch/alpha/include/asm/ |
| bitops.h |
    460  unsigned long b0, b1, ofs, tmp;    in sched_find_first_bit() local
    463  b1 = b[1];    in sched_find_first_bit()
    465  tmp = (b0 ? b0 : b1);    in sched_find_first_bit()
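The fragment `tmp = (b0 ? b0 : b1)` picks the first nonzero 64-bit word of a 128-bit priority bitmap. A compact C sketch of the whole routine's logic (using the compiler builtin in place of the Alpha cttz-based helper):

```c
#include <stdint.h>

/* Find the lowest set bit in a 128-bit bitmap: select the first
 * nonzero word, remember its bit offset, then take its lowest set
 * bit.  Assumes at least one bit is set, as the scheduler guarantees. */
static unsigned long find_first_bit128(const uint64_t b[2])
{
	uint64_t word = b[0] ? b[0] : b[1];
	unsigned long ofs = b[0] ? 0 : 64;

	return ofs + __builtin_ctzll(word);
}
```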
|
| /arch/s390/mm/ |
| extable.c |
     80  u64 b1 : 4;    member
    111  uaddr = regs->gprs[insn->b1] + insn->d1;    in ex_handler_ua_mvcos()
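Here `b1` is a 4-bit base-register field decoded out of the faulting instruction's image; the fixup handler reconstructs the faulting address as base register plus displacement. A sketch of that decode (the bitfield layout below is illustrative only; the kernel's actual struct and s390 instruction format differ in detail):

```c
#include <stdint.h>

/* Base + displacement operand of a storage instruction: b1 names one
 * of the 16 general-purpose registers, d1 is an unsigned 12-bit
 * displacement added to it. */
struct insn_sd {
	uint64_t opcode : 16;
	uint64_t b1 : 4;      /* base register number */
	uint64_t d1 : 12;     /* displacement */
};

static uint64_t fault_addr(const struct insn_sd *insn,
			   const uint64_t gprs[16])
{
	return gprs[insn->b1] + insn->d1;
}
```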
|
| /arch/x86/net/ |
| bpf_jit_comp.c |
     40  #define EMIT1(b1) EMIT(b1, 1)    argument
     41  #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)    argument
     42  #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)    argument
     43  #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)    argument
     44  #define EMIT5(b1, b2, b3, b4, b5) \    argument
     47  #define EMIT1_off32(b1, off) \    argument
     49  #define EMIT2_off32(b1, b2, off) \    argument
    904  u8 b1, b2, b3;    in emit_mov_imm32() local
    912  b1 = add_1mod(0x48, dst_reg);    in emit_mov_imm32()
   1437  u8 b1, b2;    in emit_3vex() local
    [all …]
|
| bpf_jit_comp32.c |
     67  #define EMIT1(b1) EMIT(b1, 1)    argument
     68  #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)    argument
     69  #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)    argument
     70  #define EMIT4(b1, b2, b3, b4) \    argument
     73  #define EMIT1_off32(b1, off) \    argument
     74  do { EMIT1(b1); EMIT(off, 4); } while (0)
     75  #define EMIT2_off32(b1, b2, off) \    argument
     76  do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
     77  #define EMIT3_off32(b1, b2, b3, off) \    argument
     78  do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
    [all …]
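In both x86 JIT listings the EMIT macros pack up to four opcode bytes little-endian into a single integer and append them to the JIT image in one step. A scalar model of the mechanism (the `jit_image` struct is mine; the kernel tracks the write position differently):

```c
#include <stdint.h>

struct jit_image {
	uint8_t *prog;   /* current write position */
};

/* Append len bytes, least-significant byte first, matching the
 * (b1) + ((b2) << 8) + ... packing used by the macros above. */
static void emit(struct jit_image *img, uint32_t bytes, int len)
{
	for (int i = 0; i < len; i++)
		*img->prog++ = (bytes >> (8 * i)) & 0xff;
}

/* EMIT3(b1, b2, b3) then expands to the equivalent of: */
static void emit3(struct jit_image *img, uint8_t b1, uint8_t b2, uint8_t b3)
{
	emit(img, b1 | ((uint32_t)b2 << 8) | ((uint32_t)b3 << 16), 3);
}
```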
|
| /arch/powerpc/crypto/ |
| aes-tab-4k.S |
     36  .long R(de, 6f, 6f, b1), R(91, c5, c5, 54)
     76  .long R(79, b1, b1, c8), R(b6, 5b, 5b, ed)
    123  .long R(01, 8d, 8d, 8c), R(b1, d5, d5, 64)
    172  .long R(de, b1, 5a, 49), R(25, ba, 1b, 67)
    186  .long R(b1, 64, 77, e0), R(bb, 6b, ae, 84)
    220  .long R(0c, 0a, 67, b1), R(93, 57, e7, 0f)
    279  .long R(e1, 1c, e5, ed), R(7a, 47, b1, 3c)
|
| /arch/arm64/tools/ |
| sysreg |
   1531  0b1 IMP
   1536  0b1 IMP
   1551  0b1 IMP
   1559  0b1 IMP
   1563  0b1 IMP
   1567  0b1 IMP
   1571  0b1 IMP
   1579  0b1 IMP
   1583  0b1 IMP
   1587  0b1 IMP
   [all …]
|