/arch/x86/lib/

memmove_32.S
     23  .set tmp0, %edx
     60  movl src, tmp0
     61  xorl dest, tmp0
     62  andl $0xff, tmp0
     70  movl 0*4(src), tmp0
     91  movl tmp0, (tmp1)
     97  movl (src), tmp0
    104  movl tmp0, (tmp1)
    113  movl src, tmp0
    114  xorl dest, tmp0
    [all …]
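
Here tmp0 is an alias for %edx (line 23). The xorl/andl pair at lines 60-62
(and again at 113-114) compares the low eight address bits of src and dest,
evidently feeding the routine's choice of copy strategy. A minimal C sketch
of the core decision any memmove must make (overlap determines copy
direction), leaving out the word-sized fast paths the tmp0 tests select:

	#include <stddef.h>

	void *memmove_sketch(void *dst, const void *src, size_t n)
	{
		unsigned char *d = dst;
		const unsigned char *s = src;

		if (d <= s || d >= s + n) {
			while (n--)		/* no harmful overlap:  */
				*d++ = *s++;	/* forward copy is safe */
		} else {
			d += n;			/* dest overlaps src    */
			s += n;			/* from above:          */
			while (n--)		/* copy backwards       */
				*--d = *--s;
		}
		return dst;
	}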
|
/arch/arm/include/asm/

uaccess-asm.h
    131  .macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable
    132  DACR( mrc p15, 0, \tmp0, c3, c0, 0)
    133  DACR( str \tmp0, [sp, #SVC_DACR])
    134  PAN( mrc p15, 0, \tmp0, c2, c0, 2)
    135  PAN( str \tmp0, [sp, #SVC_TTBCR])
    143  bic \tmp2, \tmp0, #domain_mask(DOMAIN_KERNEL)
    151  .macro uaccess_exit, tsk, tmp0, tmp1
    152  DACR( ldr \tmp0, [sp, #SVC_DACR])
    153  DACR( mcr p15, 0, \tmp0, c3, c0, 0)
    154  PAN( ldr \tmp0, [sp, #SVC_TTBCR])
    [all …]
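
uaccess_entry saves the CP15 state that gates user-space access (the domain
register DACR, plus TTBCR when software PAN is in use) into the SVC
exception frame and moves it to a known setting; uaccess_exit restores
whatever was live on entry. A C-flavoured sketch of the DACR half, where
read_dacr()/write_dacr() are hypothetical stand-ins for the mrc/mcr
p15, c3, c0, 0 accesses and svc_frame models the SVC_DACR stack slot:

	static unsigned long dacr_reg;			/* models CP15 c3 (DACR)  */
	static unsigned long read_dacr(void) { return dacr_reg; }
	static void write_dacr(unsigned long v) { dacr_reg = v; }

	#define DOMAIN_KERNEL	2			/* illustrative value     */
	#define domain_mask(d)	(3UL << (2 * (d)))	/* 2 DACR bits per domain */

	struct svc_frame { unsigned long saved_dacr; };

	static void uaccess_entry_sketch(struct svc_frame *frame,
					 unsigned long entry_bits)
	{
		unsigned long dacr = read_dacr();	/* mrc p15, 0, tmp0, c3, c0, 0 */

		frame->saved_dacr = dacr;		/* str tmp0, [sp, #SVC_DACR]   */
		dacr &= ~domain_mask(DOMAIN_KERNEL);	/* bic tmp2, tmp0, #...        */
		write_dacr(dacr | entry_bits);		/* switch to the entry setting */
	}

	static void uaccess_exit_sketch(const struct svc_frame *frame)
	{
		write_dacr(frame->saved_dacr);		/* ldr + mcr: restore on exit  */
	}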
|
/arch/loongarch/include/asm/

asmmacro.h
     63  andi \tmp0, \tmp0, FPU_CSR_TM
     64  beqz \tmp0, 2f
     67  andi \tmp0, \tmp0, 0x7
     92  movcf2gr \tmp0, $fcc0
     93  move \tmp1, \tmp0
     94  movcf2gr \tmp0, $fcc1
     96  movcf2gr \tmp0, $fcc2
     98  movcf2gr \tmp0, $fcc3
    100  movcf2gr \tmp0, $fcc4
    102  movcf2gr \tmp0, $fcc5
    [all …]
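
movcf2gr can read only one condition-flag register per instruction, so the
save macro walks $fcc0..$fcc7, copying each 1-bit flag through tmp0 and
accumulating the result in tmp1 so all eight flags land in a single word.
What the sequence computes, as a scalar C sketch:

	#include <stdint.h>

	/* Gather eight 1-bit flags into one word, bit i = $fcc<i>. */
	static uint32_t pack_fcc(const uint8_t fcc[8])
	{
		uint32_t packed = 0;
		int i;

		for (i = 0; i < 8; i++)
			packed |= (uint32_t)(fcc[i] & 1) << i;
		return packed;
	}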
|
/arch/loongarch/kernel/

fpu.S
    100  movcf2gr \tmp0, $fcc0
    101  move \tmp1, \tmp0
    102  movcf2gr \tmp0, $fcc1
    104  movcf2gr \tmp0, $fcc2
    106  movcf2gr \tmp0, $fcc3
    108  movcf2gr \tmp0, $fcc4
    110  movcf2gr \tmp0, $fcc5
    112  movcf2gr \tmp0, $fcc6
    114  movcf2gr \tmp0, $fcc7
    144  andi \tmp0, \tmp0, FPU_CSR_TM
    [all …]
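
fpu.S carries the same gather sequence on its context-save path; restore
runs the mirror image, scattering the packed word back into the flags
(movgr2cf is the inverse instruction). The inverse of the sketch above:

	#include <stdint.h>

	/* Scatter the packed word back out, flag i = bit i. */
	static void unpack_fcc(uint32_t packed, uint8_t fcc[8])
	{
		int i;

		for (i = 0; i < 8; i++)
			fcc[i] = (packed >> i) & 1;
	}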
|
/arch/x86/crypto/

sm4-aesni-avx2-asm_64.S
     76  #define transform_pre(x, lo_t, hi_t, mask4bit, tmp0) \
     77  vpand x, mask4bit, tmp0; \
     81  vpshufb tmp0, lo_t, tmp0; \
     83  vpxor tmp0, x, x;
     87  #define transform_post(x, lo_t, hi_t, mask4bit, tmp0) \
     88  vpandn mask4bit, x, tmp0; \
     92  vpshufb tmp0, lo_t, tmp0; \
     94  vpxor tmp0, x, x;
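
transform_pre/transform_post, like every filter_8bit below, are instances
of one technique: a byte-wise function that is affine over GF(2) can be
split into two 16-entry table lookups, one indexed by each byte's low
nibble and one by its high nibble, with the results xored together; vpshufb
performs all of those lookups in one instruction. A 128-bit intrinsics
sketch of the pattern, which the kernel macros run at xmm/ymm/zmm width:

	#include <tmmintrin.h>	/* SSSE3: _mm_shuffle_epi8 */

	static __m128i filter_8bit_sketch(__m128i x, __m128i lo_t, __m128i hi_t)
	{
		const __m128i mask4bit = _mm_set1_epi8(0x0f);
		__m128i lo, hi;

		lo = _mm_and_si128(x, mask4bit);	/* vpand: low nibbles   */
		hi = _mm_srli_epi16(x, 4);		/* shift high nibbles   */
		hi = _mm_and_si128(hi, mask4bit);	/* mask cross-byte bits */
		lo = _mm_shuffle_epi8(lo_t, lo);	/* vpshufb: lo lookup   */
		hi = _mm_shuffle_epi8(hi_t, hi);	/* vpshufb: hi lookup   */
		return _mm_xor_si128(lo, hi);		/* vpxor: combine       */
	}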
|
sm4-aesni-avx-asm_64.S
     60  #define transform_pre(x, lo_t, hi_t, mask4bit, tmp0) \
     61  vpand x, mask4bit, tmp0; \
     65  vpshufb tmp0, lo_t, tmp0; \
     67  vpxor tmp0, x, x;
     72  #define transform_post(x, lo_t, hi_t, mask4bit, tmp0) \
     73  vpandn mask4bit, x, tmp0; \
     77  vpshufb tmp0, lo_t, tmp0; \
     79  vpxor tmp0, x, x;
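
The AVX copy matches the AVX2 macro line for line at xmm width. In these
excerpts transform_pre isolates the low nibble with a plain vpand, while
transform_post starts from vpandn (an AND with one operand complemented);
both still collapse to the lookup-and-xor shape sketched above.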
|
camellia-aesni-avx-asm_64.S
     34  #define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \
     35  vpand x, mask4bit, tmp0; \
     39  vpshufb tmp0, lo_t, tmp0; \
     41  vpxor tmp0, x, x;
|
camellia-aesni-avx2-asm_64.S
     25  #define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \
     26  vpand x, mask4bit, tmp0; \
     30  vpshufb tmp0, lo_t, tmp0; \
     32  vpxor tmp0, x, x;
|
aria-gfni-avx512-asm_64.S
     44  #define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \
     45  vpandq x, mask4bit, tmp0; \
     49  vpshufb tmp0, lo_t, tmp0; \
     51  vpxorq tmp0, x, x;
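
The AVX-512 variant swaps in the EVEX-encoded vpandq/vpxorq so the macro
can operate on 512-bit zmm registers; the table lookup itself is still
vpshufb.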
|
aria-aesni-avx-asm_64.S
     44  #define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \
     45  vpand x, mask4bit, tmp0; \
     49  vpshufb tmp0, lo_t, tmp0; \
     51  vpxor tmp0, x, x;
|
aria-aesni-avx2-asm_64.S
     60  #define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \
     61  vpand x, mask4bit, tmp0; \
     65  vpshufb tmp0, lo_t, tmp0; \
     67  vpxor tmp0, x, x;
|
/arch/arm64/crypto/

aes-neon.S
    158  .macro mul_by_x_2x, out0, out1, in0, in1, tmp0, tmp1, const
    159  sshr \tmp0\().16b, \in0\().16b, #7
    162  and \tmp0\().16b, \tmp0\().16b, \const\().16b
    165  eor \out0\().16b, \out0\().16b, \tmp0\().16b
    169  .macro mul_by_x2_2x, out0, out1, in0, in1, tmp0, tmp1, const
    170  ushr \tmp0\().16b, \in0\().16b, #6
    173  pmul \tmp0\().16b, \tmp0\().16b, \const\().16b
    176  eor \out0\().16b, \out0\().16b, \tmp0\().16b
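
mul_by_x_2x multiplies each byte of two vectors by x in AES's GF(2^8):
sshr #7 smears every byte's top bit into an all-ones or all-zero mask, the
and with const selects the reduction constant 0x1b where needed, and eor
applies it to the left-shifted input. mul_by_x2_2x does the same for x^2,
reducing the two overflow bits with a polynomial multiply (pmul). Scalar C
sketches of both, assuming const holds 0x1b in each lane:

	#include <stdint.h>

	static uint8_t mul_by_x(uint8_t b)
	{
		uint8_t mask = (uint8_t)((int8_t)b >> 7);	/* sshr #7 */

		return (uint8_t)(b << 1) ^ (mask & 0x1b);	/* and+eor */
	}

	static uint8_t mul_by_x2(uint8_t b)
	{
		uint8_t t = b >> 6;		/* ushr #6: the overflow bits */
		/* pmul with 0x1b: carry-less t*0x1b = b6*0x1b ^ b7*0x36 */
		uint8_t red = (t & 1 ? 0x1b : 0) ^ (t & 2 ? 0x36 : 0);

		return (uint8_t)(b << 2) ^ red;
	}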
|
/arch/arm64/include/asm/

assembler.h
    346  .macro tcr_compute_pa_size, tcr, pos, tmp0, tmp1
    347  mrs \tmp0, ID_AA64MMFR0_EL1
    349  ubfx \tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
    356  cmp \tmp0, \tmp1
    357  csel \tmp0, \tmp1, \tmp0, hi
    358  bfi \tcr, \tmp0, \pos, #3
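
tcr_compute_pa_size reads the CPU's physical-address-range field
(ID_AA64MMFR0_EL1.PARANGE), clamps it to the largest value the caller
supports (csel ..., hi keeps the smaller operand), and inserts the 3-bit
result into TCR at \pos. A C sketch; read_mmfr0() is a hypothetical
stand-in for the mrs, returning a fixed PARANGE of 2 (a 40-bit physical
address space):

	#include <stdint.h>

	#define ID_AA64MMFR0_EL1_PARANGE_SHIFT	0

	static uint64_t read_mmfr0(void)	/* mrs tmp0, ID_AA64MMFR0_EL1 */
	{
		return 0x2;			/* pretend PARANGE = 40-bit PA */
	}

	static uint64_t tcr_compute_pa_size(uint64_t tcr, unsigned int pos,
					    uint64_t parange_max)
	{
		/* ubfx: extract 3 bits at PARANGE_SHIFT */
		uint64_t pa = (read_mmfr0() >> ID_AA64MMFR0_EL1_PARANGE_SHIFT) & 0x7;

		if (pa > parange_max)		/* cmp + csel ..., hi */
			pa = parange_max;

		tcr &= ~(0x7ULL << pos);	/* bfi: insert the    */
		tcr |= pa << pos;		/* 3-bit field at pos */
		return tcr;
	}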
|
/arch/xtensa/kernel/

setup.c
    410  unsigned long tmp0, tmp1, tmp2, tmp3;	(in cpu_reset())
    490  : "=&a"(tmp0), "=&a"(tmp1), "=&a"(tmp2),	(in cpu_reset())
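
cpu_reset() declares tmp0..tmp3 as C locals and binds them to its inline
assembly as "=&a" outputs: Xtensa's "a" constraint selects an address
register, and the "&" early-clobber modifier tells the compiler the
temporary is written before the inputs are dead, so it must not share a
register with any of them. A minimal x86-64 illustration of the same
binding, using the generic "r" class instead of Xtensa's "a":

	static unsigned long early_clobber_demo(unsigned long a, unsigned long b)
	{
		unsigned long tmp, out;

		asm("mov %2, %0\n\t"		/* %0 written first...       */
		    "add %3, %0\n\t"		/* ...while %3 is still read */
		    "mov %0, %1"
		    : "=&r"(tmp), "=r"(out)	/* without '&', %0 could     */
		    : "r"(a), "r"(b));		/* alias %3 and corrupt b    */
		return out;
	}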
|