/linux/arch/riscv/lib/

memcpy.S
    18   andi a3, t6, SZREG-1
    34   sb a5, 0(t6)
    35   addi t6, t6, 1
    54   REG_S a4, 0(t6)
    55   REG_S a5, SZREG(t6)
    77   addi t6, t6, 16*SZREG
    87   or a5, a1, t6
    94   sw a4, 0(t6)
    95   addi t6, t6, 4
    103  sb a4, 0(t6)
    [all …]
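
Across these hits t6 is the destination cursor: advanced by 1 around the byte
store (sb), by 4 around the word store (sw), and by SZREG or 16*SZREG around
the register-sized REG_S stores of the unrolled body (the or at line 87 looks
like a combined alignment test of source and destination). A minimal C sketch
of that head/body/tail shape, assuming co-aligned buffers; the real routine
handles misaligned cases separately:

    #include <stddef.h>
    #include <stdint.h>

    void *memcpy_sketch(void *dst, const void *src, size_t n)
    {
        uint8_t *d = dst;              /* plays the role of t6 */
        const uint8_t *s = src;

        /* head: bytes until the destination is register-aligned (sb) */
        while (n && ((uintptr_t)d & (sizeof(long) - 1))) {
            *d++ = *s++;
            n--;
        }
        /* body: one register at a time (REG_S), unrolled in the original */
        while (n >= sizeof(long)) {
            *(long *)d = *(const long *)s;
            d += sizeof(long);
            s += sizeof(long);
            n -= sizeof(long);
        }
        /* tail: leftover bytes (sb again) */
        while (n--)
            *d++ = *s++;
        return dst;
    }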

uaccess.S
    19   li t6, SR_SUM
    20   csrs CSR_STATUS, t6
    173  csrc CSR_STATUS, t6
    185  li t6, SR_SUM
    186  csrs CSR_STATUS, t6
    208  csrc CSR_STATUS, t6
    229  csrs CSR_STATUS, t6
    233  csrs CSR_STATUS, t6
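
The recurring pattern is a bracket around user-memory access: csrs sets the
SUM bit in the status CSR so supervisor code may touch user-mapped pages, and
csrc clears it again afterwards. A hedged C rendering with inline assembly;
SR_SUM is bit 18 of sstatus per the RISC-V privileged spec, and the function
names are illustrative, not the kernel's:

    #define SR_SUM (1UL << 18)  /* sstatus.SUM: Supervisor User Memory access */

    static inline void user_access_enable(void)
    {
        /* csrs CSR_STATUS, t6: set the SUM bit */
        asm volatile("csrs sstatus, %0" : : "r"(SR_SUM) : "memory");
    }

    static inline void user_access_disable(void)
    {
        /* csrc CSR_STATUS, t6: clear it once the user copy is done */
        asm volatile("csrc sstatus, %0" : : "r"(SR_SUM) : "memory");
    }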

/linux/arch/alpha/lib/

stxcpy.S
    78   and t12, 0x80, t6    # e0 :
    79   bne t6, 1f           # .. e1 (zdb)
    84   subq t12, 1, t6      # .. e1 :
    86   or t12, t6, t8       # .. e1 :
    142  or t1, t6, t6        # e0 :
    147  mskql t6, a1, t6     # e0 : mask out the bits we have
    224  or t6, t12, t8       # e0 :
    245  mov zero, t6         # e0 :
    248  lda t6, -1           # .. e1 :
    249  mskql t6, a0, t6     # e0 :
    [all …]
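
stxcpy.S copies a string one quadword at a time, so its central question is
whether the current word contains the terminating NUL; t12 carries the
per-byte zero mask (Alpha computes it with cmpbge) and mskql trims the
partial words at either end. A portable C model of that zero-byte test, the
standard bit trick:

    #include <stdint.h>

    /* Nonzero iff some byte of v is zero: subtracting 1 from every byte
     * only borrows through a zero byte, and the final mask keeps the
     * sign bits that flipped without the byte's own top bit being set. */
    static uint64_t word_has_zero_byte(uint64_t v)
    {
        return (v - 0x0101010101010101ULL) & ~v & 0x8080808080808080ULL;
    }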

stxncpy.S
    92   bne t6, 1f           # .. e1 (zdb)
    98   or t12, t6, t8       # e0 :
    165  or t0, t6, t6        # e1 : mask original data for zero test
    168  lda t6, -1           # e0 :
    171  mskql t6, a1, t6     # e0 : mask out bits already seen
    273  sll t10, t6, t6      # e0 :
    274  and t6, 0xff, t6     # e0 :
    275  bne t6, 1f           # .. e1 :
    302  mov zero, t6         # e0 :
    305  lda t6, -1           # .. e1 :
    [all …]

ev6-stxcpy.S
    94   bne t6, 1f           # U : (stall)
    99   subq t12, 1, t6      # E :
    101  or t12, t6, t8       # E : (stall)
    165  or t1, t6, t6        # E :
    170  mskql t6, a1, t6     # U : mask out the bits we have
    246  bne t6, 1f           # U : (stall)
    249  subq t12, 1, t6      # E :
    274  mov zero, t6         # E :
    277  lda t6, -1           # E :
    279  mskql t6, a0, t6     # U :
    [all …]

ev6-stxncpy.S
    123  subq t12, 1, t6      # E :
    205  or t0, t6, t6        # E : mask original data for zero test (stall)
    209  lda t6, -1           # E :
    213  mskql t6, a1, t6     # U : mask out bits already seen
    302  subq t12, 1, t6      # E :
    319  sll t10, t6, t6      # U : (stall)
    320  and t6, 0xff, t6     # E : (stall)
    346  mov zero, t6         # E :
    349  lda t6, -1           # E :
    351  mskql t6, a0, t6     # U :
    [all …]

strrchr.S
    23   mov zero, t6         # .. e1 : t6 is last match aligned addr
    46   cmovne t3, v0, t6    # .. e1 : save previous comparisons match
    63   cmovne t3, v0, t6    # e0 :
    80   addq t6, t0, v0      # .. e1 : add our aligned base ptr to the mix
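
The comments spell the algorithm out: a single forward scan in which t6
remembers the aligned address of the most recent match (cmovne overwrites it
on every new hit), with the byte offset added back at the end. The same logic
in C, as a sketch:

    #include <stddef.h>

    char *strrchr_sketch(const char *s, int c)
    {
        const char *last = NULL;   /* plays the role of t6 */

        do {
            if (*s == (char)c)
                last = s;          /* "save previous comparisons match" */
        } while (*s++);            /* also matches '\0' itself, as required */

        return (char *)last;
    }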

ev67-strrchr.S
    40   mov zero, t6         # E : t6 is last match aligned addr
    68   cmovne t3, v0, t6    # E : save previous comparisons match
    94   cmovne t3, v0, t6    # E :
    105  addq t6, t5, v0      # E : and add to quadword address

/linux/arch/arm64/crypto/

crct10dif-ce-core.S
    86   t6 .req v20
    144  pmull t6.8h, t6.8b, fold_consts.8b     // J = A3*B
    158  pmull2 t6.8h, t6.16b, fold_consts.16b  // J = A3*B
    164  eor t6.16b, t6.16b, t9.16b             // N = I + J
    168  uzp1 t7.2d, t6.2d, t3.2d
    169  uzp2 t6.2d, t6.2d, t3.2d
    179  and t6.16b, t6.16b, k00_16.16b
    186  zip2 t3.2d, t7.2d, t6.2d
    187  zip1 t6.2d, t7.2d, t6.2d
    191  ext t6.16b, t6.16b, t6.16b, #13
    [all …]
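
The pmull lines ("J = A3*B") are byte-lane partial products of the wide
carry-less multiplication that folds buffer data into the CRC remainder; the
eor, uzp and zip instructions then add (XOR) and re-interleave the
partial-product columns. A software model of one such lane product:

    #include <stdint.h>

    /* One PMULL byte lane: multiply two GF(2) polynomials, combining
     * partial products with XOR instead of addition, so nothing ever
     * carries between bit positions. */
    static uint16_t pmull8(uint8_t a, uint8_t b)
    {
        uint16_t acc = 0;

        for (int i = 0; i < 8; i++)
            if (b >> i & 1)
                acc ^= (uint16_t)a << i;
        return acc;
    }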

aes-neonbs-core.S
    250  t0, t1, t2, t3, t4, t5, t6, t7, inv
    265  eor \x6\().16b, \x6\().16b, \t6\().16b
    275  eor \t6\().16b, \t6\().16b, \x5\().16b
    293  eor \x3\().16b, \x3\().16b, \t6\().16b
    298  eor \x2\().16b, \x3\().16b, \t6\().16b
    306  t0, t1, t2, t3, t4, t5, t6, t7
    312  eor \t6\().16b, \t6\().16b, \x6\().16b
    323  eor \x0\().16b, \x0\().16b, \t6\().16b
    324  eor \x1\().16b, \x1\().16b, \t6\().16b
    330  eor \x4\().16b, \x4\().16b, \t6\().16b
    [all …]

ghash-ce-core.S
    27   t6 .req v13
    102  pmull\t t6.8h, \ad, \b2\().\nb  // G = A*B2
    109  eor t5.16b, t5.16b, t6.16b      // M = G + H
    114  uzp1 t6.2d, t7.2d, t9.2d
    124  eor t6.16b, t6.16b, t7.16b
    128  eor t6.16b, t6.16b, t7.16b
    132  zip2 t9.2d, t6.2d, t7.2d
    133  zip1 t7.2d, t6.2d, t7.2d
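
Here the same carry-less arithmetic is arranged as a multi-limb product:
the pmulls produce partial products ("G = A*B2") and the eor lines are the
GF(2) additions that combine them ("M = G + H"). For illustration, a
Karatsuba-style decomposition, one standard way to assemble a wide
carry-less product from narrower ones (a sketch, not this file's exact
limb arrangement):

    #include <stdint.h>

    static uint64_t clmul32(uint32_t a, uint32_t b)
    {
        uint64_t acc = 0;

        for (int i = 0; i < 32; i++)
            if (b >> i & 1)
                acc ^= (uint64_t)a << i;
        return acc;
    }

    /* With A = a1*x^32 + a0 and B = b1*x^32 + b0, the cross term
     * a1*b0 + a0*b1 equals (a0+a1)*(b0+b1) + lo + hi, every "+" being
     * XOR; r[0] is the low half of the product, r[1] the high half. */
    static void clmul64(uint64_t a, uint64_t b, uint64_t r[2])
    {
        uint64_t lo  = clmul32((uint32_t)a, (uint32_t)b);
        uint64_t hi  = clmul32(a >> 32, b >> 32);
        uint64_t mid = clmul32((uint32_t)(a ^ (a >> 32)),
                               (uint32_t)(b ^ (b >> 32))) ^ lo ^ hi;

        r[0] = lo ^ (mid << 32);
        r[1] = hi ^ (mid >> 32);
    }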

/linux/arch/x86/crypto/

camellia-aesni-avx-asm_64.S
    73   filter_8bit(x0, t0, t1, t7, t6); \
    111  vpxor t6, t6, t6; \
    123  vpshufb t6, t0, t0; \
    124  vpshufb t6, t1, t1; \
    125  vpshufb t6, t2, t2; \
    126  vpshufb t6, t3, t3; \
    127  vpshufb t6, t4, t4; \
    129  vpshufb t6, t7, t7; \
    165  vpshufb t6, t5, t5; \
    166  vpshufb t6, t3, t6; \
    [all …]
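
In the vpshufb group t6 is the shuffle control: line 111 zeroes it, and in
this AT&T-syntax file the first operand is the selector, so an all-zero t6
makes every vpshufb broadcast byte 0 of its source register. A C model of
vpshufb's per-byte semantics (128-bit form):

    #include <stdint.h>

    /* Each output byte is picked by the low 4 bits of its selector
     * byte, or forced to zero when the selector's top bit is set. */
    static void pshufb_model(uint8_t out[16], const uint8_t in[16],
                             const uint8_t sel[16])
    {
        for (int i = 0; i < 16; i++)
            out[i] = (sel[i] & 0x80) ? 0 : in[sel[i] & 0x0f];
    }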

camellia-aesni-avx2-asm_64.S
    86   filter_8bit(x0, t5, t6, t7, t4); \
    87   filter_8bit(x7, t5, t6, t7, t4); \
    94   filter_8bit(x2, t5, t6, t7, t4); \
    102  vextracti128 $1, x2, t6##_x; \
    121  vaesenclast t4##_x, t6##_x, t6##_x; \
    122  vinserti128 $1, t6##_x, x2, x2; \
    176  vpsrldq $6, t0, t6; \
    178  vpshufb t7, t6, t6; \
    192  vpxor t6, x1, x1; \
    195  vpsrldq $7, t0, t6; \
    [all …]

/linux/arch/riscv/kernel/

mcount.S
    67   mv t6, s0
    71   mv a0, t6
    90   la t6, ftrace_graph_entry_stub
    91   bne t2, t6, do_ftrace_graph_caller
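
Lines 90-91 gate the function-graph tracer: the address of the default stub
is loaded into t6, and do_ftrace_graph_caller is entered only if the
registered entry hook (held in t2) differs from it. Roughly, in C, with
illustrative declarations rather than the kernel's exact types:

    typedef int (*graph_entry_fn)(void *trace);

    extern int ftrace_graph_entry_stub(void *trace);
    extern graph_entry_fn ftrace_graph_entry;

    static int graph_tracer_registered(void)
    {
        /* bne t2, t6, do_ftrace_graph_caller */
        return ftrace_graph_entry != ftrace_graph_entry_stub;
    }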

/linux/arch/mips/kernel/

scall32-o32.S
    63   load_a5: user_lw(t6, 20(t0))  # argument #6 from usp
    69   sw t6, 20(sp)                 # argument #6 to ksp
    159  li t6, 0
    195  lw t6, 28(sp)
    198  sw t6, 24(sp)
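
Under the o32 ABI only four syscall arguments travel in registers, so
arguments 5-8 sit on the user stack; the entry path loads argument #6 into
t6 with a fault-handled user load and re-stores it on the kernel stack for
the handler. A hedged sketch of that one step using the kernel's get_user
(the function name here is illustrative):

    #include <linux/uaccess.h>

    /* Fetch o32 syscall argument #6 (offset 20) from the user stack
     * pointer; returns 0 or -EFAULT, mirroring the fixup path that
     * backs user_lw in scall32-o32.S. */
    static int fetch_arg6(unsigned long usp, long *arg6)
    {
        return get_user(*arg6, (long __user *)(usp + 20));
    }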

/linux/arch/ia64/lib/

copy_page_mck.S
    82   #define t6 t2   // alias!
    87   #define t12 t6  // alias!
    158  (p[D]) ld8 t6 = [src0], 3*8
    165  (p[D]) st8 [dst0] = t6, 3*8

copy_page.S
    45   t5[PIPE_DEPTH], t6[PIPE_DEPTH], t7[PIPE_DEPTH], t8[PIPE_DEPTH]
    84   (p[0]) ld8 t6[0]=[src2],16
    85   (EPI) st8 [tgt2]=t6[PIPE_DEPTH-1],16
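
copy_page.S is software-pipelined over ia64's rotating registers:
t6[PIPE_DEPTH] declares a rotating file, the predicated load fills stage 0
while the epilogue-predicated store drains stage PIPE_DEPTH-1, so loads run
PIPE_DEPTH iterations ahead of stores. The same schedule modeled in C with a
small circular buffer:

    #include <stddef.h>
    #include <stdint.h>

    #define PIPE_DEPTH 4   /* illustrative; the real depth is set in the file */

    static void copy_pipelined(uint64_t *dst, const uint64_t *src, size_t n)
    {
        uint64_t stage[PIPE_DEPTH];   /* role of the rotating t6[] */

        for (size_t i = 0; i < n + PIPE_DEPTH; i++) {
            if (i < n)                        /* prologue + body: (p[0]) ld8 */
                stage[i % PIPE_DEPTH] = src[i];
            if (i >= PIPE_DEPTH)              /* body + epilogue: (EPI) st8 */
                dst[i - PIPE_DEPTH] = stage[i % PIPE_DEPTH];
        }
    }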

memcpy_mck.S
    50   #define t6 t2   // alias!
    56   #define t12 t6  // alias!
    238  EX(.ex_handler, (p[D]) ld8 t6 = [src0], 3*8)
    245  EX(.ex_handler, (p[D]) st8 [dst0] = t6, 3*8)
    440  EK(.ex_handler_short, (p8) ld1 t6=[src1],2)
    445  EK(.ex_handler_short, (p8) st1 [dst1]=t6,2)
    487  EX(.ex_handler_short, (p11) ld1 t6=[src1],2)
    494  EX(.ex_handler_short, (p11) st1 [dst1] = t6,2)

/linux/scripts/

makelst
    29   t6=`printf "%lu" $((0x$t4 - 0x$t5))`
    32   $3 -r --source --adjust-vma=${t6:-0} $1

/linux/arch/sparc/lib/

blockops.S
    28   #define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
    32   ldd [src + offset + 0x00], t6; \
    36   std t6, [dst + offset + 0x00];
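
MIRROR_BLOCK, like the MOVE_BIGCHUNK/MOVE_BIGALIGNCHUNK macros in the next
two files, takes eight temporaries so a whole chunk can be loaded before any
of it is stored, giving the pipeline room between the ldd and std pairs. The
shape in C, assuming 8-byte-aligned buffers (each uint64_t stands in for one
ldd/std register pair):

    #include <stdint.h>

    /* One unrolled 32-byte chunk: load everything, then store it. */
    static void mirror_block32(uint64_t *dst, const uint64_t *src)
    {
        uint64_t t0 = src[0], t1 = src[1], t2 = src[2], t3 = src[3];

        dst[0] = t0;
        dst[1] = t1;
        dst[2] = t2;
        dst[3] = t3;
    }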

copy_user.S
    68   #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
    72   LD(ldd, src, offset + 0x18, t6, bigchunk_fault) \
    79   ST(st, dst, offset + 0x18, t6, bigchunk_fault) \
    83   #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
    87   LD(ldd, src, offset + 0x18, t6, bigchunk_fault) \
    91   ST(std, dst, offset + 0x18, t6, bigchunk_fault)

memcpy.S
    19   #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
    23   ldd [%src + (offset) + 0x18], %t6; \
    30   st %t6, [%dst + (offset) + 0x18]; \
    33   #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
    37   ldd [%src + (offset) + 0x18], %t6; \
    41   std %t6, [%dst + (offset) + 0x18];

checksum_32.S
    164  #define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
    170  EX(ldd [src + off + 0x18], t6); \
    178  EX(std t6, [dst + off + 0x18]); \
    179  addxcc t6, sum, sum; \
    186  #define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
    190  EX(ldd [src + off + 0x18], t6); \
    203  EX(st t6, [dst + off + 0x18]); \
    204  addxcc t6, sum, sum; \
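
CSUMCOPY_BIGCHUNK interleaves the copy with the Internet-checksum
accumulation: each word is stored and then added into sum with addxcc, whose
carry-in chains the additions so overflow bits feed the ones'-complement sum
instead of being lost. A C model that folds the carry back in at every step,
which ends in the same sum as deferring the fold:

    #include <stddef.h>
    #include <stdint.h>

    static uint32_t csum_and_copy(uint32_t *dst, const uint32_t *src,
                                  size_t nwords, uint32_t sum)
    {
        for (size_t i = 0; i < nwords; i++) {
            uint64_t t = (uint64_t)sum + src[i];  /* addxcc: add with carry */

            dst[i] = src[i];                      /* st/std: the copy half */
            sum = (uint32_t)t + (uint32_t)(t >> 32);  /* end-around carry */
        }
        return sum;
    }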

/linux/arch/arm/crypto/

aes-neonbs-core.S
    312  veor \x6, \x6, \t6
    322  veor \t6, \t6, \x5
    340  veor \x3, \x3, \t6
    345  veor \x2, \x3, \t6
    365  veor \x6, \x6, \t6
    368  vext.8 \t6, \x6, \x6, #8
    372  veor \t6, \t6, \x6
    383  veor \x0, \x0, \t6
    384  veor \x1, \x1, \t6
    390  veor \x4, \x4, \t6
    [all …]

/linux/drivers/media/pci/cx88/

cx88-dsp.c
    72   u32 t2, t4, t6, t8;                       (in int_cos())
    87   t6 = t4 * x / 32768 * x / 32768 / 5 / 6;
    88   t8 = t6 * x / 32768 * x / 32768 / 7 / 8;
    89   ret = 32768 - t2 + t4 - t6 + t8;
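
int_cos() is a Q15 fixed-point Taylor expansion of cosine, with 32768
standing for 1.0 and each term derived from the previous one: t6 is
t4·x²/(5·6), t8 is t6·x²/(7·8), summed with alternating signs. Reconstructed
as a sketch; the t2 and t4 lines are not among the hits above and are assumed
to follow the same pattern:

    #include <linux/types.h>

    /* cos x ≈ 1 - x²/2! + x⁴/4! - x⁶/6! + x⁸/8! in Q15; dividing by
     * 32768 after each multiply keeps intermediates in u32 range. */
    static u32 int_cos_sketch(u32 x)
    {
        u32 t2, t4, t6, t8;

        t2 = x * x / 32768 / 2;                   /* assumed, not in the hits */
        t4 = t2 * x / 32768 * x / 32768 / 3 / 4;  /* assumed, not in the hits */
        t6 = t4 * x / 32768 * x / 32768 / 5 / 6;
        t8 = t6 * x / 32768 * x / 32768 / 7 / 8;

        return 32768 - t2 + t4 - t6 + t8;         /* alternating series */
    }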