/arch/x86/crypto/

```
aria-aesni-avx-asm_64.S
  173:  y0, y1, y2, y3, \                                (argument)
  201:  y0, y1, y2, y3, \
  224:  y0, y1, y2, y3, \                                (argument)
  387:  vpxor y3, x7, x7; \
  393:  vpxor y7, y3, y3; \
  411:  vpxor x3, y3, y3; \
  417:  vpxor y3, x7, x7;
  421:  y0, y1, y2, y3, \                                (argument)
  476:  y0, y1, y2, y3, \                                (argument)
  531:  y0, y1, y2, y3, \                                (argument)
  [all …]
```
|
```
aria-aesni-avx2-asm_64.S
  189:  y0, y1, y2, y3, \                                (argument)
  217:  y0, y1, y2, y3, \
  240:  y0, y1, y2, y3, \                                (argument)
  428:  vpxor y3, x7, x7; \
  434:  vpxor y7, y3, y3; \
  452:  vpxor x3, y3, y3; \
  458:  vpxor y3, x7, x7;
  462:  y0, y1, y2, y3, \                                (argument)
  516:  y0, y1, y2, y3, \                                (argument)
  570:  y0, y1, y2, y3, \                                (argument)
  [all …]
```
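
The `vpxor` chains matched in both ARIA files (lines 387-417 and 428-458 above) implement the cipher's diffusion layer: the 16-byte state is multiplied by a fixed binary matrix, which reduces to XORing selected state bytes together. Below is a minimal C sketch of that technique; the `diffuse16` name and the `row_mask` table are hypothetical illustrations of the shape of the computation, not ARIA's actual matrix.

```c
#include <stdint.h>

/*
 * Multiply a 16-byte state by a 16x16 binary matrix over GF(2).
 * row_mask[i] selects which input bytes are XORed into output byte i.
 * The mask values a caller would pass are hypothetical here; ARIA's
 * real diffusion matrix is defined in the cipher specification.
 */
static void diffuse16(uint8_t out[16], const uint8_t in[16],
		      const uint16_t row_mask[16])
{
	for (int i = 0; i < 16; i++) {
		uint8_t acc = 0;

		for (int j = 0; j < 16; j++)
			if (row_mask[i] & (1u << j))
				acc ^= in[j];	/* XOR = addition in GF(2) */
		out[i] = acc;
	}
}
```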
|
```
aria-gfni-avx512-asm_64.S
  173:  y0, y1, y2, y3, \                                (argument)
  196:  y0, y1, y2, y3, \                                (argument)
  201:  y0, y1, y2, y3, \
  224:  y0, y1, y2, y3, \                                (argument)
  297:  vpxorq t0, y3, y3; \
  360:  vgf2p8affineqb $(tf_x2_const), t4, y3, y3; \
  362:  vgf2p8affineinvqb $0, t2, y3, y3; \
  389:  vpxorq y3, x7, x7; \
  395:  vpxorq y7, y3, y3; \
  413:  vpxorq x3, y3, y3; \
  [all …]
```
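
The GFNI variant computes its S-boxes with `vgf2p8affineqb`, which applies an affine transform over GF(2) to every byte: an 8x8 bit-matrix multiply followed by XOR with a constant (`vgf2p8affineinvqb` additionally inverts each byte in GF(2^8) first). The sketch below models the underlying math, assuming a straightforward row-per-byte packing rather than the instruction's exact bit numbering, so treat it as an illustration and not a bit-exact emulator.

```c
#include <stdint.h>

/*
 * Model of a GF(2) affine transform on one byte: out = M*x ^ c, where
 * M is an 8x8 bit matrix packed one row per byte of 'matrix'.  This
 * shows the math behind vgf2p8affineqb; the real instruction's row
 * and bit ordering differ in detail.
 */
static uint8_t gf2_affine_byte(uint64_t matrix, uint8_t x, uint8_t c)
{
	uint8_t out = 0;

	for (int row = 0; row < 8; row++) {
		uint8_t v = (uint8_t)(matrix >> (8 * row)) & x;

		/* Parity of the masked bits = dot product over GF(2). */
		v ^= v >> 4;
		v ^= v >> 2;
		v ^= v >> 1;
		out |= (uint8_t)(v & 1) << row;
	}
	return out ^ c;		/* add the affine constant */
}
```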
|
```
camellia-aesni-avx-asm_64.S
  247:  two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
  249:  two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
  251:  two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
  256:  two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
  258:  two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
  260:  two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
  492:  vpxor 4 * 16(rio), x0, y3; \
  508:  byteslice_16x16b(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \
  522:  vmovdqu y3, 3 * 16(mem_cd); \
  543:  vpxor x0, y3, y3; \
  [all …]
```
|
```
camellia-aesni-avx2-asm_64.S
  279:  two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
  281:  two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
  283:  two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
  288:  two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
  290:  two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
  292:  two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
  524:  vpxor 4 * 32(rio), x0, y3; \
  554:  vmovdqu y3, 3 * 32(mem_cd); \
  564:  y3, y7, x3, x7, stack_tmp0, stack_tmp1); \
  575:  vpxor x0, y3, y3; \
  [all …]
```
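
Both Camellia files route most of their matches through `two_roundsm16`/`two_roundsm32`, macros that run two Feistel rounds across 16 or 32 blocks in parallel. Reduced to a single block, the dataflow looks like the hedged sketch below; `round_f` is a hypothetical placeholder for Camellia's real F-function (S-box substitution plus the linear P permutation), which is omitted.

```c
#include <stdint.h>

/* Hypothetical stand-in for the cipher's round function F(half, subkey). */
typedef uint64_t (*round_f)(uint64_t half, uint64_t subkey);

/*
 * Two Feistel rounds without an explicit swap: each half absorbs
 * F of the other half in turn.  The assembly macros above perform
 * the same dataflow on many blocks at once in SIMD registers.
 */
static void feistel_two_rounds(uint64_t *left, uint64_t *right,
			       uint64_t k1, uint64_t k2, round_f f)
{
	*right ^= f(*left, k1);		/* round 1 */
	*left  ^= f(*right, k2);	/* round 2 */
}
```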
|
/arch/sparc/include/asm/

```
sfp-machine_32.h
  108:  #define __FP_FRAC_ADD_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0) \        (argument)
  121:  "rI" ((USItype)(y3)), \
  133:  #define __FP_FRAC_SUB_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0) \        (argument)
  146:  "rI" ((USItype)(y3)), \
  160:  #define __FP_FRAC_DEC_4(x3,x2,x1,x0,y3,y2,y1,y0) __FP_FRAC_SUB_4(x3,x2,x1,x0,x3,x2,x1,x0,y3,y2,y1,y…  (argument)
```
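
These soft-float helpers do multi-limb fraction arithmetic: `__FP_FRAC_ADD_4` adds two 128-bit fractions held as four 32-bit limbs (most-significant limb first in the argument list) using an inline-assembly carry chain, `__FP_FRAC_SUB_4` is the borrowing counterpart, and `__FP_FRAC_DEC_4` subtracts in place. A portable C sketch of the addition, with the hypothetical name `fp_frac_add_4` and limb 0 stored least-significant:

```c
#include <stdint.h>

/*
 * 128-bit add expressed as four 32-bit limbs with carry propagation,
 * the operation __FP_FRAC_ADD_4 performs with chained carrying adds.
 * Limb 0 is least significant, mirroring the r0/x0/y0 arguments.
 */
static void fp_frac_add_4(uint32_t r[4], const uint32_t x[4],
			  const uint32_t y[4])
{
	uint64_t acc = 0;

	for (int i = 0; i < 4; i++) {
		acc += (uint64_t)x[i] + y[i];
		r[i] = (uint32_t)acc;	/* low 32 bits of this limb */
		acc >>= 32;		/* carry into the next limb */
	}
}
```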
|
/arch/arm/crypto/

```
aes-neonbs-core.S
  151:  .macro mul_gf4_n_gf4, x0, x1, y0, y1, t0, x2, x3, y2, y3, t1
  153:  veor  \t1, \y2, \y3
  161:  vand  \x2, \x2, \y3
  169:  y0, y1, y2, y3, t0, t1, t2, t3
  174:  veor  \y1, \y1, \y3
  175:  mul_gf4_n_gf4  \t0, \t1, \y0, \y1, \t3, \x2, \x3, \y2, \y3, \t2
  182:  mul_gf4_n_gf4  \t0, \t1, \y0, \y1, \t3, \x6, \x7, \y2, \y3, \t2
  184:  veor  \y1, \y1, \y3
```
|
/arch/arm64/crypto/

```
aes-neonbs-core.S
   97:  .macro mul_gf4_n_gf4, x0, x1, y0, y1, t0, x2, x3, y2, y3, t1
   99:  eor   \t1, \y2, \y3
  107:  and   \x2, \x2, \y3
  115:  y0, y1, y2, y3, t0, t1, t2, t3
  120:  eor   \y1, \y1, \y3
  121:  mul_gf4_n_gf4  \t0, \t1, \y0, \y1, \t3, \x2, \x3, \y2, \y3, \t2
  128:  mul_gf4_n_gf4  \t0, \t1, \y0, \y1, \t3, \x6, \x7, \y2, \y3, \t2
  130:  eor   \y1, \y1, \y3
```
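
The arm and arm64 files define the same bit-sliced S-box construction, differing only in mnemonics (`veor`/`vand` versus `eor`/`and`). `mul_gf4_n_gf4` builds on bit-sliced GF(2^2) arithmetic: each register lane holds one bit of many field elements, so a short AND/XOR sequence multiplies them all at once. A hedged C sketch of that core step, on 64 elements per `uint64_t` instead of 128-bit NEON registers:

```c
#include <stdint.h>

/*
 * Bit-sliced GF(4) multiply, 64 elements at a time.  An element
 * a1*x + a0 of GF(2^2) = GF(2)[x]/(x^2 + x + 1) is stored with its
 * a0 bits gathered in *lo and its a1 bits in *hi.  Three ANDs and
 * four XORs compute all 64 products in parallel; the NEON macros
 * above apply the same idea to 128-bit vectors.
 */
static void mul_gf4_bitsliced(uint64_t *lo, uint64_t *hi,
			      uint64_t ylo, uint64_t yhi)
{
	uint64_t t  = (*lo ^ *hi) & (ylo ^ yhi);	/* (a0^a1)(b0^b1) */
	uint64_t ll = *lo & ylo;			/* a0*b0 */
	uint64_t hh = *hi & yhi;			/* a1*b1 */

	*hi = t ^ ll;	/* x coefficient: a0b1 ^ a1b0 ^ a1b1 */
	*lo = ll ^ hh;	/* constant term: a0b0 ^ a1b1 */
}
```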
|