/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cast5 Cipher 16-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 */

#include <linux/linkage.h>
#include <asm/frame.h>

.file "cast5-avx-x86_64-asm_64.S"

.extern cast_s1
.extern cast_s2
.extern cast_s3
.extern cast_s4

/* structure of crypto context */
#define km	0
#define kr	(16*4)
#define rr	((16*4)+16)

/* s-boxes */
#define s1	cast_s1
#define s2	cast_s2
#define s3	cast_s3
#define s4	cast_s4

/**********************************************************************
  16-way AVX cast5
 **********************************************************************/
#define CTX %r15

#define RL1 %xmm0
#define RR1 %xmm1
#define RL2 %xmm2
#define RR2 %xmm3
#define RL3 %xmm4
#define RR3 %xmm5
#define RL4 %xmm6
#define RR4 %xmm7

#define RX %xmm8

#define RKM  %xmm9
#define RKR  %xmm10
#define RKRF %xmm11
#define RKRR %xmm12

#define R32  %xmm13
#define R1ST %xmm14

#define RTMP %xmm15

#define RID1  %rdi
#define RID1d %edi
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1  %r8
#define RFS1d %r8d
#define RFS2  %r9
#define RFS2d %r9d
#define RFS3  %r10
#define RFS3d %r10d


#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
	movzbl src ## bh, RID1d; \
	movzbl src ## bl, RID2d; \
	shrq $16, src; \
	movl s1(, RID1, 4), dst ## d; \
	op1 s2(, RID2, 4), dst ## d; \
	movzbl src ## bh, RID1d; \
	movzbl src ## bl, RID2d; \
	interleave_op(il_reg); \
	op2 s3(, RID1, 4), dst ## d; \
	op3 s4(, RID2, 4), dst ## d;

#define dummy(d) /* do nothing */

#define shr_next(reg) \
	shrq $16, reg;

#define F_head(a, x, gi1, gi2, op0) \
	op0 a, RKM, x; \
	vpslld RKRF, x, RTMP; \
	vpsrld RKRR, x, x; \
	vpor RTMP, x, x; \
	\
	vmovq x, gi1; \
	vpextrq $1, x, gi2;

#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
	lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
	lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
	\
	lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none); \
	shlq $32, RFS2; \
	orq RFS1, RFS2; \
	lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none); \
	shlq $32, RFS1; \
	orq RFS1, RFS3; \
	\
	vmovq RFS2, x; \
	vpinsrq $1, RFS3, x, x;

#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
	F_head(b1, RX, RGI1, RGI2, op0); \
	F_head(b2, RX, RGI3, RGI4, op0); \
	\
	F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
	F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \
	\
	vpxor a1, RX, a1; \
	vpxor a2, RTMP, a2;

#define F1_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)

#define subround(a1, b1, a2, b2, f) \
	F ## f ## _2(a1, b1, a2, b2);
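
/*
 * For reference, a C-level sketch of the three CAST5 round-function
 * flavours that the F1_2/F2_2/F3_2 macros above evaluate, two 128-bit
 * vectors (eight 32-bit block halves) at a time.  This mirrors the
 * generic CAST5 code; f1/f2/f3, rol32 and u32 are illustrative names
 * here, not symbols used by this file:
 *
 *	u32 f1(u32 d, u32 km, u8 kr)	// vpaddd; xorl, subl, addl
 *	{
 *		u32 i = rol32(km + d, kr);
 *		return ((s1[i >> 24] ^ s2[(i >> 16) & 0xff]) -
 *			s3[(i >> 8) & 0xff]) + s4[i & 0xff];
 *	}
 *
 *	u32 f2(u32 d, u32 km, u8 kr)	// vpxor; subl, addl, xorl
 *	{
 *		u32 i = rol32(km ^ d, kr);
 *		return ((s1[i >> 24] - s2[(i >> 16) & 0xff]) +
 *			s3[(i >> 8) & 0xff]) ^ s4[i & 0xff];
 *	}
 *
 *	u32 f3(u32 d, u32 km, u8 kr)	// vpsubd; addl, xorl, subl
 *	{
 *		u32 i = rol32(km - d, kr);
 *		return ((s1[i >> 24] + s2[(i >> 16) & 0xff]) ^
 *			s3[(i >> 8) & 0xff]) - s4[i & 0xff];
 *	}
 *
 * Note that the rotation counts loaded into RKR are pre-biased by 16
 * (see enc_preload_rkr/dec_preload_rkr below), so the word handed to
 * lookup_32bit is 'i' rotated left by a further 16 bits; the s-box
 * indexing order in lookup_32bit compensates for this.
 */
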
#define round(l, r, n, f) \
	vbroadcastss (km+(4*n))(CTX), RKM; \
	vpand R1ST, RKR, RKRF; \
	vpsubq RKRF, R32, RKRR; \
	vpsrldq $1, RKR, RKR; \
	subround(l ## 1, r ## 1, l ## 2, r ## 2, f); \
	subround(l ## 3, r ## 3, l ## 4, r ## 4, f);

#define enc_preload_rkr() \
	vbroadcastss .L16_mask, RKR; \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor kr(CTX), RKR, RKR;

#define dec_preload_rkr() \
	vbroadcastss .L16_mask, RKR; \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor kr(CTX), RKR, RKR; \
	vpshufb .Lbswap128_mask, RKR, RKR;

#define transpose_2x4(x0, x1, t0, t1) \
	vpunpckldq x1, x0, t0; \
	vpunpckhdq x1, x0, t1; \
	\
	vpunpcklqdq t1, t0, x0; \
	vpunpckhqdq t1, t0, x1;

#define inpack_blocks(x0, x1, t0, t1, rmask) \
	vpshufb rmask, x0, x0; \
	vpshufb rmask, x1, x1; \
	\
	transpose_2x4(x0, x1, t0, t1)

#define outunpack_blocks(x0, x1, t0, t1, rmask) \
	transpose_2x4(x0, x1, t0, t1) \
	\
	vpshufb rmask, x0, x0; \
	vpshufb rmask, x1, x1;

.section	.rodata.cst16.bswap_mask, "aM", @progbits, 16
.align 16
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.section	.rodata.cst16.bswap128_mask, "aM", @progbits, 16
.align 16
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.section	.rodata.cst16.bswap_iv_mask, "aM", @progbits, 16
.align 16
.Lbswap_iv_mask:
	.byte 7, 6, 5, 4, 3, 2, 1, 0, 7, 6, 5, 4, 3, 2, 1, 0

.section	.rodata.cst4.16_mask, "aM", @progbits, 4
.align 4
.L16_mask:
	.byte 16, 16, 16, 16
.section	.rodata.cst4.32_mask, "aM", @progbits, 4
.align 4
.L32_mask:
	.byte 32, 0, 0, 0
.section	.rodata.cst4.first_mask, "aM", @progbits, 4
.align 4
.Lfirst_mask:
	.byte 0x1f, 0, 0, 0

.text

SYM_FUNC_START_LOCAL(__cast5_enc_blk16)
	/* input:
	 *	%rdi: ctx
	 *	RL1: blocks 1 and 2
	 *	RR1: blocks 3 and 4
	 *	RL2: blocks 5 and 6
	 *	RR2: blocks 7 and 8
	 *	RL3: blocks 9 and 10
	 *	RR3: blocks 11 and 12
	 *	RL4: blocks 13 and 14
	 *	RR4: blocks 15 and 16
	 * output:
	 *	RL1: encrypted blocks 1 and 2
	 *	RR1: encrypted blocks 3 and 4
	 *	RL2: encrypted blocks 5 and 6
	 *	RR2: encrypted blocks 7 and 8
	 *	RL3: encrypted blocks 9 and 10
	 *	RR3: encrypted blocks 11 and 12
	 *	RL4: encrypted blocks 13 and 14
	 *	RR4: encrypted blocks 15 and 16
	 */

	pushq %r15;
	pushq %rbx;

	movq %rdi, CTX;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;
	enc_preload_rkr();

	inpack_blocks(RL1, RR1, RTMP, RX, RKM);
	inpack_blocks(RL2, RR2, RTMP, RX, RKM);
	inpack_blocks(RL3, RR3, RTMP, RX, RKM);
	inpack_blocks(RL4, RR4, RTMP, RX, RKM);

	round(RL, RR, 0, 1);
	round(RR, RL, 1, 2);
	round(RL, RR, 2, 3);
	round(RR, RL, 3, 1);
	round(RL, RR, 4, 2);
	round(RR, RL, 5, 3);
	round(RL, RR, 6, 1);
	round(RR, RL, 7, 2);
	round(RL, RR, 8, 3);
	round(RR, RL, 9, 1);
	round(RL, RR, 10, 2);
	round(RR, RL, 11, 3);

	movzbl rr(CTX), %eax;
	testl %eax, %eax;
	jnz .L__skip_enc;

	round(RL, RR, 12, 1);
	round(RR, RL, 13, 2);
	round(RL, RR, 14, 3);
	round(RR, RL, 15, 1);

.L__skip_enc:
	popq %rbx;
	popq %r15;

	vmovdqa .Lbswap_mask, RKM;

	outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
	outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
	outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

	RET;
SYM_FUNC_END(__cast5_enc_blk16)
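
/*
 * Per 64-bit block (l, r), __cast5_enc_blk16 above follows the schedule
 * below, written as a C-level sketch against the generic CAST5 code;
 * f1/f2/f3 are the round functions sketched earlier and Km[]/Kr[] are
 * the subkey arrays at the km/kr offsets of the context:
 *
 *	l ^= f1(r, Km[0],  Kr[0]);	// round(RL, RR,  0, 1)
 *	r ^= f2(l, Km[1],  Kr[1]);	// round(RR, RL,  1, 2)
 *	l ^= f3(r, Km[2],  Kr[2]);	// round(RL, RR,  2, 3)
 *	...				// rounds 4..12 continue the
 *					// f1, f2, f3 pattern
 *	r ^= f3(l, Km[11], Kr[11]);	// round(RR, RL, 11, 3)
 *	if (!rr) {			// rr != 0 selects the 12-round
 *					// variant (RFC 2144, keys of 80
 *					// bits or less)
 *		l ^= f1(r, Km[12], Kr[12]);
 *		r ^= f2(l, Km[13], Kr[13]);
 *		l ^= f3(r, Km[14], Kr[14]);
 *		r ^= f1(l, Km[15], Kr[15]);
 *	}
 *
 * The ciphertext block is (r, l), which is why the callers below store
 * the RR registers before the RL registers.
 */
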
SYM_FUNC_START_LOCAL(__cast5_dec_blk16)
	/* input:
	 *	%rdi: ctx
	 *	RL1: encrypted blocks 1 and 2
	 *	RR1: encrypted blocks 3 and 4
	 *	RL2: encrypted blocks 5 and 6
	 *	RR2: encrypted blocks 7 and 8
	 *	RL3: encrypted blocks 9 and 10
	 *	RR3: encrypted blocks 11 and 12
	 *	RL4: encrypted blocks 13 and 14
	 *	RR4: encrypted blocks 15 and 16
	 * output:
	 *	RL1: decrypted blocks 1 and 2
	 *	RR1: decrypted blocks 3 and 4
	 *	RL2: decrypted blocks 5 and 6
	 *	RR2: decrypted blocks 7 and 8
	 *	RL3: decrypted blocks 9 and 10
	 *	RR3: decrypted blocks 11 and 12
	 *	RL4: decrypted blocks 13 and 14
	 *	RR4: decrypted blocks 15 and 16
	 */

	pushq %r15;
	pushq %rbx;

	movq %rdi, CTX;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;
	dec_preload_rkr();

	inpack_blocks(RL1, RR1, RTMP, RX, RKM);
	inpack_blocks(RL2, RR2, RTMP, RX, RKM);
	inpack_blocks(RL3, RR3, RTMP, RX, RKM);
	inpack_blocks(RL4, RR4, RTMP, RX, RKM);

	movzbl rr(CTX), %eax;
	testl %eax, %eax;
	jnz .L__skip_dec;

	round(RL, RR, 15, 1);
	round(RR, RL, 14, 3);
	round(RL, RR, 13, 2);
	round(RR, RL, 12, 1);

.L__dec_tail:
	round(RL, RR, 11, 3);
	round(RR, RL, 10, 2);
	round(RL, RR, 9, 1);
	round(RR, RL, 8, 3);
	round(RL, RR, 7, 2);
	round(RR, RL, 6, 1);
	round(RL, RR, 5, 3);
	round(RR, RL, 4, 2);
	round(RL, RR, 3, 1);
	round(RR, RL, 2, 3);
	round(RL, RR, 1, 2);
	round(RR, RL, 0, 1);

	vmovdqa .Lbswap_mask, RKM;
	popq %rbx;
	popq %r15;

	outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
	outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
	outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

	RET;

.L__skip_dec:
	vpsrldq $4, RKR, RKR;
	jmp .L__dec_tail;
SYM_FUNC_END(__cast5_dec_blk16)

SYM_FUNC_START(cast5_ecb_enc_16way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;

	vmovdqu (0*4*4)(%rdx), RL1;
	vmovdqu (1*4*4)(%rdx), RR1;
	vmovdqu (2*4*4)(%rdx), RL2;
	vmovdqu (3*4*4)(%rdx), RR2;
	vmovdqu (4*4*4)(%rdx), RL3;
	vmovdqu (5*4*4)(%rdx), RR3;
	vmovdqu (6*4*4)(%rdx), RL4;
	vmovdqu (7*4*4)(%rdx), RR4;

	call __cast5_enc_blk16;

	vmovdqu RR1, (0*4*4)(%r11);
	vmovdqu RL1, (1*4*4)(%r11);
	vmovdqu RR2, (2*4*4)(%r11);
	vmovdqu RL2, (3*4*4)(%r11);
	vmovdqu RR3, (4*4*4)(%r11);
	vmovdqu RL3, (5*4*4)(%r11);
	vmovdqu RR4, (6*4*4)(%r11);
	vmovdqu RL4, (7*4*4)(%r11);

	popq %r15;
	FRAME_END
	RET;
SYM_FUNC_END(cast5_ecb_enc_16way)

SYM_FUNC_START(cast5_ecb_dec_16way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 */

	FRAME_BEGIN
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;

	vmovdqu (0*4*4)(%rdx), RL1;
	vmovdqu (1*4*4)(%rdx), RR1;
	vmovdqu (2*4*4)(%rdx), RL2;
	vmovdqu (3*4*4)(%rdx), RR2;
	vmovdqu (4*4*4)(%rdx), RL3;
	vmovdqu (5*4*4)(%rdx), RR3;
	vmovdqu (6*4*4)(%rdx), RL4;
	vmovdqu (7*4*4)(%rdx), RR4;

	call __cast5_dec_blk16;

	vmovdqu RR1, (0*4*4)(%r11);
	vmovdqu RL1, (1*4*4)(%r11);
	vmovdqu RR2, (2*4*4)(%r11);
	vmovdqu RL2, (3*4*4)(%r11);
	vmovdqu RR3, (4*4*4)(%r11);
	vmovdqu RL3, (5*4*4)(%r11);
	vmovdqu RR4, (6*4*4)(%r11);
	vmovdqu RL4, (7*4*4)(%r11);

	popq %r15;
	FRAME_END
	RET;
SYM_FUNC_END(cast5_ecb_dec_16way)
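
/*
 * CBC decryption chains only within the 16 blocks handled per call.
 * As a sketch (D() stands in for a single CAST5 block decryption and
 * src/dst are viewed as arrays of 16 64-bit blocks; this is not the
 * glue code's actual API):
 *
 *	for (i = 15; i >= 1; i--)
 *		dst[i] = D(src[i]) ^ src[i - 1];
 *	dst[0] = D(src[0]);	// caller is expected to xor in the IV
 *
 * The vpshufd $0x4f below builds a (0, src[0]) pair so that the first
 * block is xored with zero and the second block with src[0].
 */
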
SYM_FUNC_START(cast5_cbc_dec_16way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN
	pushq %r12;
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;
	movq %rdx, %r12;

	vmovdqu (0*16)(%rdx), RL1;
	vmovdqu (1*16)(%rdx), RR1;
	vmovdqu (2*16)(%rdx), RL2;
	vmovdqu (3*16)(%rdx), RR2;
	vmovdqu (4*16)(%rdx), RL3;
	vmovdqu (5*16)(%rdx), RR3;
	vmovdqu (6*16)(%rdx), RL4;
	vmovdqu (7*16)(%rdx), RR4;

	call __cast5_dec_blk16;

	/* xor with src */
	vmovq (%r12), RX;
	vpshufd $0x4f, RX, RX;
	vpxor RX, RR1, RR1;
	vpxor 0*16+8(%r12), RL1, RL1;
	vpxor 1*16+8(%r12), RR2, RR2;
	vpxor 2*16+8(%r12), RL2, RL2;
	vpxor 3*16+8(%r12), RR3, RR3;
	vpxor 4*16+8(%r12), RL3, RL3;
	vpxor 5*16+8(%r12), RR4, RR4;
	vpxor 6*16+8(%r12), RL4, RL4;

	vmovdqu RR1, (0*16)(%r11);
	vmovdqu RL1, (1*16)(%r11);
	vmovdqu RR2, (2*16)(%r11);
	vmovdqu RL2, (3*16)(%r11);
	vmovdqu RR3, (4*16)(%r11);
	vmovdqu RL3, (5*16)(%r11);
	vmovdqu RR4, (6*16)(%r11);
	vmovdqu RL4, (7*16)(%r11);

	popq %r15;
	popq %r12;
	FRAME_END
	RET;
SYM_FUNC_END(cast5_cbc_dec_16way)

SYM_FUNC_START(cast5_ctr_16way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (big endian, 64bit)
	 */
	FRAME_BEGIN
	pushq %r12;
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;
	movq %rdx, %r12;

	vpcmpeqd RTMP, RTMP, RTMP;
	vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */

	vpcmpeqd RKR, RKR, RKR;
	vpaddq RKR, RKR, RKR; /* low: -2, high: -2 */
	vmovdqa .Lbswap_iv_mask, R1ST;
	vmovdqa .Lbswap128_mask, RKM;

	/* load IV and byteswap */
	vmovq (%rcx), RX;
	vpshufb R1ST, RX, RX;

	/* construct IVs */
	vpsubq RTMP, RX, RX;  /* le: IV1, IV0 */
	vpshufb RKM, RX, RL1; /* be: IV0, IV1 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR1; /* be: IV2, IV3 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RL2; /* be: IV4, IV5 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR2; /* be: IV6, IV7 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RL3; /* be: IV8, IV9 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR3; /* be: IV10, IV11 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RL4; /* be: IV12, IV13 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR4; /* be: IV14, IV15 */

	/* store last IV */
	vpsubq RTMP, RX, RX;  /* le: IV16, IV14 */
	vpshufb R1ST, RX, RX; /* be: IV16, IV16 */
	vmovq RX, (%rcx);

	call __cast5_enc_blk16;

	/* dst = src ^ iv */
	vpxor (0*16)(%r12), RR1, RR1;
	vpxor (1*16)(%r12), RL1, RL1;
	vpxor (2*16)(%r12), RR2, RR2;
	vpxor (3*16)(%r12), RL2, RL2;
	vpxor (4*16)(%r12), RR3, RR3;
	vpxor (5*16)(%r12), RL3, RL3;
	vpxor (6*16)(%r12), RR4, RR4;
	vpxor (7*16)(%r12), RL4, RL4;
	vmovdqu RR1, (0*16)(%r11);
	vmovdqu RL1, (1*16)(%r11);
	vmovdqu RR2, (2*16)(%r11);
	vmovdqu RL2, (3*16)(%r11);
	vmovdqu RR3, (4*16)(%r11);
	vmovdqu RL3, (5*16)(%r11);
	vmovdqu RR4, (6*16)(%r11);
	vmovdqu RL4, (7*16)(%r11);

	popq %r15;
	popq %r12;
	FRAME_END
	RET;
SYM_FUNC_END(cast5_ctr_16way)
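
/*
 * cast5_ctr_16way above implements 64-bit big-endian counter mode.  As a
 * sketch (E() stands in for a single CAST5 block encryption; this is not
 * the glue code's actual API):
 *
 *	u64 ctr = be64_to_cpu(*(__be64 *)iv);
 *	for (i = 0; i < 16; i++)
 *		dst[i] = src[i] ^ E(cpu_to_be64(ctr + i));
 *	*(__be64 *)iv = cpu_to_be64(ctr + 16);
 *
 * i.e. sixteen consecutive counter values form the keystream and the
 * counter, advanced by 16, is written back through %rcx.
 */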