Lines matching refs:rsp (every reference to %rsp in the file)
215 "&mov (\"4*$c0(%rsp)\",$xc)", # reload pair of 'c's
216 "&mov (\"4*$c1(%rsp)\",$xc_)",
217 "&mov ($xc,\"4*$c2(%rsp)\")",
218 "&mov ($xc_,\"4*$c3(%rsp)\")",
284 sub \$64+24,%rsp
294 #movdqa %xmm0,4*0(%rsp) # key[0]
295 movdqa %xmm1,4*4(%rsp) # key[1]
296 movdqa %xmm2,4*8(%rsp) # key[2]
297 movdqa %xmm3,4*12(%rsp) # key[3]
307 mov 4*4(%rsp),@x[4]
308 mov 4*5(%rsp),@x[5]
309 mov 4*6(%rsp),@x[6]
310 mov 4*7(%rsp),@x[7]
312 mov 4*13(%rsp),@x[13]
313 mov 4*14(%rsp),@x[14]
314 mov 4*15(%rsp),@x[15]
316 mov %rbp,64+0(%rsp) # save len
318 mov $inp,64+8(%rsp) # save inp
320 mov $out,64+16(%rsp) # save out
334 mov @t[1],4*9(%rsp) # modulo-scheduled
335 mov @t[0],4*8(%rsp)
336 mov 64(%rsp),%rbp # load len
338 mov 64+8(%rsp),$inp # load inp
340 mov 64+16(%rsp),$out # load out
346 add 4*4(%rsp),@x[4]
347 add 4*5(%rsp),@x[5]
348 add 4*6(%rsp),@x[6]
349 add 4*7(%rsp),@x[7]
350 add 4*12(%rsp),@x[12]
351 add 4*13(%rsp),@x[13]
352 add 4*14(%rsp),@x[14]
353 add 4*15(%rsp),@x[15]
354 paddd 4*8(%rsp),%xmm1
375 movdqa %xmm2,4*8(%rsp)
376 movd %xmm3,4*12(%rsp)
400 mov @x[0],4*0(%rsp)
401 mov @x[1],4*1(%rsp)
403 mov @x[2],4*2(%rsp)
404 mov @x[3],4*3(%rsp)
405 mov @x[4],4*4(%rsp)
406 mov @x[5],4*5(%rsp)
407 mov @x[6],4*6(%rsp)
408 mov @x[7],4*7(%rsp)
409 movdqa %xmm1,4*8(%rsp)
410 mov @x[12],4*12(%rsp)
411 mov @x[13],4*13(%rsp)
412 mov @x[14],4*14(%rsp)
413 mov @x[15],4*15(%rsp)
417 movzb (%rsp,%rbx),%edx
425 lea 64+24+48(%rsp),%rsi
439 lea (%rsi),%rsp
440 .cfi_def_cfa_register %rsp
484 mov %rsp,%r9 # frame pointer
497 sub \$64+$xframe,%rsp
512 movdqa $a,0x00(%rsp)
513 movdqa $b,0x10(%rsp)
514 movdqa $c,0x20(%rsp)
515 movdqa $d,0x30(%rsp)
522 movdqa 0x00(%rsp),$a
523 movdqa 0x10(%rsp),$b
524 movdqa 0x20(%rsp),$c
525 paddd 0x30(%rsp),$d
527 movdqa $d,0x30(%rsp)
548 paddd 0x00(%rsp),$a
549 paddd 0x10(%rsp),$b
550 paddd 0x20(%rsp),$c
551 paddd 0x30(%rsp),$d
579 movdqa $a,0x00(%rsp)
580 movdqa $b,0x10(%rsp)
581 movdqa $c,0x20(%rsp)
582 movdqa $d,0x30(%rsp)
587 movzb (%rsp,$counter),%ecx
601 lea (%r9),%rsp
602 .cfi_def_cfa_register %rsp
666 mov %rsp,%r9 # frame pointer
668 sub \$64+$xframe,%rsp
689 movdqa $a,0x00(%rsp)
691 movdqa $b,0x10(%rsp)
693 movdqa $c,0x20(%rsp)
695 movdqa $d,0x30(%rsp)
722 paddd 0x00(%rsp),$a
723 paddd 0x10(%rsp),$b
724 paddd 0x20(%rsp),$c
725 paddd 0x30(%rsp),$d
727 paddd 0x00(%rsp),$a1
728 paddd 0x10(%rsp),$b1
729 paddd 0x20(%rsp),$c1
730 paddd 0x30(%rsp),$d1
767 lea (%r9),%rsp
768 .cfi_def_cfa_register %rsp
858 "&movdqa (\"`16*($c0-8)`(%rsp)\",$xc)", # reload pair of 'c's
859 "&movdqa (\"`16*($c1-8)`(%rsp)\",$xc_)",
860 "&movdqa ($xc,\"`16*($c2-8)`(%rsp)\")",
861 "&movdqa ($xc_,\"`16*($c3-8)`(%rsp)\")",
915 mov %rsp,%r9 # frame pointer
933 sub \$0x140+$xframe,%rsp
961 lea 0x100(%rsp),%rcx # size optimization
967 movdqa $xa0,0x40(%rsp) # ... and offload
969 movdqa $xa1,0x50(%rsp)
971 movdqa $xa2,0x60(%rsp)
972 movdqa $xa3,0x70(%rsp)
1005 movdqa 0x40(%rsp),$xa0 # re-load smashed key
1006 movdqa 0x50(%rsp),$xa1
1007 movdqa 0x60(%rsp),$xa2
1008 movdqa 0x70(%rsp),$xa3
1024 movdqa $xt2,0x20(%rsp) # SIMD equivalent of "@x[10]"
1025 movdqa $xt3,0x30(%rsp) # SIMD equivalent of "@x[11]"
1040 paddd 0x40(%rsp),$xa0 # accumulate key material
1041 paddd 0x50(%rsp),$xa1
1042 paddd 0x60(%rsp),$xa2
1043 paddd 0x70(%rsp),$xa3
1065 movdqa $xa0,0x00(%rsp) # offload $xaN
1066 movdqa $xa1,0x10(%rsp)
1067 movdqa 0x20(%rsp),$xa0 # "xc2"
1068 movdqa 0x30(%rsp),$xa1 # "xc3"
1091 movdqa $xa2,0x20(%rsp) # keep offloading $xaN
1092 movdqa $xa3,0x30(%rsp)
1137 pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
1151 pxor 0x10(%rsp),$xt0
1165 pxor 0x20(%rsp),$xt0
1179 pxor 0x30(%rsp),$xt0
1202 #movdqa 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
1204 #movdqa $xt0,0x00(%rsp)
1205 movdqa $xb0,0x10(%rsp)
1206 movdqa $xc0,0x20(%rsp)
1207 movdqa $xd0,0x30(%rsp)
1216 pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
1226 movdqa 0x10(%rsp),$xt0 # $xaN is offloaded, remember?
1229 movdqa $xt0,0x00(%rsp)
1230 movdqa $xb1,0x10(%rsp)
1232 movdqa $xc1,0x20(%rsp)
1234 movdqa $xd1,0x30(%rsp)
1243 pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
1256 pxor 0x10(%rsp),$xt0
1266 movdqa 0x20(%rsp),$xt0 # $xaN is offloaded, remember?
1269 movdqa $xt0,0x00(%rsp)
1270 movdqa $xb2,0x10(%rsp)
1272 movdqa $xc2,0x20(%rsp)
1274 movdqa $xd2,0x30(%rsp)
1283 pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
1297 pxor 0x10(%rsp),$xt0
1311 pxor 0x20(%rsp),$xt0
1321 movdqa 0x30(%rsp),$xt0 # $xaN is offloaded, remember?
1324 movdqa $xt0,0x00(%rsp)
1325 movdqa $xb3,0x10(%rsp)
1327 movdqa $xc3,0x20(%rsp)
1329 movdqa $xd3,0x30(%rsp)
1333 movzb (%rsp,%r10),%ecx
1355 lea (%r9),%rsp
1356 .cfi_def_cfa_register %rsp
1448 mov %rsp,%r9 # frame pointer
1450 sub \$0x140+$xframe,%rsp
1480 lea 0x100(%rsp),%rcx # size optimization
1484 vmovdqa $xa0,0x40(%rsp) # ... and offload
1486 vmovdqa $xa1,0x50(%rsp)
1488 vmovdqa $xa2,0x60(%rsp)
1489 vmovdqa $xa3,0x70(%rsp)
1522 vmovdqa 0x40(%rsp),$xa0 # re-load smashed key
1523 vmovdqa 0x50(%rsp),$xa1
1524 vmovdqa 0x60(%rsp),$xa2
1525 vmovdqa 0x70(%rsp),$xa3
1554 vpaddd 0x40(%rsp),$xa0,$xa0 # accumulate key material
1555 vpaddd 0x50(%rsp),$xa1,$xa1
1556 vpaddd 0x60(%rsp),$xa2,$xa2
1557 vpaddd 0x70(%rsp),$xa3,$xa3
1559 vmovdqa $xt2,0x20(%rsp) # offload $xc2,3
1560 vmovdqa $xt3,0x30(%rsp)
1578 vmovdqa $xa0,0x00(%rsp) # offload $xa0,1
1579 vmovdqa $xa1,0x10(%rsp)
1580 vmovdqa 0x20(%rsp),$xa0 # "xc2"
1581 vmovdqa 0x30(%rsp),$xa1 # "xc3"
1628 vmovdqa 0x00(%rsp),$xa0 # restore $xa0,1
1629 vmovdqa 0x10(%rsp),$xa1
1687 vmovdqa $xa0,0x00(%rsp)
1688 vmovdqa $xb0,0x10(%rsp)
1689 vmovdqa $xc0,0x20(%rsp)
1690 vmovdqa $xd0,0x30(%rsp)
1706 vmovdqa $xa1,0x00(%rsp)
1708 vmovdqa $xb1,0x10(%rsp)
1710 vmovdqa $xc1,0x20(%rsp)
1712 vmovdqa $xd1,0x30(%rsp)
1737 vmovdqa $xa2,0x00(%rsp)
1739 vmovdqa $xb2,0x10(%rsp)
1741 vmovdqa $xc2,0x20(%rsp)
1743 vmovdqa $xd2,0x30(%rsp)
1778 vmovdqa $xa3,0x00(%rsp)
1780 vmovdqa $xb3,0x10(%rsp)
1782 vmovdqa $xc3,0x20(%rsp)
1784 vmovdqa $xd3,0x30(%rsp)
1788 movzb (%rsp,%r10),%ecx
1811 lea (%r9),%rsp
1812 .cfi_def_cfa_register %rsp
1897 "&vmovdqa (\"`32*($c0-8)`(%rsp)\",$xc)", # reload pair of 'c's
1898 "&vmovdqa (\"`32*($c1-8)`(%rsp)\",$xc_)",
1899 "&vmovdqa ($xc,\"`32*($c2-8)`(%rsp)\")",
1900 "&vmovdqa ($xc_,\"`32*($c3-8)`(%rsp)\")",
1950 mov %rsp,%r9 # frame register
1952 sub \$0x280+$xframe,%rsp
1953 and \$-32,%rsp
1984 lea 0x100(%rsp),%rcx # size optimization
1985 lea 0x200(%rsp),%rax # size optimization
2048 vmovdqa $xt2,0x40(%rsp) # SIMD equivalent of "@x[10]"
2049 vmovdqa $xt3,0x60(%rsp) # SIMD equivalent of "@x[11]"
2064 lea 0x200(%rsp),%rax # size optimization
2109 vmovdqa $xa0,0x00(%rsp) # offload $xaN
2110 vmovdqa $xa1,0x20(%rsp)
2111 vmovdqa 0x40(%rsp),$xc2 # $xa0
2112 vmovdqa 0x60(%rsp),$xc3 # $xa1
2160 vmovdqa 0x00(%rsp),$xa0 # $xaN was offloaded, remember?
2161 vmovdqa 0x20(%rsp),$xa1
2232 vmovdqa $xa0,0x00(%rsp)
2233 vmovdqa $xb0,0x20(%rsp)
2246 vmovdqa $xc0,0x00(%rsp)
2249 vmovdqa $xd0,0x20(%rsp)
2266 vmovdqa $xa1,0x00(%rsp)
2269 vmovdqa $xb1,0x20(%rsp)
2290 vmovdqa $xc1,0x00(%rsp)
2293 vmovdqa $xd1,0x20(%rsp)
2318 vmovdqa $xa2,0x00(%rsp)
2321 vmovdqa $xb2,0x20(%rsp)
2350 vmovdqa $xc2,0x00(%rsp)
2353 vmovdqa $xd2,0x20(%rsp)
2386 vmovdqa $xa3,0x00(%rsp)
2389 vmovdqa $xb3,0x20(%rsp)
2426 vmovdqa $xc3,0x00(%rsp)
2429 vmovdqa $xd3,0x20(%rsp)
2433 movzb (%rsp,%r10),%ecx
2456 lea (%r9),%rsp
2457 .cfi_def_cfa_register %rsp
2512 mov %rsp,%r9 # frame pointer
2517 sub \$64+$xframe,%rsp
2666 vmovdqa %x#$a,0x00(%rsp)
2667 vmovdqa %x#$b,0x10(%rsp)
2668 vmovdqa %x#$c,0x20(%rsp)
2669 vmovdqa %x#$d,0x30(%rsp)
2675 vmovdqa $t0,0x00(%rsp)
2676 vmovdqa $t1,0x10(%rsp)
2677 vmovdqa $t2,0x20(%rsp)
2678 vmovdqa $t3,0x30(%rsp)
2683 movzb (%rsp,$counter),%ecx
2690 vmovdqu32 $a_,0x00(%rsp)
2708 lea (%r9),%rsp
2709 .cfi_def_cfa_register %rsp
2724 mov %rsp,%r9 # frame pointer
2729 sub \$64+$xframe,%rsp
2834 vmovdqa %x#$a,0x00(%rsp)
2835 vmovdqa %x#$b,0x10(%rsp)
2836 vmovdqa %x#$c,0x20(%rsp)
2837 vmovdqa %x#$d,0x30(%rsp)
2843 vmovdqa $t0,0x00(%rsp)
2844 vmovdqa $t1,0x10(%rsp)
2845 vmovdqa $t2,0x20(%rsp)
2846 vmovdqa $t3,0x30(%rsp)
2851 movzb (%rsp,$counter),%ecx
2858 vmovdqu32 $a_,0x00(%rsp)
2859 vmovdqu32 $a_,0x20(%rsp)
2877 lea (%r9),%rsp
2878 .cfi_def_cfa_register %rsp
2965 mov %rsp,%r9 # frame register
2967 sub \$64+$xframe,%rsp
2968 and \$-64,%rsp
3351 vmovdqa32 $xa0,0x00(%rsp)
3357 movzb (%rsp,%r10),%ecx
3365 vmovdqa32 $xa0,0(%rsp)
3383 lea (%r9),%rsp
3384 .cfi_def_cfa_register %rsp
3405 mov %rsp,%r9 # frame register
3407 sub \$64+$xframe,%rsp
3408 and \$-64,%rsp
3739 vmovdqa $xa0,0x00(%rsp)
3740 vmovdqa $xb0,0x20(%rsp)
3746 movzb (%rsp,%r10),%ecx
3754 vmovdqa $xa0,0x00(%rsp)
3755 vmovdqa $xa0,0x20(%rsp)
3773 lea (%r9),%rsp
3774 .cfi_def_cfa_register %rsp
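
Nearly all of the matches above trace one stack-frame pattern: the incoming %rsp is saved in %r9 (the scalar path instead recomputes it into %rsi, lines 425 and 439), a scratch area is carved out and, on the wider SIMD paths, aligned so key material and state words can be spilled with movdqa/vmovdqa, and the frame is torn down with a single lea before the CFA is pointed back at %rsp. Below is a condensed sketch of that shape in plain AT&T syntax rather than the file's escaped perlasm strings; the label, the frame size and the %r9-side CFI directive are placeholders or assumptions, not lines taken from the source.

	.text
	chacha_frame_sketch:			# hypothetical label, not a symbol from the source
	.cfi_startproc
		mov	%rsp,%r9		# save the caller's stack pointer (cf. source lines 484, 915, 1448, 1950, 2512, ...)
		.cfi_def_cfa_register	%r9	# assumed companion directive; only the %rsp side appears in these matches
		sub	$0x148,%rsp		# placeholder size; the real frames are 64+$xframe, 0x140+$xframe or 0x280+$xframe
		and	$-32,%rsp		# align for aligned vector spills; the AVX2 path uses -32, the AVX-512 paths -64
		movdqa	%xmm1,16(%rsp)		# offload key material into the scratch area, as 4*4(%rsp) does on line 295
		movdqa	16(%rsp),%xmm1		# ... and reload it later, as the round code does (lines 522-525, 1005-1008)
		lea	(%r9),%rsp		# restore the caller's frame in one step (lines 601, 767, 1355, ...)
		.cfi_def_cfa_register	%rsp	# tell the unwinder the CFA is based on %rsp again
		ret
	.cfi_endproc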
3804 sub \$64,%rsp
3857 mov %r10,32(%rsp) # arg5
3858 mov %r11,40(%rsp) # arg6
3859 mov %r12,48(%rsp) # arg7
3860 mov %rcx,56(%rsp) # arg8, (NULL)
3864 add \$64,%rsp
3889 sub \$64,%rsp
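
The final matches (source lines 3804 through 3889) appear to come from glue code that makes a call under the Win64 calling convention, where the first four arguments travel in registers and arguments five through eight occupy the 8-byte slots just above the callee's 32-byte shadow space; that is why the stores land at 32, 40, 48 and 56(%rsp) inside a 64-byte frame. A minimal sketch of that convention follows; the indirect call target is a placeholder, not the function the source actually invokes.

		sub	$64,%rsp		# 32 bytes of shadow space for args 1-4 plus four slots for args 5-8 (line 3804)
		mov	%r10,32(%rsp)		# arg5: first slot above the shadow space (line 3857)
		mov	%r11,40(%rsp)		# arg6 (line 3858)
		mov	%r12,48(%rsp)		# arg7 (line 3859)
		mov	%rcx,56(%rsp)		# arg8, NULL in the source per its comment (line 3860)
		call	*%rax			# placeholder callee; under Win64 args 1-4 go in %rcx,%rdx,%r8,%r9
		add	$64,%rsp		# drop the outgoing-argument area again (line 3864)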