openssl/crypto/sha/asm/sha1-586.pl

#!/usr/bin/env perl
# ====================================================================
# [Re]written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# "[Re]written" was achieved in two major overhauls. In 2004 BODY_*
# functions were re-implemented to address a P4 performance issue [see
# commentary below], and in 2006 the rest was rewritten in order to
# gain the freedom to liberalize the licensing terms.
#
# ====================================================================
# January, September 2004.
#
# It was noted that the Intel IA-32 C compiler generates code which
# performs ~30% *faster* on a P4 CPU than the original *hand-coded*
# SHA1 assembler implementation. To address this problem (and
# prove that humans are still better than machines:-), the
# original code was overhauled, which resulted in the following
# performance changes:
#
#             compared with original    compared with Intel cc
#             assembler impl.           generated code
# Pentium     -16%                      +48%
# PIII/AMD    +8%                       +16%
# P4          +85%(!)                   +45%
#
# As you can see, Pentium came out as the loser:-( Yet I reckoned that
# the improvement on P4 outweighs the loss, and incorporated this
# re-tuned code into 0.9.7 and later.
# ----------------------------------------------------------------
# <appro@fy.chalmers.se>
#
# ====================================================================
# August 2009.
#
# George Spelvin has pointed out that F_40_59(b,c,d) can be rewritten
# as '(c&d) + (b&(c^d))', which makes it possible to accumulate partial
# results and lighten the "pressure" on scratch registers. This
# resulted in a >12% performance improvement on contemporary AMD cores
# (with no degradation on other CPUs:-). Also, the code was revised to
# maximize the "distance" between the instructions producing input for
# an 'lea' instruction and the 'lea' instruction itself, which is
# essential for the Intel Atom core.
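#
# The identity is easy to convince oneself of: for any bits b,c,d the
# majority function (b&c)|(b&d)|(c&d) equals (c&d)+(b&(c^d)), and the
# two summands never have a bit set in the same position (c&d and c^d
# are disjoint), so the addition cannot carry when applied to whole
# words. The little sub below is an illustrative sanity check only; it
# is never called, is not part of the upstream generator, and its name
# is made up for this sketch.
sub maj_identity_check {
	for my $b (0,1) { for my $c (0,1) { for my $d (0,1) {
		my $maj = ($b & $c) | ($b & $d) | ($c & $d);	# textbook F_40_59
		my $alt = ($c & $d) + ($b & ($c ^ $d));		# rewritten form
		return 0 if ($maj != $alt);
	}}}
	return 1;	# identity holds for all eight bit patterns
}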
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";
&asm_init($ARGV[0],"sha1-586.pl",$ARGV[$#ARGV] eq "386");
$A="eax";
$B="ebx";
$C="ecx";
$D="edx";
$E="edi";
$T="esi";
$tmp1="ebp";
@V=($A,$B,$C,$D,$E,$T);
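# Register allocation: $A..$E are the five SHA-1 working variables, $T
# is a sixth name that lets the results rotate through the set (see the
# round loop below), and $tmp1 (ebp) is the only scratch register. The
# stack holds the 16-word circular message buffer X[], accessed via
# swtmp().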
sub BODY_00_15
{
local($n,$a,$b,$c,$d,$e,$f)=@_;
&comment("00_15 $n");
&mov($f,$c); # f to hold F_00_19(b,c,d)
if ($n==0) { &mov($tmp1,$a); }
else { &mov($a,$tmp1); }
&rotl($tmp1,5); # tmp1=ROTATE(a,5)
&xor($f,$d);
&add($tmp1,$e); # tmp1+=e;
&mov($e,&swtmp($n%16)); # e becomes volatile and is loaded
# with xi, also note that e becomes
# f in next round...
&and($f,$b);
&rotr($b,2); # b=ROTATE(b,30)
&xor($f,$d); # f holds F_00_19(b,c,d)
&lea($tmp1,&DWP(0x5a827999,$tmp1,$e)); # tmp1+=K_00_19+xi
if ($n==15) { &mov($e,&swtmp(($n+1)%16));# pre-fetch f for next round
&add($f,$tmp1); } # f+=tmp1
else { &add($tmp1,$f); } # f becomes a in next round
}
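# For reference, each of rounds 0..15 computes
#	new_a = ROTATE(a,5) + F_00_19(b,c,d) + e + K_00_19 + X[n]
# and rotates b left by 30. F_00_19(b,c,d) = (b&c)|(~b&d) is evaluated
# above as ((c^d)&b)^d, which needs just one temporary, and K_00_19 is
# the 0x5a827999 constant folded into the 'lea'.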
sub BODY_16_19
{
local($n,$a,$b,$c,$d,$e,$f)=@_;
&comment("16_19 $n");
&mov($tmp1,$c); # tmp1 to hold F_00_19(b,c,d)
&xor($f,&swtmp(($n+2)%16)); # f to hold Xupdate(xi,xa,xb,xc,xd)
&xor($tmp1,$d);
&xor($f,&swtmp(($n+8)%16));
&and($tmp1,$b);
&xor($f,&swtmp(($n+13)%16)); # f holds xa^xb^xc^xd
&rotl($f,1); # f=ROTATE(f,1)
&xor($tmp1,$d); # tmp1=F_00_19(b,c,d)
&add($e,$tmp1); # e+=F_00_19(b,c,d)
&mov($tmp1,$a);
&rotr($b,2); # b=ROTATE(b,30)
&mov(&swtmp($n%16),$f); # xi=f
&rotl($tmp1,5); # ROTATE(a,5)
&lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e
&mov($e,&swtmp(($n+1)%16)); # pre-fetch f for next round
&add($f,$tmp1); # f+=ROTATE(a,5)
}
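# The Xupdate above (shared with the 20_39 and 40_59 bodies) is the
# standard SHA-1 message schedule
#	W[t] = ROTATE(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1)
# kept in a 16-word circular buffer on the stack, which turns the
# offsets into (n+13)%16, (n+8)%16, (n+2)%16 and n%16 respectively.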
sub BODY_20_39
{
local($n,$a,$b,$c,$d,$e,$f)=@_;
local $K=($n<40)?0x6ed9eba1:0xca62c1d6;
&comment("20_39 $n");
&mov($tmp1,$b); # tmp1 to hold F_20_39(b,c,d)
&xor($f,&swtmp(($n+2)%16)); # f to hold Xupdate(xi,xa,xb,xc,xd)
&xor($tmp1,$c);
&xor($f,&swtmp(($n+8)%16));
&xor($tmp1,$d); # tmp1 holds F_20_39(b,c,d)
&xor($f,&swtmp(($n+13)%16)); # f holds xa^xb^xc^xd
&rotl($f,1); # f=ROTATE(f,1)
&add($e,$tmp1); # e+=F_20_39(b,c,d)
&rotr($b,2); # b=ROTATE(b,30)
&mov($tmp1,$a);
&rotl($tmp1,5); # ROTATE(a,5)
&mov(&swtmp($n%16),$f) if($n<77);# xi=f
&lea($f,&DWP($K,$f,$e)); # f+=e+K_XX_YY
&mov($e,&swtmp(($n+1)%16)) if($n<79);# pre-fetch f for next round
&add($f,$tmp1); # f+=ROTATE(a,5)
}
sub BODY_40_59
{
local($n,$a,$b,$c,$d,$e,$f)=@_;
&comment("40_59 $n");
&mov($tmp1,$c); # tmp1 to hold F_40_59(b,c,d)
&xor($f,&swtmp(($n+2)%16)); # f to hold Xupdate(xi,xa,xb,xc,xd)
&xor($tmp1,$d);
&xor($f,&swtmp(($n+8)%16));
&and($tmp1,$b);
&xor($f,&swtmp(($n+13)%16)); # f holds xa^xb^xc^xd
&rotl($f,1); # f=ROTATE(f,1)
&add($tmp1,$e); # b&(c^d)+=e
&rotr($b,2); # b=ROTATE(b,30)
&mov($e,$a); # e becomes volatile
&rotl($e,5); # ROTATE(a,5)
&mov(&swtmp($n%16),$f); # xi=f
&lea($f,&DWP(0x8f1bbcdc,$f,$tmp1));# f+=K_40_59+e+(b&(c^d))
&mov($tmp1,$c);
&add($f,$e); # f+=ROTATE(a,5)
&and($tmp1,$d);
&mov($e,&swtmp(($n+1)%16)); # pre-fetch f for next round
&add($f,$tmp1); # f+=c&d
}
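# The 2009 rewrite from the commentary at the top shows up here: rather
# than forming the full majority function up front, the round folds
# (b&(c^d))+e and K_40_59 into the 'lea' first and adds c&d only at the
# very end, which spreads the dependency chain out and frees a
# register; $e can be clobbered to hold ROTATE(a,5) because its value
# has already been consumed.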
&function_begin("sha1_block_data_order");
&mov($tmp1,&wparam(0)); # SHA_CTX *c
&mov($T,&wparam(1)); # const void *input
&mov($A,&wparam(2)); # size_t num
&stack_push(16); # allocate X[16]
&shl($A,6);
&add($A,$T);
&mov(&wparam(2),$A); # pointer beyond the end of input
&mov($E,&DWP(16,$tmp1));# pre-load E
&set_label("loop",16);
# copy input chunk to X, but reversing byte order!
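# (SHA-1 is defined over big-endian 32-bit words while x86 is little-
# endian, hence the bswap of every word on its way to the stack.)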
for ($i=0; $i<16; $i+=4)
{
&mov($A,&DWP(4*($i+0),$T));
&mov($B,&DWP(4*($i+1),$T));
&mov($C,&DWP(4*($i+2),$T));
&mov($D,&DWP(4*($i+3),$T));
&bswap($A);
&bswap($B);
&bswap($C);
&bswap($D);
&mov(&swtmp($i+0),$A);
&mov(&swtmp($i+1),$B);
&mov(&swtmp($i+2),$C);
&mov(&swtmp($i+3),$D);
}
&mov(&wparam(1),$T); # redundant in 1st spin
&mov($A,&DWP(0,$tmp1)); # load SHA_CTX
&mov($B,&DWP(4,$tmp1));
&mov($C,&DWP(8,$tmp1));
&mov($D,&DWP(12,$tmp1));
# E is pre-loaded
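# Rotating @V after each emitted round renames the registers so that
# the value just computed into $f is treated as a in the next round,
# mirroring the a,b,c,d,e shift of the algorithm without any explicit
# register moves. The 'die' below simply asserts that after 80
# rotations the names line up the way the epilogue expects.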
for($i=0;$i<16;$i++) { &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
for(;$i<20;$i++) { &BODY_16_19($i,@V); unshift(@V,pop(@V)); }
for(;$i<40;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
for(;$i<60;$i++) { &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
for(;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
(($V[5] eq $D) and ($V[0] eq $E)) or die; # double-check
&mov($tmp1,&wparam(0)); # re-load SHA_CTX*
&mov($D,&wparam(1)); # D is last "T" and is discarded
&add($E,&DWP(0,$tmp1)); # E is last "A"...
&add($T,&DWP(4,$tmp1));
&add($A,&DWP(8,$tmp1));
&add($B,&DWP(12,$tmp1));
&add($C,&DWP(16,$tmp1));
&mov(&DWP(0,$tmp1),$E); # update SHA_CTX
&add($D,64); # advance input pointer
&mov(&DWP(4,$tmp1),$T);
&cmp($D,&wparam(2)); # have we reached the end yet?
&mov(&DWP(8,$tmp1),$A);
&mov($E,$C); # C is last "E" which needs to be "pre-loaded"
&mov(&DWP(12,$tmp1),$B);
&mov($T,$D); # input pointer
&mov(&DWP(16,$tmp1),$C);
&jb(&label("loop"));
&stack_pop(16);
&function_end("sha1_block_data_order");
&asciz("SHA1 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");
&asm_finish();