x86_64 assembly pack: add AVX512 ChaCha20 and Poly1305 code paths.

Reviewed-by: Rich Salz <rsalz@openssl.org>

parent f2d78649fb
commit abb8c44fba

3 changed files with 1213 additions and 12 deletions
@@ -18,6 +18,10 @@
#
# ChaCha20 for x86_64.
#
# December 2016
#
# Add AVX512F code path.
#
# Performance in cycles per byte out of large buffer.
#
#		IALU/gcc 4.8(i)	1xSSSE3/SSE2	4xSSSE3	8xAVX2
@@ -58,12 +62,13 @@ die "can't locate x86_64-xlate.pl";

if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
	=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
-	$avx = ($1>=2.19) + ($1>=2.22);
+	$avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25);
}

if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
-	$avx = ($1>=2.09) + ($1>=2.10);
+	$avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12);
	$avx += 1 if ($1==2.11 && $2>=8);
}
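# ($avx thus ranges 0..3: 1 means the assembler can encode AVX (gas 2.19/
# nasm 2.09), 2 AVX2 (gas 2.22/nasm 2.10) and 3 AVX512F (gas 2.25/nasm
# 2.12, or 2.11.8+); the new 16x code path below is emitted only when
# $avx>2.)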

if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
@@ -105,6 +110,11 @@ $code.=<<___;
.byte 0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
.Lsigma:
.asciz "expand 32-byte k"
.align 64
.Lincz:
.long 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.Lsixteen:
.long 16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16
.asciz "ChaCha20 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
___
@@ -1721,6 +1731,12 @@ $code.=<<___;
.align 32
ChaCha20_8x:
.LChaCha20_8x:
___
$code.=<<___ if ($avx>2);
	test \$`1<<16`,%r10d # check for AVX512F
	jnz .LChaCha20_16x
___
$code.=<<___;
	mov %rsp,%r10
	sub \$0x280+$xframe,%rsp
	and \$-32,%rsp
@@ -2212,7 +2228,7 @@ $code.=<<___;
	jnz .Loop_tail8x

.Ldone8x:
-	vzeroall
+	vzeroupper
___
$code.=<<___ if ($win64);
	lea 0x290+0x30(%rsp),%r11
@@ -2234,6 +2250,506 @@ $code.=<<___;
___
}

########################################################################
# AVX512 code paths
if ($avx>2) {
my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
    $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3)=map("%zmm$_",(0..15));
my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
	$xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
my @key=map("%zmm$_",(16..31));
my ($xt0,$xt1,$xt2,$xt3)=@key[0..3];
sub AVX512_lane_ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my @x=map("\"$_\"",@xx);

	(
	"&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1
	"&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2
	"&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3
	"&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4
	"&vpxord (@x[$d0],@x[$d0],@x[$a0])",
	"&vpxord (@x[$d1],@x[$d1],@x[$a1])",
	"&vpxord (@x[$d2],@x[$d2],@x[$a2])",
	"&vpxord (@x[$d3],@x[$d3],@x[$a3])",
	"&vprold (@x[$d0],@x[$d0],16)",
	"&vprold (@x[$d1],@x[$d1],16)",
	"&vprold (@x[$d2],@x[$d2],16)",
	"&vprold (@x[$d3],@x[$d3],16)",

	"&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
	"&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
	"&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
	"&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
	"&vpxord (@x[$b0],@x[$b0],@x[$c0])",
	"&vpxord (@x[$b1],@x[$b1],@x[$c1])",
	"&vpxord (@x[$b2],@x[$b2],@x[$c2])",
	"&vpxord (@x[$b3],@x[$b3],@x[$c3])",
	"&vprold (@x[$b0],@x[$b0],12)",
	"&vprold (@x[$b1],@x[$b1],12)",
	"&vprold (@x[$b2],@x[$b2],12)",
	"&vprold (@x[$b3],@x[$b3],12)",

	"&vpaddd (@x[$a0],@x[$a0],@x[$b0])",
	"&vpaddd (@x[$a1],@x[$a1],@x[$b1])",
	"&vpaddd (@x[$a2],@x[$a2],@x[$b2])",
	"&vpaddd (@x[$a3],@x[$a3],@x[$b3])",
	"&vpxord (@x[$d0],@x[$d0],@x[$a0])",
	"&vpxord (@x[$d1],@x[$d1],@x[$a1])",
	"&vpxord (@x[$d2],@x[$d2],@x[$a2])",
	"&vpxord (@x[$d3],@x[$d3],@x[$a3])",
	"&vprold (@x[$d0],@x[$d0],8)",
	"&vprold (@x[$d1],@x[$d1],8)",
	"&vprold (@x[$d2],@x[$d2],8)",
	"&vprold (@x[$d3],@x[$d3],8)",

	"&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
	"&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
	"&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
	"&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
	"&vpxord (@x[$b0],@x[$b0],@x[$c0])",
	"&vpxord (@x[$b1],@x[$b1],@x[$c1])",
	"&vpxord (@x[$b2],@x[$b2],@x[$c2])",
	"&vpxord (@x[$b3],@x[$b3],@x[$c3])",
	"&vprold (@x[$b0],@x[$b0],7)",
	"&vprold (@x[$b1],@x[$b1],7)",
	"&vprold (@x[$b2],@x[$b2],7)",
	"&vprold (@x[$b3],@x[$b3],7)"
	);
}
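# For reference, each vpaddd/vpxord/vprold quadruple above is one ChaCha20
# quarter-round applied to 16 independent blocks at once; a plain-Perl,
# single-lane sketch of the same transformation (an illustration only,
# not part of the generated code):
#
#	sub quarter_round {
#	    my ($a,$b,$c,$d) = @_;
#	    my $rotl = sub { (($_[0]<<$_[1]) | ($_[0]>>(32-$_[1]))) & 0xffffffff };
#	    $a = ($a+$b) & 0xffffffff; $d ^= $a; $d = $rotl->($d,16);
#	    $c = ($c+$d) & 0xffffffff; $b ^= $c; $b = $rotl->($b,12);
#	    $a = ($a+$b) & 0xffffffff; $d ^= $a; $d = $rotl->($d, 8);
#	    $c = ($c+$d) & 0xffffffff; $b ^= $c; $b = $rotl->($b, 7);
#	    ($a,$b,$c,$d);
#	}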

my $xframe = $win64 ? 0xb0 : 8;

$code.=<<___;
.type ChaCha20_16x,\@function,5
.align 32
ChaCha20_16x:
.LChaCha20_16x:
	mov %rsp,%r11
	sub \$64+$xframe,%rsp
	and \$-64,%rsp
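	# (64-byte alignment keeps the full-width vmovdqa32 spill at
	# .Less_than_64_16x below on a natural ZMM boundary)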
___
$code.=<<___ if ($win64);
	lea 0x290+0x30(%rsp),%r11
	movaps %xmm6,-0x30(%r11)
	movaps %xmm7,-0x20(%r11)
	movaps %xmm8,-0x10(%r11)
	movaps %xmm9,0x00(%r11)
	movaps %xmm10,0x10(%r11)
	movaps %xmm11,0x20(%r11)
	movaps %xmm12,0x30(%r11)
	movaps %xmm13,0x40(%r11)
	movaps %xmm14,0x50(%r11)
	movaps %xmm15,0x60(%r11)
___
$code.=<<___;
	vzeroupper

	lea .Lsigma(%rip),%r10
	vbroadcasti32x4 (%r10),$xa3 # key[0]
	vbroadcasti32x4 ($key),$xb3 # key[1]
	vbroadcasti32x4 16($key),$xc3 # key[2]
	vbroadcasti32x4 ($counter),$xd3 # key[3]

	vpshufd \$0x00,$xa3,$xa0 # smash key by lanes...
	vpshufd \$0x55,$xa3,$xa1
	vpshufd \$0xaa,$xa3,$xa2
	vpshufd \$0xff,$xa3,$xa3
	vmovdqa64 $xa0,@key[0]
	vmovdqa64 $xa1,@key[1]
	vmovdqa64 $xa2,@key[2]
	vmovdqa64 $xa3,@key[3]

	vpshufd \$0x00,$xb3,$xb0
	vpshufd \$0x55,$xb3,$xb1
	vpshufd \$0xaa,$xb3,$xb2
	vpshufd \$0xff,$xb3,$xb3
	vmovdqa64 $xb0,@key[4]
	vmovdqa64 $xb1,@key[5]
	vmovdqa64 $xb2,@key[6]
	vmovdqa64 $xb3,@key[7]

	vpshufd \$0x00,$xc3,$xc0
	vpshufd \$0x55,$xc3,$xc1
	vpshufd \$0xaa,$xc3,$xc2
	vpshufd \$0xff,$xc3,$xc3
	vmovdqa64 $xc0,@key[8]
	vmovdqa64 $xc1,@key[9]
	vmovdqa64 $xc2,@key[10]
	vmovdqa64 $xc3,@key[11]

	vpshufd \$0x00,$xd3,$xd0
	vpshufd \$0x55,$xd3,$xd1
	vpshufd \$0xaa,$xd3,$xd2
	vpshufd \$0xff,$xd3,$xd3
	vpaddd .Lincz(%rip),$xd0,$xd0 # don't save counters yet
	vmovdqa64 $xd0,@key[12]
	vmovdqa64 $xd1,@key[13]
	vmovdqa64 $xd2,@key[14]
	vmovdqa64 $xd3,@key[15]
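
	# At this point @key[j] holds state word j broadcast to all 16
	# lanes, except that .Lincz staggered the 32-bit counter in
	# @key[12] so that lane i carries counter+i; lane i of the
	# register file is thus the complete 4x4 state of block i.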

	mov \$10,%eax
	jmp .Loop16x

.align 32
.Loop_outer16x:
	vpbroadcastd 0(%r10),$xa0 # reload key
	vpbroadcastd 4(%r10),$xa1
	vpbroadcastd 8(%r10),$xa2
	vpbroadcastd 12(%r10),$xa3
	vpaddd .Lsixteen(%rip),@key[12],@key[12] # next SIMD counters
	vmovdqa64 @key[4],$xb0
	vmovdqa64 @key[5],$xb1
	vmovdqa64 @key[6],$xb2
	vmovdqa64 @key[7],$xb3
	vmovdqa64 @key[8],$xc0
	vmovdqa64 @key[9],$xc1
	vmovdqa64 @key[10],$xc2
	vmovdqa64 @key[11],$xc3
	vmovdqa64 @key[12],$xd0
	vmovdqa64 @key[13],$xd1
	vmovdqa64 @key[14],$xd2
	vmovdqa64 @key[15],$xd3

	vmovdqa64 $xa0,@key[0]
	vmovdqa64 $xa1,@key[1]
	vmovdqa64 $xa2,@key[2]
	vmovdqa64 $xa3,@key[3]

	mov \$10,%eax
	jmp .Loop16x

.align 32
.Loop16x:
___
foreach (&AVX512_lane_ROUND(0, 4, 8,12)) { eval; }
foreach (&AVX512_lane_ROUND(0, 5,10,15)) { eval; }
$code.=<<___;
	dec %eax
	jnz .Loop16x

	vpaddd @key[0],$xa0,$xa0 # accumulate key
	vpaddd @key[1],$xa1,$xa1
	vpaddd @key[2],$xa2,$xa2
	vpaddd @key[3],$xa3,$xa3

	vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data
	vpunpckldq $xa3,$xa2,$xt3
	vpunpckhdq $xa1,$xa0,$xa0
	vpunpckhdq $xa3,$xa2,$xa2
	vpunpcklqdq $xt3,$xt2,$xa1 # "a0"
	vpunpckhqdq $xt3,$xt2,$xt2 # "a1"
	vpunpcklqdq $xa2,$xa0,$xa3 # "a2"
	vpunpckhqdq $xa2,$xa0,$xa0 # "a3"
___
($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
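# rename registers so that the rows labelled "a0".."a3" above are again
# called $xa0..$xa3; the data moved during the unpacks and the
# Perl-level names simply follow it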
$code.=<<___;
	vpaddd @key[4],$xb0,$xb0
	vpaddd @key[5],$xb1,$xb1
	vpaddd @key[6],$xb2,$xb2
	vpaddd @key[7],$xb3,$xb3

	vpunpckldq $xb1,$xb0,$xt2
	vpunpckldq $xb3,$xb2,$xt3
	vpunpckhdq $xb1,$xb0,$xb0
	vpunpckhdq $xb3,$xb2,$xb2
	vpunpcklqdq $xt3,$xt2,$xb1 # "b0"
	vpunpckhqdq $xt3,$xt2,$xt2 # "b1"
	vpunpcklqdq $xb2,$xb0,$xb3 # "b2"
	vpunpckhqdq $xb2,$xb0,$xb0 # "b3"
___
($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
$code.=<<___;
	vshufi32x4 \$0x44,$xb0,$xa0,$xt3 # "de-interlace" further
	vshufi32x4 \$0xee,$xb0,$xa0,$xb0
	vshufi32x4 \$0x44,$xb1,$xa1,$xa0
	vshufi32x4 \$0xee,$xb1,$xa1,$xb1
	vshufi32x4 \$0x44,$xb2,$xa2,$xa1
	vshufi32x4 \$0xee,$xb2,$xa2,$xb2
	vshufi32x4 \$0x44,$xb3,$xa3,$xa2
	vshufi32x4 \$0xee,$xb3,$xa3,$xb3
___
($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
$code.=<<___;
	vpaddd @key[8],$xc0,$xc0
	vpaddd @key[9],$xc1,$xc1
	vpaddd @key[10],$xc2,$xc2
	vpaddd @key[11],$xc3,$xc3

	vpunpckldq $xc1,$xc0,$xt2
	vpunpckldq $xc3,$xc2,$xt3
	vpunpckhdq $xc1,$xc0,$xc0
	vpunpckhdq $xc3,$xc2,$xc2
	vpunpcklqdq $xt3,$xt2,$xc1 # "c0"
	vpunpckhqdq $xt3,$xt2,$xt2 # "c1"
	vpunpcklqdq $xc2,$xc0,$xc3 # "c2"
	vpunpckhqdq $xc2,$xc0,$xc0 # "c3"
___
($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
$code.=<<___;
	vpaddd @key[12],$xd0,$xd0
	vpaddd @key[13],$xd1,$xd1
	vpaddd @key[14],$xd2,$xd2
	vpaddd @key[15],$xd3,$xd3

	vpunpckldq $xd1,$xd0,$xt2
	vpunpckldq $xd3,$xd2,$xt3
	vpunpckhdq $xd1,$xd0,$xd0
	vpunpckhdq $xd3,$xd2,$xd2
	vpunpcklqdq $xt3,$xt2,$xd1 # "d0"
	vpunpckhqdq $xt3,$xt2,$xt2 # "d1"
	vpunpcklqdq $xd2,$xd0,$xd3 # "d2"
	vpunpckhqdq $xd2,$xd0,$xd0 # "d3"
___
($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
$code.=<<___;
	vshufi32x4 \$0x44,$xd0,$xc0,$xt3 # "de-interlace" further
	vshufi32x4 \$0xee,$xd0,$xc0,$xd0
	vshufi32x4 \$0x44,$xd1,$xc1,$xc0
	vshufi32x4 \$0xee,$xd1,$xc1,$xd1
	vshufi32x4 \$0x44,$xd2,$xc2,$xc1
	vshufi32x4 \$0xee,$xd2,$xc2,$xd2
	vshufi32x4 \$0x44,$xd3,$xc3,$xc2
	vshufi32x4 \$0xee,$xd3,$xc3,$xd3
___
($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
$code.=<<___;
	vshufi32x4 \$0x88,$xc0,$xa0,$xt0 # "de-interlace" further
	vshufi32x4 \$0xdd,$xc0,$xa0,$xa0
	vshufi32x4 \$0x88,$xd0,$xb0,$xc0
	vshufi32x4 \$0xdd,$xd0,$xb0,$xd0
	vshufi32x4 \$0x88,$xc1,$xa1,$xt1
	vshufi32x4 \$0xdd,$xc1,$xa1,$xa1
	vshufi32x4 \$0x88,$xd1,$xb1,$xc1
	vshufi32x4 \$0xdd,$xd1,$xb1,$xd1
	vshufi32x4 \$0x88,$xc2,$xa2,$xt2
	vshufi32x4 \$0xdd,$xc2,$xa2,$xa2
	vshufi32x4 \$0x88,$xd2,$xb2,$xc2
	vshufi32x4 \$0xdd,$xd2,$xb2,$xd2
	vshufi32x4 \$0x88,$xc3,$xa3,$xt3
	vshufi32x4 \$0xdd,$xc3,$xa3,$xa3
	vshufi32x4 \$0x88,$xd3,$xb3,$xc3
	vshufi32x4 \$0xdd,$xd3,$xb3,$xd3
___
($xa0,$xa1,$xa2,$xa3,$xb0,$xb1,$xb2,$xb3)=
($xt0,$xt1,$xt2,$xt3,$xa0,$xa1,$xa2,$xa3);

($xa0,$xb0,$xc0,$xd0, $xa1,$xb1,$xc1,$xd1,
 $xa2,$xb2,$xc2,$xd2, $xa3,$xb3,$xc3,$xd3) =
($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
 $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
$code.=<<___;
	cmp \$64*16,$len
	jb .Ltail16x

	vpxord 0x00($inp),$xa0,$xa0 # xor with input
	vpxord 0x40($inp),$xb0,$xb0
	vpxord 0x80($inp),$xc0,$xc0
	vpxord 0xc0($inp),$xd0,$xd0
	vmovdqu32 $xa0,0x00($out)
	vmovdqu32 $xb0,0x40($out)
	vmovdqu32 $xc0,0x80($out)
	vmovdqu32 $xd0,0xc0($out)

	vpxord 0x100($inp),$xa1,$xa1
	vpxord 0x140($inp),$xb1,$xb1
	vpxord 0x180($inp),$xc1,$xc1
	vpxord 0x1c0($inp),$xd1,$xd1
	vmovdqu32 $xa1,0x100($out)
	vmovdqu32 $xb1,0x140($out)
	vmovdqu32 $xc1,0x180($out)
	vmovdqu32 $xd1,0x1c0($out)

	vpxord 0x200($inp),$xa2,$xa2
	vpxord 0x240($inp),$xb2,$xb2
	vpxord 0x280($inp),$xc2,$xc2
	vpxord 0x2c0($inp),$xd2,$xd2
	vmovdqu32 $xa2,0x200($out)
	vmovdqu32 $xb2,0x240($out)
	vmovdqu32 $xc2,0x280($out)
	vmovdqu32 $xd2,0x2c0($out)

	vpxord 0x300($inp),$xa3,$xa3
	vpxord 0x340($inp),$xb3,$xb3
	vpxord 0x380($inp),$xc3,$xc3
	vpxord 0x3c0($inp),$xd3,$xd3
	lea 0x400($inp),$inp
	vmovdqu32 $xa3,0x300($out)
	vmovdqu32 $xb3,0x340($out)
	vmovdqu32 $xc3,0x380($out)
	vmovdqu32 $xd3,0x3c0($out)
	lea 0x400($out),$out

	sub \$64*16,$len
	jnz .Loop_outer16x

	jmp .Ldone16x

.align 32
.Ltail16x:
	xor %r10,%r10
	sub $inp,$out
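	# ($out now holds the output-minus-input displacement, so one
	# lea 64($inp),$inp per step below advances both streams; the
	# stores go through ($out,$inp))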
	cmp \$64*1,$len
	jb .Less_than_64_16x
	vpxord ($inp),$xa0,$xa0 # xor with input
	vmovdqu32 $xa0,($out,$inp)
	je .Ldone16x
	vmovdqa32 $xb0,$xa0
	lea 64($inp),$inp

	cmp \$64*2,$len
	jb .Less_than_64_16x
	vpxord ($inp),$xb0,$xb0
	vmovdqu32 $xb0,($out,$inp)
	je .Ldone16x
	vmovdqa32 $xc0,$xa0
	lea 64($inp),$inp

	cmp \$64*3,$len
	jb .Less_than_64_16x
	vpxord ($inp),$xc0,$xc0
	vmovdqu32 $xc0,($out,$inp)
	je .Ldone16x
	vmovdqa32 $xd0,$xa0
	lea 64($inp),$inp

	cmp \$64*4,$len
	jb .Less_than_64_16x
	vpxord ($inp),$xd0,$xd0
	vmovdqu32 $xd0,($out,$inp)
	je .Ldone16x
	vmovdqa32 $xa1,$xa0
	lea 64($inp),$inp

	cmp \$64*5,$len
	jb .Less_than_64_16x
	vpxord ($inp),$xa1,$xa1
	vmovdqu32 $xa1,($out,$inp)
	je .Ldone16x
	vmovdqa32 $xb1,$xa0
	lea 64($inp),$inp

	cmp \$64*6,$len
	jb .Less_than_64_16x
	vpxord ($inp),$xb1,$xb1
	vmovdqu32 $xb1,($out,$inp)
	je .Ldone16x
	vmovdqa32 $xc1,$xa0
	lea 64($inp),$inp

	cmp \$64*7,$len
	jb .Less_than_64_16x
	vpxord ($inp),$xc1,$xc1
	vmovdqu32 $xc1,($out,$inp)
	je .Ldone16x
	vmovdqa32 $xd1,$xa0
	lea 64($inp),$inp

	cmp \$64*8,$len
	jb .Less_than_64_16x
	vpxord ($inp),$xd1,$xd1
	vmovdqu32 $xd1,($out,$inp)
	je .Ldone16x
	vmovdqa32 $xa2,$xa0
	lea 64($inp),$inp

	cmp \$64*9,$len
	jb .Less_than_64_16x
	vpxord ($inp),$xa2,$xa2
	vmovdqu32 $xa2,($out,$inp)
	je .Ldone16x
	vmovdqa32 $xb2,$xa0
	lea 64($inp),$inp

	cmp \$64*10,$len
	jb .Less_than_64_16x
	vpxord ($inp),$xb2,$xb2
	vmovdqu32 $xb2,($out,$inp)
	je .Ldone16x
	vmovdqa32 $xc2,$xa0
	lea 64($inp),$inp

	cmp \$64*11,$len
	jb .Less_than_64_16x
	vpxord ($inp),$xc2,$xc2
	vmovdqu32 $xc2,($out,$inp)
	je .Ldone16x
	vmovdqa32 $xd2,$xa0
	lea 64($inp),$inp

	cmp \$64*12,$len
	jb .Less_than_64_16x
	vpxord ($inp),$xd2,$xd2
	vmovdqu32 $xd2,($out,$inp)
	je .Ldone16x
	vmovdqa32 $xa3,$xa0
	lea 64($inp),$inp

	cmp \$64*13,$len
	jb .Less_than_64_16x
	vpxord ($inp),$xa3,$xa3
	vmovdqu32 $xa3,($out,$inp)
	je .Ldone16x
	vmovdqa32 $xb3,$xa0
	lea 64($inp),$inp

	cmp \$64*14,$len
	jb .Less_than_64_16x
	vpxord ($inp),$xb3,$xb3
	vmovdqu32 $xb3,($out,$inp)
	je .Ldone16x
	vmovdqa32 $xc3,$xa0
	lea 64($inp),$inp

	cmp \$64*15,$len
	jb .Less_than_64_16x
	vpxord ($inp),$xc3,$xc3
	vmovdqu32 $xc3,($out,$inp)
	je .Ldone16x
	vmovdqa32 $xd3,$xa0
	lea 64($inp),$inp

.Less_than_64_16x:
	vmovdqa32 $xa0,0x00(%rsp)
	lea ($out,$inp),$out
	and \$63,$len

.Loop_tail16x:
	movzb ($inp,%r10),%eax
	movzb (%rsp,%r10),%ecx
	lea 1(%r10),%r10
	xor %ecx,%eax
	mov %al,-1($out,%r10)
	dec $len
	jnz .Loop_tail16x

.Ldone16x:
	vzeroupper
___
$code.=<<___ if ($win64);
	lea 0x290+0x30(%rsp),%r11
	movaps -0x30(%r11),%xmm6
	movaps -0x20(%r11),%xmm7
	movaps -0x10(%r11),%xmm8
	movaps 0x00(%r11),%xmm9
	movaps 0x10(%r11),%xmm10
	movaps 0x20(%r11),%xmm11
	movaps 0x30(%r11),%xmm12
	movaps 0x40(%r11),%xmm13
	movaps 0x50(%r11),%xmm14
	movaps 0x60(%r11),%xmm15
___
$code.=<<___;
	mov %r11,%rsp
	ret
.size ChaCha20_16x,.-ChaCha20_16x
___
}

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/geo;

@@ -18,6 +18,12 @@
#
# March 2015
#
# Initial release.
#
# December 2016
#
# Add AVX512F+VL+BW code path.
#
# Numbers are cycles per processed byte with poly1305_blocks alone,
# measured with rdtsc at fixed clock frequency.
#
@@ -56,7 +62,7 @@ die "can't locate x86_64-xlate.pl";

if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
	=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
-	$avx = ($1>=2.19) + ($1>=2.22);
+	$avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25);
}

if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
@@ -1569,7 +1575,9 @@ poly1305_blocks_avx2:
	call __poly1305_init_avx

.Lproceed_avx2:
-	mov %r15,$len
+	mov %r15,$len # restore $len
	mov OPENSSL_ia32cap_P+8(%rip),%r10d
	mov \$`(1<<31|1<<30|1<<16)`,%r11d
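	# (1<<16, 1<<30 and 1<<31 are the AVX512F, AVX512BW and AVX512VL
	# bits of CPUID(7).EBX, cached in the third OPENSSL_ia32cap_P word)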

	mov 0(%rsp),%r15
	mov 8(%rsp),%r14
@@ -1584,6 +1592,8 @@ poly1305_blocks_avx2:

.align 32
.Leven_avx2:
	mov OPENSSL_ia32cap_P+8(%rip),%r10d
	mov \$`(1<<31|1<<30|1<<16)`,%r11d
	vmovd 4*0($ctx),%x#$H0 # load hash value base 2^26
	vmovd 4*1($ctx),%x#$H1
	vmovd 4*2($ctx),%x#$H2
@@ -1592,6 +1602,14 @@ poly1305_blocks_avx2:

.Ldo_avx2:
___
$code.=<<___ if ($avx>2);
	cmp \$512,$len
	jb .Lskip_avx512
	and %r11d,%r10d
	cmp %r11d,%r10d # check for AVX512F+BW+VL
	je .Lblocks_avx512
.Lskip_avx512:
___
$code.=<<___ if (!$win64);
	lea -8(%rsp),%r11
	sub \$0x128,%rsp
@@ -1688,11 +1706,11 @@ $code.=<<___;
.align 32
.Loop_avx2:
	################################################################
-	# ((inp[0]*r^4+r[4])*r^4+r[8])*r^4
-	# ((inp[1]*r^4+r[5])*r^4+r[9])*r^3
-	# ((inp[2]*r^4+r[6])*r^4+r[10])*r^2
-	# ((inp[3]*r^4+r[7])*r^4+r[11])*r^1
-	#   \________/\________/
+	# ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4
+	# ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3
+	# ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2
+	# ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1
+	#   \________/\__________/
	################################################################
	#vpaddq $H2,$T2,$H2 # accumulate input
	vpaddq $H0,$T0,$H0
@@ -1996,7 +2014,636 @@ $code.=<<___;
	ret
.size poly1305_blocks_avx2,.-poly1305_blocks_avx2
___
}

#######################################################################
if ($avx>2) {
# On entry we have input length divisible by 64. But since inner loop
# processes 128 bytes per iteration, cases when length is not divisible
# by 128 are handled by passing tail 64 bytes to .Ltail_avx2. For this
# reason stack layout is kept identical to poly1305_blocks_avx2. If not
# for this tail, we wouldn't have to even allocate stack frame...

my ($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4) = map("%ymm$_",(16..24));
my ($M0,$M1,$M2,$M3,$M4) = map("%ymm$_",(25..29));
my $PADBIT="%zmm30";
my $GATHER="%ymm31";
$code.=<<___;
.type poly1305_blocks_avx512,\@function,4
.align 32
poly1305_blocks_avx512:
.Lblocks_avx512:
	vzeroupper
___
$code.=<<___ if (!$win64);
	lea -8(%rsp),%r11
	sub \$0x128,%rsp
___
$code.=<<___ if ($win64);
	lea -0xf8(%rsp),%r11
	sub \$0x1c8,%rsp
	vmovdqa %xmm6,0x50(%r11)
	vmovdqa %xmm7,0x60(%r11)
	vmovdqa %xmm8,0x70(%r11)
	vmovdqa %xmm9,0x80(%r11)
	vmovdqa %xmm10,0x90(%r11)
	vmovdqa %xmm11,0xa0(%r11)
	vmovdqa %xmm12,0xb0(%r11)
	vmovdqa %xmm13,0xc0(%r11)
	vmovdqa %xmm14,0xd0(%r11)
	vmovdqa %xmm15,0xe0(%r11)
.Ldo_avx512_body:
___
$code.=<<___;
	lea 48+64($ctx),$ctx # size optimization
	lea .Lconst(%rip),%rcx

	# expand pre-calculated table
	vmovdqu32 `16*0-64`($ctx),%x#$R0
	and \$-512,%rsp
	vmovdqu32 `16*1-64`($ctx),%x#$R1
	vmovdqu32 `16*2-64`($ctx),%x#$S1
	vmovdqu32 `16*3-64`($ctx),%x#$R2
	vmovdqu32 `16*4-64`($ctx),%x#$S2
	vmovdqu32 `16*5-64`($ctx),%x#$R3
	vmovdqu32 `16*6-64`($ctx),%x#$S3
	vmovdqu32 `16*7-64`($ctx),%x#$R4
	vmovdqu32 `16*8-64`($ctx),%x#$S4
	vpermq \$0x15,$R0,$R0 # 00003412 -> 12343434
	vmovdqa64 64(%rcx),$MASK # .Lmask26
	vpermq \$0x15,$R1,$R1
	vmovdqa32 128(%rcx),$GATHER # .Lgather
	vpermq \$0x15,$S1,$S1
	vpshufd \$0xc8,$R0,$R0 # 12343434 -> 14243444
	vpermq \$0x15,$R2,$R2
	vpshufd \$0xc8,$R1,$R1
	vmovdqa32 $R0,0x00(%rsp) # save in case $len%128 != 0
	vpsrlq \$32,$R0,$T0 # 14243444 -> 01020304
	vpermq \$0x15,$S2,$S2
	vpshufd \$0xc8,$S1,$S1
	vmovdqa32 $R1,0x20(%rsp)
	vpsrlq \$32,$R1,$T1
	vpermq \$0x15,$R3,$R3
	vpshufd \$0xc8,$R2,$R2
	vmovdqa32 $S1,0x40(%rsp)
	vpermq \$0x15,$S3,$S3
	vpshufd \$0xc8,$S2,$S2
	vpermq \$0x15,$R4,$R4
	vpshufd \$0xc8,$R3,$R3
	vmovdqa32 $R2,0x60(%rsp)
	vpermq \$0x15,$S4,$S4
	vpshufd \$0xc8,$S3,$S3
	vmovdqa32 $S2,0x80(%rsp)
	vpshufd \$0xc8,$R4,$R4
	vpshufd \$0xc8,$S4,$S4
	vmovdqa32 $R3,0xa0(%rsp)
	vmovdqa32 $S3,0xc0(%rsp)
	vmovdqa32 $R4,0xe0(%rsp)
	vmovdqa32 $S4,0x100(%rsp)

	################################################################
	# calculate 5th through 8th powers of the key
	#
	# d0 = r0'*r0 + r1'*5*r4 + r2'*5*r3 + r3'*5*r2 + r4'*5*r1
	# d1 = r0'*r1 + r1'*r0 + r2'*5*r4 + r3'*5*r3 + r4'*5*r2
	# d2 = r0'*r2 + r1'*r1 + r2'*r0 + r3'*5*r4 + r4'*5*r3
	# d3 = r0'*r3 + r1'*r2 + r2'*r1 + r3'*r0 + r4'*5*r4
	# d4 = r0'*r4 + r1'*r3 + r2'*r2 + r3'*r1 + r4'*r0

	vpmuludq $T0,$R0,$D0 # d0 = r0'*r0
	vpmuludq $T0,$R1,$D1 # d1 = r0'*r1
	vpmuludq $T0,$R2,$D2 # d2 = r0'*r2
	vpmuludq $T0,$R3,$D3 # d3 = r0'*r3
	vpmuludq $T0,$R4,$D4 # d4 = r0'*r4
	vpsrlq \$32,$R2,$T2

	vpmuludq $T1,$S4,$M0
	vpmuludq $T1,$R0,$M1
	vpmuludq $T1,$R1,$M2
	vpmuludq $T1,$R2,$M3
	vpmuludq $T1,$R3,$M4
	vpsrlq \$32,$R3,$T3
	vpaddq $M0,$D0,$D0 # d0 += r1'*5*r4
	vpaddq $M1,$D1,$D1 # d1 += r1'*r0
	vpaddq $M2,$D2,$D2 # d2 += r1'*r1
	vpaddq $M3,$D3,$D3 # d3 += r1'*r2
	vpaddq $M4,$D4,$D4 # d4 += r1'*r3

	vpmuludq $T2,$S3,$M0
	vpmuludq $T2,$S4,$M1
	vpmuludq $T2,$R1,$M3
	vpmuludq $T2,$R2,$M4
	vpmuludq $T2,$R0,$M2
	vpsrlq \$32,$R4,$T4
	vpaddq $M0,$D0,$D0 # d0 += r2'*5*r3
	vpaddq $M1,$D1,$D1 # d1 += r2'*5*r4
	vpaddq $M3,$D3,$D3 # d3 += r2'*r1
	vpaddq $M4,$D4,$D4 # d4 += r2'*r2
	vpaddq $M2,$D2,$D2 # d2 += r2'*r0

	vpmuludq $T3,$S2,$M0
	vpmuludq $T3,$R0,$M3
	vpmuludq $T3,$R1,$M4
	vpmuludq $T3,$S3,$M1
	vpmuludq $T3,$S4,$M2
	vpaddq $M0,$D0,$D0 # d0 += r3'*5*r2
	vpaddq $M3,$D3,$D3 # d3 += r3'*r0
	vpaddq $M4,$D4,$D4 # d4 += r3'*r1
	vpaddq $M1,$D1,$D1 # d1 += r3'*5*r3
	vpaddq $M2,$D2,$D2 # d2 += r3'*5*r4

	vpmuludq $T4,$S4,$M3
	vpmuludq $T4,$R0,$M4
	vpmuludq $T4,$S1,$M0
	vpmuludq $T4,$S2,$M1
	vpmuludq $T4,$S3,$M2
	vpaddq $M3,$D3,$D3 # d3 += r4'*5*r4
	vpaddq $M4,$D4,$D4 # d4 += r4'*r0
	vpaddq $M0,$D0,$D0 # d0 += r4'*5*r1
	vpaddq $M1,$D1,$D1 # d1 += r4'*5*r2
	vpaddq $M2,$D2,$D2 # d2 += r4'*5*r3

	################################################################
	# load input
	vmovdqu64 16*0($inp),%x#$T0
	vmovdqu64 16*1($inp),%x#$T1
	vinserti64x2 \$1,16*2($inp),$T0,$T0
	vinserti64x2 \$1,16*3($inp),$T1,$T1
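	# ($T0 collects the even-numbered and $T1 the odd-numbered
	# 16-byte blocks, so the vpunpck{l,h}qdq splat below pairs
	# neighbouring blocks lane by lane)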

	################################################################
	# lazy reduction

	vpsrlq \$26,$D3,$M3
	vpandq $MASK,$D3,$D3
	vpaddq $M3,$D4,$D4 # d3 -> d4

	vpsrlq \$26,$D0,$M0
	vpandq $MASK,$D0,$D0
	vpaddq $M0,$D1,$D1 # d0 -> d1

	vpsrlq \$26,$D4,$M4
	vpandq $MASK,$D4,$D4

	vpsrlq \$26,$D1,$M1
	vpandq $MASK,$D1,$D1
	vpaddq $M1,$D2,$D2 # d1 -> d2

	vpaddq $M4,$D0,$D0
	vpsllq \$2,$M4,$M4
	vpaddq $M4,$D0,$D0 # d4 -> d0

	vpsrlq \$26,$D2,$M2
	vpandq $MASK,$D2,$D2
	vpaddq $M2,$D3,$D3 # d2 -> d3

	vpsrlq \$26,$D0,$M0
	vpandq $MASK,$D0,$D0
	vpaddq $M0,$D1,$D1 # d0 -> d1

	vpsrlq \$26,$D3,$M3
	vpandq $MASK,$D3,$D3
	vpaddq $M3,$D4,$D4 # d3 -> d4

___
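# What the vpmuludq/vpaddq schedule above computes in each 64-bit lane is
# schoolbook base-2^26 multiplication modulo 2^130-5 followed by a lazy
# carry pass; a scalar Perl sketch of both (an illustration with ad-hoc
# names, not part of the generated code):
#
#	my $M26 = (1<<26)-1;
#	sub mul_mod_p {				# (h0..h4)*(r0..r4) mod 2^130-5
#	    my ($h,$r) = @_;			# refs to 5-limb arrays
#	    my @d;
#	    for my $i (0..4) {
#	        $d[$i] = 0;
#	        for my $j (0..4) {
#	            my $k = $i-$j;
#	            # wrapped limbs pick up the factor 5, since 2^130 = 5 mod p
#	            $d[$i] += $k>=0 ? $h->[$j]*$r->[$k] : 5*$h->[$j]*$r->[$k+5];
#	        }
#	    }
#	    # lazy reduction, in the same order as the code above:
#	    # d3->d4, d0->d1, d4->d0 (x5), d1->d2, d2->d3, d0->d1, d3->d4
#	    for my $p ([3,4],[0,1],[4,0],[1,2],[2,3],[0,1],[3,4]) {
#	        my $c = $d[$p->[0]] >> 26; $d[$p->[0]] &= $M26;
#	        $d[$p->[1]] += $p->[0]==4 ? $c+($c<<2) : $c;
#	    }
#	    @d;
#	}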
map(s/%y/%z/,($T4,$T0,$T1,$T2,$T3));
map(s/%y/%z/,($M4,$M0,$M1,$M2,$M3));
map(s/%y/%z/,($MASK));
$code.=<<___;
	################################################################
	# load more input
	vinserti64x2 \$2,16*4($inp),$T0,$T0
	vinserti64x2 \$2,16*5($inp),$T1,$T1
	vinserti64x2 \$3,16*6($inp),$T0,$T0
	vinserti64x2 \$3,16*7($inp),$T1,$T1
	lea 16*8($inp),$inp

	vpbroadcastq %x#$MASK,$MASK
	vpbroadcastq 32(%rcx),$PADBIT

	################################################################
	# at this point we have 14243444 in $R0-$S4 and 05060708 in
	# $D0-$D4, and the goal is 1828384858687888 in $R0-$S4

	mov \$0x5555,%eax
	vpbroadcastq %x#$D0,$M0 # 0808080808080808
	vpbroadcastq %x#$D1,$M1
	vpbroadcastq %x#$D2,$M2
	vpbroadcastq %x#$D3,$M3
	vpbroadcastq %x#$D4,$M4
	kmovw %eax,%k3
	vpsllq \$32,$D0,$D0 # 05060708 -> 50607080
	vpsllq \$32,$D1,$D1
	vpsllq \$32,$D2,$D2
	vpsllq \$32,$D3,$D3
	vpsllq \$32,$D4,$D4
___
map(s/%y/%z/,($D0,$D1,$D2,$D3,$D4));
$code.=<<___;
	vinserti64x4 \$1,$R0,$D0,$D0 # 1424344450607080
	vinserti64x4 \$1,$R1,$D1,$D1
	vinserti64x4 \$1,$R2,$D2,$D2
	vinserti64x4 \$1,$R3,$D3,$D3
	vinserti64x4 \$1,$R4,$D4,$D4
___
map(s/%y/%z/,($H0,$H1,$H2,$H3,$H4));
map(s/%y/%z/,($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4));
$code.=<<___;
	vpblendmd $M0,$D0,${R0}{%k3} # 1828384858687888
	vpblendmd $M1,$D1,${R1}{%k3}
	vpblendmd $M2,$D2,${R2}{%k3}
	vpblendmd $M3,$D3,${R3}{%k3}
	vpblendmd $M4,$D4,${R4}{%k3}

	vpslld \$2,$R1,$S1 # *5
	vpslld \$2,$R2,$S2
	vpslld \$2,$R3,$S3
	vpslld \$2,$R4,$S4
	vpaddd $R1,$S1,$S1
	vpaddd $R2,$S2,$S2
	vpaddd $R3,$S3,$S3
	vpaddd $R4,$S4,$S4

	vpsrldq \$6,$T0,$T2 # splat input
	vpsrldq \$6,$T1,$T3
	vpunpckhqdq $T1,$T0,$T4 # 4
	vpunpcklqdq $T3,$T2,$T2 # 2:3
	vpunpcklqdq $T1,$T0,$T0 # 0:1

	vpsrlq \$30,$T2,$T3
	vpsrlq \$4,$T2,$T2
	vpsrlq \$26,$T0,$T1
	vpsrlq \$40,$T4,$T4 # 4
	vpandq $MASK,$T2,$T2 # 2
	vpandq $MASK,$T0,$T0 # 0
	#vpandq $MASK,$T1,$T1 # 1
	#vpandq $MASK,$T3,$T3 # 3
	#vporq $PADBIT,$T4,$T4 # padbit, yes, always

	vpaddq $H2,$T2,$H2 # accumulate input
	mov \$0x0f,%eax
	sub \$192,$len
	jbe .Ltail_avx512

.Loop_avx512:
	################################################################
	# ((inp[0]*r^8+inp[ 8])*r^8+inp[16])*r^8
	# ((inp[1]*r^8+inp[ 9])*r^8+inp[17])*r^7
	# ((inp[2]*r^8+inp[10])*r^8+inp[18])*r^6
	# ((inp[3]*r^8+inp[11])*r^8+inp[19])*r^5
	# ((inp[4]*r^8+inp[12])*r^8+inp[20])*r^4
	# ((inp[5]*r^8+inp[13])*r^8+inp[21])*r^3
	# ((inp[6]*r^8+inp[14])*r^8+inp[22])*r^2
	# ((inp[7]*r^8+inp[15])*r^8+inp[23])*r^1
	#   \________/\___________/
	################################################################
	#vpaddq $H2,$T2,$H2 # accumulate input

	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
	#
	# however, as h2 is "chronologically" the first one available, we
	# pull the corresponding operations up, so the order becomes:
	#
	# d3 = h2*r1 + h0*r3 + h1*r2 + h3*r0 + h4*5*r4
	# d4 = h2*r2 + h0*r4 + h1*r3 + h3*r1 + h4*r0
	# d0 = h2*5*r3 + h0*r0 + h1*5*r4 + h3*5*r2 + h4*5*r1
	# d1 = h2*5*r4 + h0*r1 + h1*r0 + h3*5*r3 + h4*5*r2
	# d2 = h2*r0 + h0*r2 + h1*r1 + h3*5*r4 + h4*5*r3

	vpmuludq $H2,$R1,$D3 # d3 = h2*r1
	vpaddq $H0,$T0,$H0
	vmovdqu64 16*0($inp),%x#$M0 # load input
	vpmuludq $H2,$R2,$D4 # d4 = h2*r2
	vpandq $MASK,$T1,$T1 # 1, modulo-scheduled
	vmovdqu64 16*1($inp),%x#$M1
	vpmuludq $H2,$S3,$D0 # d0 = h2*s3
	vpandq $MASK,$T3,$T3 # 3
	vpmuludq $H2,$S4,$D1 # d1 = h2*s4
	vporq $PADBIT,$T4,$T4 # padbit, yes, always
	vpmuludq $H2,$R0,$D2 # d2 = h2*r0
	vpaddq $H1,$T1,$H1 # accumulate input
	vpaddq $H3,$T3,$H3
	vpaddq $H4,$T4,$H4

	vinserti64x2 \$1,16*2($inp),$M0,$T0
	vinserti64x2 \$1,16*3($inp),$M1,$T1
	vpmuludq $H0,$R3,$M3
	vpmuludq $H0,$R4,$M4
	vpmuludq $H0,$R0,$M0
	vpmuludq $H0,$R1,$M1
	vpaddq $M3,$D3,$D3 # d3 += h0*r3
	vpaddq $M4,$D4,$D4 # d4 += h0*r4
	vpaddq $M0,$D0,$D0 # d0 += h0*r0
	vpaddq $M1,$D1,$D1 # d1 += h0*r1

	vinserti64x2 \$2,16*4($inp),$T0,$T0
	vinserti64x2 \$2,16*5($inp),$T1,$T1
	vpmuludq $H1,$R2,$M3
	vpmuludq $H1,$R3,$M4
	vpmuludq $H1,$S4,$M0
	vpmuludq $H0,$R2,$M2
	vpaddq $M3,$D3,$D3 # d3 += h1*r2
	vpaddq $M4,$D4,$D4 # d4 += h1*r3
	vpaddq $M0,$D0,$D0 # d0 += h1*s4
	vpaddq $M2,$D2,$D2 # d2 += h0*r2

	vinserti64x2 \$3,16*6($inp),$T0,$T0
	vinserti64x2 \$3,16*7($inp),$T1,$T1
	vpmuludq $H3,$R0,$M3
	vpmuludq $H3,$R1,$M4
	vpmuludq $H1,$R0,$M1
	vpmuludq $H1,$R1,$M2
	vpaddq $M3,$D3,$D3 # d3 += h3*r0
	vpaddq $M4,$D4,$D4 # d4 += h3*r1
	vpaddq $M1,$D1,$D1 # d1 += h1*r0
	vpaddq $M2,$D2,$D2 # d2 += h1*r1

	vpsrldq \$6,$T0,$T2 # splat input
	vpsrldq \$6,$T1,$T3
	vpunpckhqdq $T1,$T0,$T4 # 4
	vpmuludq $H4,$S4,$M3
	vpmuludq $H4,$R0,$M4
	vpmuludq $H3,$S2,$M0
	vpmuludq $H3,$S3,$M1
	vpaddq $M3,$D3,$D3 # d3 += h4*s4
	vpmuludq $H3,$S4,$M2
	vpaddq $M4,$D4,$D4 # d4 += h4*r0
	vpaddq $M0,$D0,$D0 # d0 += h3*s2
	vpaddq $M1,$D1,$D1 # d1 += h3*s3
	vpaddq $M2,$D2,$D2 # d2 += h3*s4

	vpunpcklqdq $T1,$T0,$T0 # 0:1
	vpunpcklqdq $T3,$T2,$T3 # 2:3
	lea 16*8($inp),$inp
	vpmuludq $H4,$S1,$M0
	vpmuludq $H4,$S2,$M1
	vpmuludq $H4,$S3,$M2
	vpaddq $M0,$D0,$H0 # h0 = d0 + h4*s1
	vpaddq $M1,$D1,$H1 # h1 = d1 + h4*s2
	vpaddq $M2,$D2,$H2 # h2 = d2 + h4*s3

	################################################################
	# lazy reduction (interleaved with tail of input splat)

	vpsrlq \$26,$D3,$H3
	vpandq $MASK,$D3,$D3
	vpaddq $H3,$D4,$H4 # h3 -> h4

	vpsrlq \$26,$H0,$D0
	vpandq $MASK,$H0,$H0
	vpaddq $D0,$H1,$H1 # h0 -> h1

	vpsrlq \$26,$H4,$D4
	vpandq $MASK,$H4,$H4

	vpsrlq \$4,$T3,$T2

	vpsrlq \$26,$H1,$D1
	vpandq $MASK,$H1,$H1
	vpaddq $D1,$H2,$H2 # h1 -> h2

	vpaddq $D4,$H0,$H0
	vpsllq \$2,$D4,$D4
	vpaddq $D4,$H0,$H0 # h4 -> h0

	vpandq $MASK,$T2,$T2 # 2
	vpsrlq \$26,$T0,$T1

	vpsrlq \$26,$H2,$D2
	vpandq $MASK,$H2,$H2
	vpaddq $D2,$D3,$H3 # h2 -> h3

	vpaddq $T2,$H2,$H2 # modulo-scheduled
	vpsrlq \$30,$T3,$T3

	vpsrlq \$26,$H0,$D0
	vpandq $MASK,$H0,$H0
	vpaddq $D0,$H1,$H1 # h0 -> h1

	vpsrlq \$40,$T4,$T4 # 4

	vpsrlq \$26,$H3,$D3
	vpandq $MASK,$H3,$H3
	vpaddq $D3,$H4,$H4 # h3 -> h4

	vpandq $MASK,$T0,$T0 # 0
	#vpandq $MASK,$T1,$T1 # 1
	#vpandq $MASK,$T3,$T3 # 3
	#vporq $PADBIT,$T4,$T4 # padbit, yes, always

	sub \$128,$len
	ja .Loop_avx512

.Ltail_avx512:
	################################################################
	# while above multiplications were by r^8 in all lanes, in last
	# iteration we multiply least significant lane by r^8 and most
	# significant one by r, that's why table gets shifted...
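	# (i.e. lane i, counting from the least significant, ends up
	# multiplied by r^(8-i), so all eight running sums converge on
	# the same point)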

	vpsrlq \$32,$R0,$R0 # 0102030405060708
	vpsrlq \$32,$R1,$R1
	vpsrlq \$32,$R2,$R2
	vpsrlq \$32,$S3,$S3
	vpsrlq \$32,$S4,$S4
	vpsrlq \$32,$R3,$R3
	vpsrlq \$32,$R4,$R4
	vpsrlq \$32,$S1,$S1
	vpsrlq \$32,$S2,$S2

	################################################################
	# load either next or last 64 byte of input
	lea ($inp,$len),$inp

	#vpaddq $H2,$T2,$H2 # accumulate input
	vpaddq $H0,$T0,$H0

	vpmuludq $H2,$R1,$D3 # d3 = h2*r1
	vpmuludq $H2,$R2,$D4 # d4 = h2*r2
	vpmuludq $H2,$S3,$D0 # d0 = h2*s3
	vpmuludq $H2,$S4,$D1 # d1 = h2*s4
	vpmuludq $H2,$R0,$D2 # d2 = h2*r0
	vpandq $MASK,$T1,$T1 # 1, modulo-scheduled
	vpandq $MASK,$T3,$T3 # 3
	vporq $PADBIT,$T4,$T4 # padbit, yes, always
	vpaddq $H1,$T1,$H1 # accumulate input
	vpaddq $H3,$T3,$H3
	vpaddq $H4,$T4,$H4

	vmovdqu64 16*0($inp),%x#$T0
	vpmuludq $H0,$R3,$M3
	vpmuludq $H0,$R4,$M4
	vpmuludq $H0,$R0,$M0
	vpmuludq $H0,$R1,$M1
	vpaddq $M3,$D3,$D3 # d3 += h0*r3
	vpaddq $M4,$D4,$D4 # d4 += h0*r4
	vpaddq $M0,$D0,$D0 # d0 += h0*r0
	vpaddq $M1,$D1,$D1 # d1 += h0*r1

	vmovdqu64 16*1($inp),%x#$T1
	vpmuludq $H1,$R2,$M3
	vpmuludq $H1,$R3,$M4
	vpmuludq $H1,$S4,$M0
	vpmuludq $H0,$R2,$M2
	vpaddq $M3,$D3,$D3 # d3 += h1*r2
	vpaddq $M4,$D4,$D4 # d4 += h1*r3
	vpaddq $M0,$D0,$D0 # d0 += h1*s4
	vpaddq $M2,$D2,$D2 # d2 += h0*r2

	vinserti64x2 \$1,16*2($inp),$T0,$T0
	vpmuludq $H3,$R0,$M3
	vpmuludq $H3,$R1,$M4
	vpmuludq $H1,$R0,$M1
	vpmuludq $H1,$R1,$M2
	vpaddq $M3,$D3,$D3 # d3 += h3*r0
	vpaddq $M4,$D4,$D4 # d4 += h3*r1
	vpaddq $M1,$D1,$D1 # d1 += h1*r0
	vpaddq $M2,$D2,$D2 # d2 += h1*r1

	vinserti64x2 \$1,16*3($inp),$T1,$T1
	vpmuludq $H4,$S4,$M3
	vpmuludq $H4,$R0,$M4
	vpmuludq $H3,$S2,$M0
	vpmuludq $H3,$S3,$M1
	vpmuludq $H3,$S4,$M2
	vpaddq $M3,$D3,$H3 # h3 = d3 + h4*s4
	vpaddq $M4,$D4,$D4 # d4 += h4*r0
	vpaddq $M0,$D0,$D0 # d0 += h3*s2
	vpaddq $M1,$D1,$D1 # d1 += h3*s3
	vpaddq $M2,$D2,$D2 # d2 += h3*s4

	vpmuludq $H4,$S1,$M0
	vpmuludq $H4,$S2,$M1
	vpmuludq $H4,$S3,$M2
	vpaddq $M0,$D0,$H0 # h0 = d0 + h4*s1
	vpaddq $M1,$D1,$H1 # h1 = d1 + h4*s2
	vpaddq $M2,$D2,$H2 # h2 = d2 + h4*s3

	################################################################
	# horizontal addition
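	# (the eight qword lanes of each $H hold eight partial sums;
	# vpsrldq folds qword pairs within each 128-bit unit, vpermq the
	# two units within each 256-bit half, vextracti64x4 the halves)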

	mov \$1,%eax
	vpsrldq \$8,$H3,$D3
	vpsrldq \$8,$D4,$H4
	vpsrldq \$8,$H0,$D0
	vpsrldq \$8,$H1,$D1
	vpsrldq \$8,$H2,$D2
	vpaddq $D3,$H3,$H3
	vpaddq $D4,$H4,$H4
	vpaddq $D0,$H0,$H0
	vpaddq $D1,$H1,$H1
	vpaddq $D2,$H2,$H2

	kmovw %eax,%k3
	vpermq \$0x2,$H3,$D3
	vpermq \$0x2,$H4,$D4
	vpermq \$0x2,$H0,$D0
	vpermq \$0x2,$H1,$D1
	vpermq \$0x2,$H2,$D2
	vpaddq $D3,$H3,$H3
	vpaddq $D4,$H4,$H4
	vpaddq $D0,$H0,$H0
	vpaddq $D1,$H1,$H1
	vpaddq $D2,$H2,$H2

	vextracti64x4 \$0x1,$H3,%y#$D3
	vextracti64x4 \$0x1,$H4,%y#$D4
	vextracti64x4 \$0x1,$H0,%y#$D0
	vextracti64x4 \$0x1,$H1,%y#$D1
	vextracti64x4 \$0x1,$H2,%y#$D2
	vpaddq $D3,$H3,${H3}{%k3}{z} # keep single qword in case
	vpaddq $D4,$H4,${H4}{%k3}{z} # it's passed to .Ltail_avx2
	vpaddq $D0,$H0,${H0}{%k3}{z}
	vpaddq $D1,$H1,${H1}{%k3}{z}
	vpaddq $D2,$H2,${H2}{%k3}{z}
___
map(s/%z/%y/,($T0,$T1,$T2,$T3,$T4, $PADBIT));
map(s/%z/%y/,($H0,$H1,$H2,$H3,$H4, $D0,$D1,$D2,$D3,$D4, $MASK));
$code.=<<___;
	################################################################
	# lazy reduction (interleaved with input splat)

	vpsrlq \$26,$H3,$D3
	vpandq $MASK,$H3,$H3
	vpsrldq \$6,$T0,$T2 # splat input
	vpsrldq \$6,$T1,$T3
	vpunpckhqdq $T1,$T0,$T4 # 4
	vpaddq $D3,$H4,$H4 # h3 -> h4

	vpsrlq \$26,$H0,$D0
	vpandq $MASK,$H0,$H0
	vpunpcklqdq $T3,$T2,$T2 # 2:3
	vpunpcklqdq $T1,$T0,$T0 # 0:1
	vpaddq $D0,$H1,$H1 # h0 -> h1

	vpsrlq \$26,$H4,$D4
	vpandq $MASK,$H4,$H4

	vpsrlq \$26,$H1,$D1
	vpandq $MASK,$H1,$H1
	vpsrlq \$30,$T2,$T3
	vpsrlq \$4,$T2,$T2
	vpaddq $D1,$H2,$H2 # h1 -> h2

	vpaddq $D4,$H0,$H0
	vpsllq \$2,$D4,$D4
	vpsrlq \$26,$T0,$T1
	vpsrlq \$40,$T4,$T4 # 4
	vpaddq $D4,$H0,$H0 # h4 -> h0

	vpsrlq \$26,$H2,$D2
	vpandq $MASK,$H2,$H2
	vpandq $MASK,$T2,$T2 # 2
	vpandq $MASK,$T0,$T0 # 0
	vpaddq $D2,$H3,$H3 # h2 -> h3

	vpsrlq \$26,$H0,$D0
	vpandq $MASK,$H0,$H0
	vpaddq $H2,$T2,$H2 # accumulate input for .Ltail_avx2
	vpandq $MASK,$T1,$T1 # 1
	vpaddq $D0,$H1,$H1 # h0 -> h1

	vpsrlq \$26,$H3,$D3
	vpandq $MASK,$H3,$H3
	vpandq $MASK,$T3,$T3 # 3
	vporq $PADBIT,$T4,$T4 # padbit, yes, always
	vpaddq $D3,$H4,$H4 # h3 -> h4

	lea 0x90(%rsp),%rax # size optimization for .Ltail_avx2
	add \$64,$len
	jnz .Ltail_avx2

	vpsubq $T2,$H2,$H2 # undo input accumulation
	vmovd %x#$H0,`4*0-48-64`($ctx) # save partially reduced
	vmovd %x#$H1,`4*1-48-64`($ctx)
	vmovd %x#$H2,`4*2-48-64`($ctx)
	vmovd %x#$H3,`4*3-48-64`($ctx)
	vmovd %x#$H4,`4*4-48-64`($ctx)
___
$code.=<<___ if ($win64);
	vmovdqa 0x50(%r11),%xmm6
	vmovdqa 0x60(%r11),%xmm7
	vmovdqa 0x70(%r11),%xmm8
	vmovdqa 0x80(%r11),%xmm9
	vmovdqa 0x90(%r11),%xmm10
	vmovdqa 0xa0(%r11),%xmm11
	vmovdqa 0xb0(%r11),%xmm12
	vmovdqa 0xc0(%r11),%xmm13
	vmovdqa 0xd0(%r11),%xmm14
	vmovdqa 0xe0(%r11),%xmm15
	lea 0xf8(%r11),%rsp
.Ldo_avx512_epilogue:
___
$code.=<<___ if (!$win64);
	lea 8(%r11),%rsp
___
$code.=<<___;
	vzeroupper
	ret
.size poly1305_blocks_avx512,.-poly1305_blocks_avx512
___
} }

$code.=<<___;
.align 64
.Lconst:
@@ -2008,6 +2655,8 @@ $code.=<<___;
.long 0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0
.Lfive:
.long 5,0,5,0,5,0,5,0
.Lgather:
.long 0,8, 32,40, 64,72, 96,104
___
}
@@ -2200,6 +2849,11 @@ $code.=<<___ if ($avx>1);
	.rva .LSEH_end_poly1305_blocks_avx2
	.rva .LSEH_info_poly1305_blocks_avx2_3
___
$code.=<<___ if ($avx>2);
	.rva .LSEH_begin_poly1305_blocks_avx512
	.rva .LSEH_end_poly1305_blocks_avx512
	.rva .LSEH_info_poly1305_blocks_avx512
___
$code.=<<___;
.section .xdata
.align 8
@@ -2255,13 +2909,19 @@ $code.=<<___ if ($avx>1);
	.rva avx_handler
	.rva .Ldo_avx2_body,.Ldo_avx2_epilogue # HandlerData[]
___
$code.=<<___ if ($avx>2);
.LSEH_info_poly1305_blocks_avx512:
	.byte 9,0,0,0
	.rva avx_handler
	.rva .Ldo_avx512_body,.Ldo_avx512_epilogue # HandlerData[]
___
}

foreach (split('\n',$code)) {
	s/\`([^\`]*)\`/eval($1)/ge;
	s/%r([a-z]+)#d/%e$1/g;
	s/%r([0-9]+)#d/%r$1d/g;
-	s/%x#%y/%x/g;
+	s/%x#%[yz]/%x/g or s/%y#%z/%y/g or s/%z#%[yz]/%z/g;

	print $_,"\n";
}
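
# For readers unfamiliar with the perlasm idiom: the "%x#" prefix used
# throughout selects the XMM view of a wider register, and the last
# substitution above resolves it; a standalone illustration (hypothetical
# variable names, not part of this file):
#
#	my $reg = "%zmm9";
#	(my $view = "%x#$reg") =~ s/%x#%[yz]/%x/;
#	print "$view\n";		# prints "%xmm9"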
@@ -3437,6 +3437,31 @@
Operation = DECRYPT
Result = CIPHERFINAL_ERROR
# self-generated vectors
Cipher = chacha20-poly1305
Key = 1c9240a5eb55d38af333888604f6b5f0473917c1402b80099dca5cbc207075c0
IV = 000000000102030405060708
AAD = f33388860000000000004e91
Tag = d96119a40cd17f2527306866a3ef0413
Plaintext = 496e7465726e65742d4472616674732061726520647261667420646f63756d656e74732076616c696420666f722061206d6178696d756d206f6620736978206d6f6e74687320616e64206d617920626520757064617465642c207265706c616365642c206f72206f62736f6c65746564206279206f7468657220646f63756d656e747320617420616e792074696d652e20497420697320696e617070726f70726961746520746f2075736520496e7465726e65742d4472616674732061732072
Ciphertext = 64a0861575861af460f062c79be643bd5e805cfd345cf389f108670ac76c8cb24c6cfc18755d43eea09ee94e382d26b0bdb7b73c321b0100d4f03b7f355894cf332f830e710b97ce98c8a84abd0b948114ad176e008d33bd60f982b1ff37c8559797a06ef4f0ef61c186324e2b3506383606907b6a7c02b0f9f6157b53c867e4b9166c767b804d46a59b5216cde7a4e99040c5a40433225ee282a1b0a06c523eaf4534d7f83fa1155b0047718cbc546a0d072b04b3564eea1b422273f548271a
Cipher = chacha20-poly1305
Key = 1c9240a5eb55d38af333888604f6b5f0473917c1402b80099dca5cbc207075c0
IV = 000000000102030405060708
AAD = f33388860000000000004e91
Tag = 53aee3189d2b747032378a6186feb43f
Plaintext = 496e7465726e65742d4472616674732061726520647261667420646f63756d656e74732076616c696420666f722061206d6178696d756d206f6620736978206d6f6e74687320616e64206d617920626520757064617465642c207265706c616365642c206f72206f62736f6c65746564206279206f7468657220646f63756d656e747320617420616e792074696d652e20497420697320696e617070726f70726961746520746f2075736520496e7465726e65742d447261667473206173207265666572656e6365206d6174657269616c206f7220746f2063697465207468656d206f74686572207468616e206173202fe2809c776f726b20696e2070726f67496e7465726e65742d4472616674732061726520647261667420646f63756d656e74732076616c696420666f722061206d6178696d756d206f6620736978206d6f6e74687320616e64206d617920626520757064617465642c207265706c616365642c206f72206f62736f6c65746564206279206f7468657220646f63756d656e747320617420616e792074696d652e20497420697320696e617070726f70726961746520746f2075736520496e7465726e65742d447261667473206173207265666572656e6365206d6174657269616c206f7220746f2063697465207468656d206f74686572207468616e206173202fe2809c776f726b20696e2070726f67
Ciphertext = 64a0861575861af460f062c79be643bd5e805cfd345cf389f108670ac76c8cb24c6cfc18755d43eea09ee94e382d26b0bdb7b73c321b0100d4f03b7f355894cf332f830e710b97ce98c8a84abd0b948114ad176e008d33bd60f982b1ff37c8559797a06ef4f0ef61c186324e2b3506383606907b6a7c02b0f9f6157b53c867e4b9166c767b804d46a59b5216cde7a4e99040c5a40433225ee282a1b0a06c523eaf4534d7f83fa1155b0047718cbc546a0d072b04b3564eea1b422273f548271a0bb2316053fa76991955ebd63159434ecebb4e466dae5a1073a6727627097a1049e617d91d361094fa68f0ff77987130305beaba2eda04df997b714d6c6f2c299da65ba25e6a85842bf0440fd98a9a2266b061c4b3a13327c090f9a0789f58aad805275e4378a525f19232bfbfb749ede38480f405cf43ec2f1f8619ebcbc80a89e92a859c7911e674977ab17d4a7126a6b8a477358ff14a344d276ef6e504e10268ac3619fcf90c2d6c03fc2e3d1f290d9bf26c1fa1495dd8f97eec6229a55c2354e4524143551a5cc370a1c622c9390530cff21c3e1ed50c5e3daf97518ccce34156bdbd7eafab8bd417aef25c6c927301731bd319d247a1d5c3186ed10bfd9a7a24bac30e3e4503ed9204154d338b79ea276e7058e7f20f4d4fd1ac93d63f611af7b6d006c2a72add0eedc497b19cb30a198816664f0da00155f2e2d6ac61
Cipher = chacha20-poly1305
Key = 1c9240a5eb55d38af333888604f6b5f0473917c1402b80099dca5cbc207075c0
IV = 000000000102030405060708
AAD = f33388860000000000004e91
Tag = e0723bce23528ce6ccb10ff9627038bf
Plaintext = 496e7465726e65742d4472616674732061726520647261667420646f63756d656e74732076616c696420666f722061206d6178696d756d206f6620736978206d6f6e74687320616e64206d617920626520757064617465642c207265706c616365642c206f72206f62736f6c65746564206279206f7468657220646f63756d656e747320617420616e792074696d652e20497420697320696e617070726f70726961746520746f2075736520496e7465726e65742d447261667473206173207265666572656e6365206d6174657269616c206f7220746f2063697465207468656d206f74686572207468616e206173202fe2809c776f726b20696e2070726f67496e7465726e65742d4472616674732061726520647261667420646f63756d656e74732076616c696420666f722061206d6178696d756d206f6620736978206d6f6e74687320616e64206d617920626520757064617465642c207265706c616365642c206f72206f62736f6c65746564206279206f7468657220646f63756d656e747320617420616e792074696d652e20497420697320696e617070726f70726961746520746f2075736520496e7465726e65742d447261667473206173207265666572656e6365206d6174657269616c206f7220746f2063697465207468656d206f74686572207468616e206173202fe2809c776f726b20696e2070726f67496e7465726e65742d4472616674732061726520647261667420646f63756d656e74732076616c696420666f722061206d6178696d756d206f6620736978206d
Ciphertext = 64a0861575861af460f062c79be643bd5e805cfd345cf389f108670ac76c8cb24c6cfc18755d43eea09ee94e382d26b0bdb7b73c321b0100d4f03b7f355894cf332f830e710b97ce98c8a84abd0b948114ad176e008d33bd60f982b1ff37c8559797a06ef4f0ef61c186324e2b3506383606907b6a7c02b0f9f6157b53c867e4b9166c767b804d46a59b5216cde7a4e99040c5a40433225ee282a1b0a06c523eaf4534d7f83fa1155b0047718cbc546a0d072b04b3564eea1b422273f548271a0bb2316053fa76991955ebd63159434ecebb4e466dae5a1073a6727627097a1049e617d91d361094fa68f0ff77987130305beaba2eda04df997b714d6c6f2c299da65ba25e6a85842bf0440fd98a9a2266b061c4b3a13327c090f9a0789f58aad805275e4378a525f19232bfbfb749ede38480f405cf43ec2f1f8619ebcbc80a89e92a859c7911e674977ab17d4a7126a6b8a477358ff14a344d276ef6e504e10268ac3619fcf90c2d6c03fc2e3d1f290d9bf26c1fa1495dd8f97eec6229a55c2354e4524143551a5cc370a1c622c9390530cff21c3e1ed50c5e3daf97518ccce34156bdbd7eafab8bd417aef25c6c927301731bd319d247a1d5c3186ed10bfd9a7a24bac30e3e4503ed9204154d338b79ea276e7058e7f20f4d4fd1ac93d63f611af7b6d006c2a72add0eedc497b19cb30a198816664f0da00155f2e2d6ac61045b296d614301e0ad4983308028850dd4feffe3a8163970306e4047f5a165cb4befbc129729cd2e286e837e9b606486d402acc3dec5bf8b92387f6e486f2140
# TLS1 PRF tests, from NIST test vectors
KDF=TLS1-PRF