PowerPC assembly pack: add POWER8 support.
Reviewed-by: Dr. Stephen Henson <steve@openssl.org>
commit 4577871ca3
parent d8a23532dd

21 changed files with 5096 additions and 36 deletions
@@ -139,8 +139,8 @@ my $armv4_asm="armcap.o armv4cpuid.o:bn_asm.o armv4-mont.o armv4-gf2m.o::aes_cbc
my $aarch64_asm="armcap.o arm64cpuid.o mem_clr.o:::aes_core.o aes_cbc.o aesv8-armx.o:::sha1-armv8.o sha256-armv8.o sha512-armv8.o:::::::ghashv8-armx.o:";
my $parisc11_asm="pariscid.o:bn_asm.o parisc-mont.o::aes_core.o aes_cbc.o aes-parisc.o:::sha1-parisc.o sha256-parisc.o sha512-parisc.o::rc4-parisc.o:::::ghash-parisc.o::32";
my $parisc20_asm="pariscid.o:pa-risc2W.o parisc-mont.o::aes_core.o aes_cbc.o aes-parisc.o:::sha1-parisc.o sha256-parisc.o sha512-parisc.o::rc4-parisc.o:::::ghash-parisc.o::64";
-my $ppc32_asm="ppccpuid.o ppccap.o:bn-ppc.o ppc-mont.o ppc64-mont.o::aes_core.o aes_cbc.o aes-ppc.o:::sha1-ppc.o sha256-ppc.o::::::::";
-my $ppc64_asm="ppccpuid.o ppccap.o:bn-ppc.o ppc-mont.o ppc64-mont.o::aes_core.o aes_cbc.o aes-ppc.o:::sha1-ppc.o sha256-ppc.o sha512-ppc.o::::::::";
+my $ppc32_asm="ppccpuid.o ppccap.o:bn-ppc.o ppc-mont.o ppc64-mont.o::aes_core.o aes_cbc.o aes-ppc.o aesp8-ppc.o:::sha1-ppc.o sha256-ppc.o sha512-ppc.o sha256p8-ppc.o sha512p8-ppc.o:::::::ghashp8-ppc.o:";
+my $ppc64_asm="ppccpuid.o ppccap.o:bn-ppc.o ppc-mont.o ppc64-mont.o::aes_core.o aes_cbc.o aes-ppc.o aesp8-ppc.o:::sha1-ppc.o sha256-ppc.o sha512-ppc.o sha256p8-ppc.o sha512p8-ppc.o:::::::ghashp8-ppc.o:";
my $no_asm=":::::::::::::::void";

# As for $BSDthreads. Idea is to maintain "collective" set of flags,
@@ -71,6 +71,8 @@ aes-sparcv9.s: asm/aes-sparcv9.pl

aes-ppc.s: asm/aes-ppc.pl
$(PERL) asm/aes-ppc.pl $(PERLASM_SCHEME) $@
+aesp8-ppc.s: asm/aesp8-ppc.pl
+$(PERL) asm/aesp8-ppc.pl $(PERLASM_SCHEME) $@

aes-parisc.s: asm/aes-parisc.pl
$(PERL) asm/aes-parisc.pl $(PERLASM_SCHEME) $@
@@ -548,7 +548,7 @@ Lenc_loop:
xor $s2,$t2,$acc14
xor $s3,$t3,$acc15
addi $key,$key,16
-bdnz- Lenc_loop
+bdnz Lenc_loop

addi $Tbl2,$Tbl0,2048
nop
@@ -982,7 +982,7 @@ Ldec_loop:
xor $s2,$t2,$acc14
xor $s3,$t3,$acc15
addi $key,$key,16
-bdnz- Ldec_loop
+bdnz Ldec_loop

addi $Tbl2,$Tbl0,2048
nop
crypto/aes/asm/aesp8-ppc.pl (new executable file, 3726 lines; diff not shown because it is too large)
@@ -191,7 +191,7 @@ L1st:

addi $j,$j,$BNSZ ; j++
addi $tp,$tp,$BNSZ ; tp++
-bdnz- L1st
+bdnz L1st
;L1st
addc $lo0,$alo,$hi0
addze $hi0,$ahi
@@ -253,7 +253,7 @@ Linner:
addze $hi1,$hi1
$ST $lo1,0($tp) ; tp[j-1]
addi $tp,$tp,$BNSZ ; tp++
-bdnz- Linner
+bdnz Linner
;Linner
$LD $tj,$BNSZ($tp) ; tp[j]
addc $lo0,$alo,$hi0
@@ -276,7 +276,7 @@ Linner:
slwi $tj,$num,`log($BNSZ)/log(2)`
$UCMP $i,$tj
addi $i,$i,$BNSZ
-ble- Louter
+ble Louter

addi $num,$num,2 ; restore $num
subfc $j,$j,$j ; j=0 and "clear" XER[CA]
@@ -289,7 +289,7 @@ Lsub: $LDX $tj,$tp,$j
subfe $aj,$nj,$tj ; tp[j]-np[j]
$STX $aj,$rp,$j
addi $j,$j,$BNSZ
-bdnz- Lsub
+bdnz Lsub

li $j,0
mtctr $num
@@ -304,7 +304,7 @@ Lcopy: ; copy or in-place refresh
$STX $tj,$rp,$j
$STX $j,$tp,$j ; zap at once
addi $j,$j,$BNSZ
-bdnz- Lcopy
+bdnz Lcopy

$POP $tj,0($sp)
li r3,1
@@ -1552,7 +1552,7 @@ Lppcasm_sub_mainloop:
# if carry = 1 this is r7-r8. Else it
# is r7-r8 -1 as we need.
$STU r6,$BNSZ(r3)
-bdnz- Lppcasm_sub_mainloop
+bdnz Lppcasm_sub_mainloop
Lppcasm_sub_adios:
subfze r3,r0 # if carry bit is set then r3 = 0 else -1
andi. r3,r3,1 # keep only last bit.
@@ -1598,7 +1598,7 @@ Lppcasm_add_mainloop:
$LDU r8,$BNSZ(r5)
adde r8,r7,r8
$STU r8,$BNSZ(r3)
-bdnz- Lppcasm_add_mainloop
+bdnz Lppcasm_add_mainloop
Lppcasm_add_adios:
addze r3,r0 #return carry bit.
blr
@@ -1755,7 +1755,7 @@ Lppcasm_sqr_mainloop:
$UMULH r8,r6,r6
$STU r7,$BNSZ(r3)
$STU r8,$BNSZ(r3)
-bdnz- Lppcasm_sqr_mainloop
+bdnz Lppcasm_sqr_mainloop
Lppcasm_sqr_adios:
blr
.long 0
@@ -1819,7 +1819,7 @@ Lppcasm_mw_LOOP:

addi r3,r3,`4*$BNSZ`
addi r4,r4,`4*$BNSZ`
-bdnz- Lppcasm_mw_LOOP
+bdnz Lppcasm_mw_LOOP

Lppcasm_mw_REM:
andi. r5,r5,0x3
@@ -561,7 +561,7 @@ $code.=<<___;
stfd $T3b,`$FRAME+56`($sp)
std $t0,8($tp) ; tp[j-1]
stdu $t4,16($tp) ; tp[j]
-bdnz- L1st
+bdnz L1st

fctid $dota,$dota
fctid $dotb,$dotb
@@ -856,7 +856,7 @@ $code.=<<___;
addze $carry,$carry
std $t3,-16($tp) ; tp[j-1]
std $t5,-8($tp) ; tp[j]
-bdnz- Linner
+bdnz Linner

fctid $dota,$dota
fctid $dotb,$dotb
@@ -954,7 +954,7 @@ Lsub: ldx $t0,$tp,$i
stdx $t0,$rp,$i
stdx $t2,$t6,$i
addi $i,$i,16
-bdnz- Lsub
+bdnz Lsub

li $i,0
subfe $ovf,$i,$ovf ; handle upmost overflow bit
@@ -981,7 +981,7 @@ Lcopy: ; copy or in-place refresh
stdx $i,$tp,$i ; zap tp at once
stdx $i,$t4,$i
addi $i,$i,16
-bdnz- Lcopy
+bdnz Lcopy
___
$code.=<<___ if ($SIZE_T==4);
subf $np,$num,$np ; rewind np
@@ -1014,7 +1014,7 @@ Lsub: ld $t0,8($tp) ; load tp[j..j+3] in 64-bit word order
stw $t5,8($rp)
stw $t6,12($rp)
stwu $t7,16($rp)
-bdnz- Lsub
+bdnz Lsub

li $i,0
subfe $ovf,$i,$ovf ; handle upmost overflow bit
@@ -1046,7 +1046,7 @@ Lcopy: ; copy or in-place refresh
stwu $t3,16($rp)
std $i,8($tp) ; zap at once
stdu $i,16($tp)
-bdnz- Lcopy
+bdnz Lcopy
___

$code.=<<___;
@@ -140,6 +140,19 @@ void AES_ctr32_encrypt(const unsigned char *in, unsigned char *out,
const unsigned char ivec[AES_BLOCK_SIZE]);
#endif

+#if defined(OPENSSL_CPUID_OBJ) && (defined(__powerpc__) || defined(__ppc__) || defined(_ARCH_PPC))
+extern int OPENSSL_ppccap_P;
+# define HWAES_CAPABLE (OPENSSL_ppccap_P & (1<<2))
+# define HWAES_set_encrypt_key aes_p8_set_encrypt_key
+# define HWAES_set_decrypt_key aes_p8_set_decrypt_key
+# define HWAES_encrypt aes_p8_encrypt
+# define HWAES_decrypt aes_p8_decrypt
+# define HWAES_cbc_encrypt aes_p8_cbc_encrypt
+# define HWAES_ctr32_encrypt_blocks aes_p8_ctr32_encrypt_blocks
+# define HWAES_xts_encrypt aes_p8_xts_encrypt
+# define HWAES_xts_decrypt aes_p8_xts_decrypt
+#endif

#if defined(AES_ASM) && !defined(I386_ONLY) && ( \
((defined(__i386) || defined(__i386__) || \
defined(_M_IX86)) && defined(OPENSSL_IA32_SSE2))|| \
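The block above maps the generic HWAES_* hooks onto the aes_p8_* routines generated from aesp8-ppc.pl, gated on bit 2 of OPENSSL_ppccap_P (the PPC_CRYPTO207 bit set by the capability-detection code later in this commit). A minimal sketch of how that capability bit selects key setup at runtime; only the names taken from this patch are real, and select_aes_setkey() is a hypothetical wrapper, not code from OpenSSL:

/* Hypothetical illustration of the HWAES_CAPABLE dispatch pattern. */
#include <openssl/aes.h>

extern int OPENSSL_ppccap_P;
#define HWAES_CAPABLE (OPENSSL_ppccap_P & (1 << 2))     /* PPC_CRYPTO207 */

/* Generated from crypto/aes/asm/aesp8-ppc.pl; same contract as AES_set_encrypt_key(). */
int aes_p8_set_encrypt_key(const unsigned char *userKey, int bits, AES_KEY *key);

static int select_aes_setkey(const unsigned char *userKey, int bits, AES_KEY *key)
{
    if (HWAES_CAPABLE)                          /* POWER8 in-core AES available */
        return aes_p8_set_encrypt_key(userKey, bits, key);
    return AES_set_encrypt_key(userKey, bits, key);     /* portable fallback */
}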
@@ -498,6 +511,13 @@ void HWAES_cbc_encrypt(const unsigned char *in, unsigned char *out,
unsigned char *ivec, const int enc);
void HWAES_ctr32_encrypt_blocks(const unsigned char *in, unsigned char *out,
size_t len, const AES_KEY *key, const unsigned char ivec[16]);
+void HWAES_xts_encrypt(const unsigned char *inp, unsigned char *out,
+size_t len, const AES_KEY *key1,
+const AES_KEY *key2, const unsigned char iv[16]);
+void HWAES_xts_decrypt(const unsigned char *inp, unsigned char *out,
+size_t len, const AES_KEY *key1,
+const AES_KEY *key2, const unsigned char iv[16]);

#endif

#define BLOCK_CIPHER_generic_pack(nid,keylen,flags) \
@@ -1172,11 +1192,17 @@ static int aes_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
{
HWAES_set_encrypt_key(key, ctx->key_len * 4, &xctx->ks1);
xctx->xts.block1 = (block128_f)HWAES_encrypt;
+#ifdef HWAES_xts_encrypt
+xctx->stream = HWAES_xts_encrypt;
+#endif
}
else
{
HWAES_set_decrypt_key(key, ctx->key_len * 4, &xctx->ks1);
xctx->xts.block1 = (block128_f)HWAES_decrypt;
+#ifdef HWAES_xts_decrypt
+xctx->stream = HWAES_xts_decrypt;
+#endif
}

HWAES_set_encrypt_key(key + ctx->key_len/2,
@@ -58,6 +58,8 @@ ghash-parisc.s: asm/ghash-parisc.pl
$(PERL) asm/ghash-parisc.pl $(PERLASM_SCHEME) $@
ghashv8-armx.S: asm/ghashv8-armx.pl
$(PERL) asm/ghashv8-armx.pl $(PERLASM_SCHEME) $@
+ghashp8-ppc.s: asm/ghashp8-ppc.pl
+$(PERL) asm/ghashp8-ppc.pl $(PERLASM_SCHEME) $@

# GNU make "catch all"
ghash-%.S: asm/ghash-%.pl; $(PERL) $< $(PERLASM_SCHEME) $@
crypto/modes/asm/ghashp8-ppc.pl (new executable file, 663 lines)
@@ -0,0 +1,663 @@
|
|||
#!/usr/bin/env perl
|
||||
#
|
||||
# ====================================================================
|
||||
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
|
||||
# project. The module is, however, dual licensed under OpenSSL and
|
||||
# CRYPTOGAMS licenses depending on where you obtain it. For further
|
||||
# details see http://www.openssl.org/~appro/cryptogams/.
|
||||
# ====================================================================
|
||||
#
|
||||
# GHASH for PowerISA v2.07.
|
||||
#
|
||||
# July 2014
|
||||
#
|
||||
# Accurate performance measurements are problematic, because it's
|
||||
# always a virtualized setup with a possibly throttled processor.
|
||||
# Relative comparison is therefore more informative. This initial
|
||||
# version is ~2.1x slower than hardware-assisted AES-128-CTR, ~12x
|
||||
# faster than "4-bit" integer-only compiler-generated 64-bit code.
|
||||
# "Initial version" means that there is room for futher improvement.
|
||||
|
||||
# May 2016
|
||||
#
|
||||
# 2x aggregated reduction improves performance by 50% (resulting
|
||||
# performance on POWER8 is 1 cycle per processed byte), and 4x
|
||||
# aggregated reduction - by 170% or 2.7x (resulting in 0.55 cpb).
|
||||
|
||||
$flavour=shift;
|
||||
$output =shift;
|
||||
|
||||
if ($flavour =~ /64/) {
|
||||
$SIZE_T=8;
|
||||
$LRSAVE=2*$SIZE_T;
|
||||
$STU="stdu";
|
||||
$POP="ld";
|
||||
$PUSH="std";
|
||||
$UCMP="cmpld";
|
||||
$SHRI="srdi";
|
||||
} elsif ($flavour =~ /32/) {
|
||||
$SIZE_T=4;
|
||||
$LRSAVE=$SIZE_T;
|
||||
$STU="stwu";
|
||||
$POP="lwz";
|
||||
$PUSH="stw";
|
||||
$UCMP="cmplw";
|
||||
$SHRI="srwi";
|
||||
} else { die "nonsense $flavour"; }
|
||||
|
||||
$sp="r1";
|
||||
$FRAME=6*$SIZE_T+13*16; # 13*16 is for v20-v31 offload
|
||||
|
||||
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
|
||||
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
|
||||
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
|
||||
die "can't locate ppc-xlate.pl";
|
||||
|
||||
open STDOUT,"| $^X $xlate $flavour $output" || die "can't call $xlate: $!";
|
||||
|
||||
my ($Xip,$Htbl,$inp,$len)=map("r$_",(3..6)); # argument block
|
||||
|
||||
my ($Xl,$Xm,$Xh,$IN)=map("v$_",(0..3));
|
||||
my ($zero,$t0,$t1,$t2,$xC2,$H,$Hh,$Hl,$lemask)=map("v$_",(4..12));
|
||||
my ($Xl1,$Xm1,$Xh1,$IN1,$H2,$H2h,$H2l)=map("v$_",(13..19));
|
||||
my $vrsave="r12";
|
||||
|
||||
$code=<<___;
|
||||
.machine "any"
|
||||
|
||||
.text
|
||||
|
||||
.globl .gcm_init_p8
|
||||
.align 5
|
||||
.gcm_init_p8:
|
||||
li r0,-4096
|
||||
li r8,0x10
|
||||
mfspr $vrsave,256
|
||||
li r9,0x20
|
||||
mtspr 256,r0
|
||||
li r10,0x30
|
||||
lvx_u $H,0,r4 # load H
|
||||
|
||||
vspltisb $xC2,-16 # 0xf0
|
||||
vspltisb $t0,1 # one
|
||||
vaddubm $xC2,$xC2,$xC2 # 0xe0
|
||||
vxor $zero,$zero,$zero
|
||||
vor $xC2,$xC2,$t0 # 0xe1
|
||||
vsldoi $xC2,$xC2,$zero,15 # 0xe1...
|
||||
vsldoi $t1,$zero,$t0,1 # ...1
|
||||
vaddubm $xC2,$xC2,$xC2 # 0xc2...
|
||||
vspltisb $t2,7
|
||||
vor $xC2,$xC2,$t1 # 0xc2....01
|
||||
vspltb $t1,$H,0 # most significant byte
|
||||
vsl $H,$H,$t0 # H<<=1
|
||||
vsrab $t1,$t1,$t2 # broadcast carry bit
|
||||
vand $t1,$t1,$xC2
|
||||
vxor $IN,$H,$t1 # twisted H
|
||||
|
||||
vsldoi $H,$IN,$IN,8 # twist even more ...
|
||||
vsldoi $xC2,$zero,$xC2,8 # 0xc2.0
|
||||
vsldoi $Hl,$zero,$H,8 # ... and split
|
||||
vsldoi $Hh,$H,$zero,8
|
||||
|
||||
stvx_u $xC2,0,r3 # save pre-computed table
|
||||
stvx_u $Hl,r8,r3
|
||||
li r8,0x40
|
||||
stvx_u $H, r9,r3
|
||||
li r9,0x50
|
||||
stvx_u $Hh,r10,r3
|
||||
li r10,0x60
|
||||
|
||||
vpmsumd $Xl,$IN,$Hl # H.lo·H.lo
|
||||
vpmsumd $Xm,$IN,$H # H.hi·H.lo+H.lo·H.hi
|
||||
vpmsumd $Xh,$IN,$Hh # H.hi·H.hi
|
||||
|
||||
vpmsumd $t2,$Xl,$xC2 # 1st reduction phase
|
||||
|
||||
vsldoi $t0,$Xm,$zero,8
|
||||
vsldoi $t1,$zero,$Xm,8
|
||||
vxor $Xl,$Xl,$t0
|
||||
vxor $Xh,$Xh,$t1
|
||||
|
||||
vsldoi $Xl,$Xl,$Xl,8
|
||||
vxor $Xl,$Xl,$t2
|
||||
|
||||
vsldoi $t1,$Xl,$Xl,8 # 2nd reduction phase
|
||||
vpmsumd $Xl,$Xl,$xC2
|
||||
vxor $t1,$t1,$Xh
|
||||
vxor $IN1,$Xl,$t1
|
||||
|
||||
vsldoi $H2,$IN1,$IN1,8
|
||||
vsldoi $H2l,$zero,$H2,8
|
||||
vsldoi $H2h,$H2,$zero,8
|
||||
|
||||
stvx_u $H2l,r8,r3 # save H^2
|
||||
li r8,0x70
|
||||
stvx_u $H2,r9,r3
|
||||
li r9,0x80
|
||||
stvx_u $H2h,r10,r3
|
||||
li r10,0x90
|
||||
___
|
||||
{
|
||||
my ($t4,$t5,$t6) = ($Hl,$H,$Hh);
|
||||
$code.=<<___;
|
||||
vpmsumd $Xl,$IN,$H2l # H.lo·H^2.lo
|
||||
vpmsumd $Xl1,$IN1,$H2l # H^2.lo·H^2.lo
|
||||
vpmsumd $Xm,$IN,$H2 # H.hi·H^2.lo+H.lo·H^2.hi
|
||||
vpmsumd $Xm1,$IN1,$H2 # H^2.hi·H^2.lo+H^2.lo·H^2.hi
|
||||
vpmsumd $Xh,$IN,$H2h # H.hi·H^2.hi
|
||||
vpmsumd $Xh1,$IN1,$H2h # H^2.hi·H^2.hi
|
||||
|
||||
vpmsumd $t2,$Xl,$xC2 # 1st reduction phase
|
||||
vpmsumd $t6,$Xl1,$xC2 # 1st reduction phase
|
||||
|
||||
vsldoi $t0,$Xm,$zero,8
|
||||
vsldoi $t1,$zero,$Xm,8
|
||||
vsldoi $t4,$Xm1,$zero,8
|
||||
vsldoi $t5,$zero,$Xm1,8
|
||||
vxor $Xl,$Xl,$t0
|
||||
vxor $Xh,$Xh,$t1
|
||||
vxor $Xl1,$Xl1,$t4
|
||||
vxor $Xh1,$Xh1,$t5
|
||||
|
||||
vsldoi $Xl,$Xl,$Xl,8
|
||||
vsldoi $Xl1,$Xl1,$Xl1,8
|
||||
vxor $Xl,$Xl,$t2
|
||||
vxor $Xl1,$Xl1,$t6
|
||||
|
||||
vsldoi $t1,$Xl,$Xl,8 # 2nd reduction phase
|
||||
vsldoi $t5,$Xl1,$Xl1,8 # 2nd reduction phase
|
||||
vpmsumd $Xl,$Xl,$xC2
|
||||
vpmsumd $Xl1,$Xl1,$xC2
|
||||
vxor $t1,$t1,$Xh
|
||||
vxor $t5,$t5,$Xh1
|
||||
vxor $Xl,$Xl,$t1
|
||||
vxor $Xl1,$Xl1,$t5
|
||||
|
||||
vsldoi $H,$Xl,$Xl,8
|
||||
vsldoi $H2,$Xl1,$Xl1,8
|
||||
vsldoi $Hl,$zero,$H,8
|
||||
vsldoi $Hh,$H,$zero,8
|
||||
vsldoi $H2l,$zero,$H2,8
|
||||
vsldoi $H2h,$H2,$zero,8
|
||||
|
||||
stvx_u $Hl,r8,r3 # save H^3
|
||||
li r8,0xa0
|
||||
stvx_u $H,r9,r3
|
||||
li r9,0xb0
|
||||
stvx_u $Hh,r10,r3
|
||||
li r10,0xc0
|
||||
stvx_u $H2l,r8,r3 # save H^4
|
||||
stvx_u $H2,r9,r3
|
||||
stvx_u $H2h,r10,r3
|
||||
|
||||
mtspr 256,$vrsave
|
||||
blr
|
||||
.long 0
|
||||
.byte 0,12,0x14,0,0,0,2,0
|
||||
.long 0
|
||||
.size .gcm_init_p8,.-.gcm_init_p8
|
||||
___
|
||||
}
|
||||
$code.=<<___;
|
||||
.globl .gcm_gmult_p8
|
||||
.align 5
|
||||
.gcm_gmult_p8:
|
||||
lis r0,0xfff8
|
||||
li r8,0x10
|
||||
mfspr $vrsave,256
|
||||
li r9,0x20
|
||||
mtspr 256,r0
|
||||
li r10,0x30
|
||||
lvx_u $IN,0,$Xip # load Xi
|
||||
|
||||
lvx_u $Hl,r8,$Htbl # load pre-computed table
|
||||
le?lvsl $lemask,r0,r0
|
||||
lvx_u $H, r9,$Htbl
|
||||
le?vspltisb $t0,0x07
|
||||
lvx_u $Hh,r10,$Htbl
|
||||
le?vxor $lemask,$lemask,$t0
|
||||
lvx_u $xC2,0,$Htbl
|
||||
le?vperm $IN,$IN,$IN,$lemask
|
||||
vxor $zero,$zero,$zero
|
||||
|
||||
vpmsumd $Xl,$IN,$Hl # H.lo·Xi.lo
|
||||
vpmsumd $Xm,$IN,$H # H.hi·Xi.lo+H.lo·Xi.hi
|
||||
vpmsumd $Xh,$IN,$Hh # H.hi·Xi.hi
|
||||
|
||||
vpmsumd $t2,$Xl,$xC2 # 1st reduction phase
|
||||
|
||||
vsldoi $t0,$Xm,$zero,8
|
||||
vsldoi $t1,$zero,$Xm,8
|
||||
vxor $Xl,$Xl,$t0
|
||||
vxor $Xh,$Xh,$t1
|
||||
|
||||
vsldoi $Xl,$Xl,$Xl,8
|
||||
vxor $Xl,$Xl,$t2
|
||||
|
||||
vsldoi $t1,$Xl,$Xl,8 # 2nd reduction phase
|
||||
vpmsumd $Xl,$Xl,$xC2
|
||||
vxor $t1,$t1,$Xh
|
||||
vxor $Xl,$Xl,$t1
|
||||
|
||||
le?vperm $Xl,$Xl,$Xl,$lemask
|
||||
stvx_u $Xl,0,$Xip # write out Xi
|
||||
|
||||
mtspr 256,$vrsave
|
||||
blr
|
||||
.long 0
|
||||
.byte 0,12,0x14,0,0,0,2,0
|
||||
.long 0
|
||||
.size .gcm_gmult_p8,.-.gcm_gmult_p8
|
||||
|
||||
.globl .gcm_ghash_p8
|
||||
.align 5
|
||||
.gcm_ghash_p8:
|
||||
li r0,-4096
|
||||
li r8,0x10
|
||||
mfspr $vrsave,256
|
||||
li r9,0x20
|
||||
mtspr 256,r0
|
||||
li r10,0x30
|
||||
lvx_u $Xl,0,$Xip # load Xi
|
||||
|
||||
lvx_u $Hl,r8,$Htbl # load pre-computed table
|
||||
li r8,0x40
|
||||
le?lvsl $lemask,r0,r0
|
||||
lvx_u $H, r9,$Htbl
|
||||
li r9,0x50
|
||||
le?vspltisb $t0,0x07
|
||||
lvx_u $Hh,r10,$Htbl
|
||||
li r10,0x60
|
||||
le?vxor $lemask,$lemask,$t0
|
||||
lvx_u $xC2,0,$Htbl
|
||||
le?vperm $Xl,$Xl,$Xl,$lemask
|
||||
vxor $zero,$zero,$zero
|
||||
|
||||
${UCMP}i $len,64
|
||||
bge Lgcm_ghash_p8_4x
|
||||
|
||||
lvx_u $IN,0,$inp
|
||||
addi $inp,$inp,16
|
||||
subic. $len,$len,16
|
||||
le?vperm $IN,$IN,$IN,$lemask
|
||||
vxor $IN,$IN,$Xl
|
||||
beq Lshort
|
||||
|
||||
lvx_u $H2l,r8,$Htbl # load H^2
|
||||
li r8,16
|
||||
lvx_u $H2, r9,$Htbl
|
||||
add r9,$inp,$len # end of input
|
||||
lvx_u $H2h,r10,$Htbl
|
||||
be?b Loop_2x
|
||||
|
||||
.align 5
|
||||
Loop_2x:
|
||||
lvx_u $IN1,0,$inp
|
||||
le?vperm $IN1,$IN1,$IN1,$lemask
|
||||
|
||||
subic $len,$len,32
|
||||
vpmsumd $Xl,$IN,$H2l # H^2.lo·Xi.lo
|
||||
vpmsumd $Xl1,$IN1,$Hl # H.lo·Xi+1.lo
|
||||
subfe r0,r0,r0 # borrow?-1:0
|
||||
vpmsumd $Xm,$IN,$H2 # H^2.hi·Xi.lo+H^2.lo·Xi.hi
|
||||
vpmsumd $Xm1,$IN1,$H # H.hi·Xi+1.lo+H.lo·Xi+1.hi
|
||||
and r0,r0,$len
|
||||
vpmsumd $Xh,$IN,$H2h # H^2.hi·Xi.hi
|
||||
vpmsumd $Xh1,$IN1,$Hh # H.hi·Xi+1.hi
|
||||
add $inp,$inp,r0
|
||||
|
||||
vxor $Xl,$Xl,$Xl1
|
||||
vxor $Xm,$Xm,$Xm1
|
||||
|
||||
vpmsumd $t2,$Xl,$xC2 # 1st reduction phase
|
||||
|
||||
vsldoi $t0,$Xm,$zero,8
|
||||
vsldoi $t1,$zero,$Xm,8
|
||||
vxor $Xh,$Xh,$Xh1
|
||||
vxor $Xl,$Xl,$t0
|
||||
vxor $Xh,$Xh,$t1
|
||||
|
||||
vsldoi $Xl,$Xl,$Xl,8
|
||||
vxor $Xl,$Xl,$t2
|
||||
lvx_u $IN,r8,$inp
|
||||
addi $inp,$inp,32
|
||||
|
||||
vsldoi $t1,$Xl,$Xl,8 # 2nd reduction phase
|
||||
vpmsumd $Xl,$Xl,$xC2
|
||||
le?vperm $IN,$IN,$IN,$lemask
|
||||
vxor $t1,$t1,$Xh
|
||||
vxor $IN,$IN,$t1
|
||||
vxor $IN,$IN,$Xl
|
||||
$UCMP r9,$inp
|
||||
bgt Loop_2x # done yet?
|
||||
|
||||
cmplwi $len,0
|
||||
bne Leven
|
||||
|
||||
Lshort:
|
||||
vpmsumd $Xl,$IN,$Hl # H.lo·Xi.lo
|
||||
vpmsumd $Xm,$IN,$H # H.hi·Xi.lo+H.lo·Xi.hi
|
||||
vpmsumd $Xh,$IN,$Hh # H.hi·Xi.hi
|
||||
|
||||
vpmsumd $t2,$Xl,$xC2 # 1st reduction phase
|
||||
|
||||
vsldoi $t0,$Xm,$zero,8
|
||||
vsldoi $t1,$zero,$Xm,8
|
||||
vxor $Xl,$Xl,$t0
|
||||
vxor $Xh,$Xh,$t1
|
||||
|
||||
vsldoi $Xl,$Xl,$Xl,8
|
||||
vxor $Xl,$Xl,$t2
|
||||
|
||||
vsldoi $t1,$Xl,$Xl,8 # 2nd reduction phase
|
||||
vpmsumd $Xl,$Xl,$xC2
|
||||
vxor $t1,$t1,$Xh
|
||||
|
||||
Leven:
|
||||
vxor $Xl,$Xl,$t1
|
||||
le?vperm $Xl,$Xl,$Xl,$lemask
|
||||
stvx_u $Xl,0,$Xip # write out Xi
|
||||
|
||||
mtspr 256,$vrsave
|
||||
blr
|
||||
.long 0
|
||||
.byte 0,12,0x14,0,0,0,4,0
|
||||
.long 0
|
||||
___
|
||||
{
|
||||
my ($Xl3,$Xm2,$IN2,$H3l,$H3,$H3h,
|
||||
$Xh3,$Xm3,$IN3,$H4l,$H4,$H4h) = map("v$_",(20..31));
|
||||
my $IN0=$IN;
|
||||
my ($H21l,$H21h,$loperm,$hiperm) = ($Hl,$Hh,$H2l,$H2h);
|
||||
|
||||
$code.=<<___;
|
||||
.align 5
|
||||
.gcm_ghash_p8_4x:
|
||||
Lgcm_ghash_p8_4x:
|
||||
$STU $sp,-$FRAME($sp)
|
||||
li r10,`15+6*$SIZE_T`
|
||||
li r11,`31+6*$SIZE_T`
|
||||
stvx v20,r10,$sp
|
||||
addi r10,r10,32
|
||||
stvx v21,r11,$sp
|
||||
addi r11,r11,32
|
||||
stvx v22,r10,$sp
|
||||
addi r10,r10,32
|
||||
stvx v23,r11,$sp
|
||||
addi r11,r11,32
|
||||
stvx v24,r10,$sp
|
||||
addi r10,r10,32
|
||||
stvx v25,r11,$sp
|
||||
addi r11,r11,32
|
||||
stvx v26,r10,$sp
|
||||
addi r10,r10,32
|
||||
stvx v27,r11,$sp
|
||||
addi r11,r11,32
|
||||
stvx v28,r10,$sp
|
||||
addi r10,r10,32
|
||||
stvx v29,r11,$sp
|
||||
addi r11,r11,32
|
||||
stvx v30,r10,$sp
|
||||
li r10,0x60
|
||||
stvx v31,r11,$sp
|
||||
li r0,-1
|
||||
stw $vrsave,`$FRAME-4`($sp) # save vrsave
|
||||
mtspr 256,r0 # preserve all AltiVec registers
|
||||
|
||||
lvsl $t0,0,r8 # 0x0001..0e0f
|
||||
#lvx_u $H2l,r8,$Htbl # load H^2
|
||||
li r8,0x70
|
||||
lvx_u $H2, r9,$Htbl
|
||||
li r9,0x80
|
||||
vspltisb $t1,8 # 0x0808..0808
|
||||
#lvx_u $H2h,r10,$Htbl
|
||||
li r10,0x90
|
||||
lvx_u $H3l,r8,$Htbl # load H^3
|
||||
li r8,0xa0
|
||||
lvx_u $H3, r9,$Htbl
|
||||
li r9,0xb0
|
||||
lvx_u $H3h,r10,$Htbl
|
||||
li r10,0xc0
|
||||
lvx_u $H4l,r8,$Htbl # load H^4
|
||||
li r8,0x10
|
||||
lvx_u $H4, r9,$Htbl
|
||||
li r9,0x20
|
||||
lvx_u $H4h,r10,$Htbl
|
||||
li r10,0x30
|
||||
|
||||
vsldoi $t2,$zero,$t1,8 # 0x0000..0808
|
||||
vaddubm $hiperm,$t0,$t2 # 0x0001..1617
|
||||
vaddubm $loperm,$t1,$hiperm # 0x0809..1e1f
|
||||
|
||||
$SHRI $len,$len,4 # this allows to use sign bit
|
||||
# as carry
|
||||
lvx_u $IN0,0,$inp # load input
|
||||
lvx_u $IN1,r8,$inp
|
||||
subic. $len,$len,8
|
||||
lvx_u $IN2,r9,$inp
|
||||
lvx_u $IN3,r10,$inp
|
||||
addi $inp,$inp,0x40
|
||||
le?vperm $IN0,$IN0,$IN0,$lemask
|
||||
le?vperm $IN1,$IN1,$IN1,$lemask
|
||||
le?vperm $IN2,$IN2,$IN2,$lemask
|
||||
le?vperm $IN3,$IN3,$IN3,$lemask
|
||||
|
||||
vxor $Xh,$IN0,$Xl
|
||||
|
||||
vpmsumd $Xl1,$IN1,$H3l
|
||||
vpmsumd $Xm1,$IN1,$H3
|
||||
vpmsumd $Xh1,$IN1,$H3h
|
||||
|
||||
vperm $H21l,$H2,$H,$hiperm
|
||||
vperm $t0,$IN2,$IN3,$loperm
|
||||
vperm $H21h,$H2,$H,$loperm
|
||||
vperm $t1,$IN2,$IN3,$hiperm
|
||||
vpmsumd $Xm2,$IN2,$H2 # H^2.lo·Xi+2.hi+H^2.hi·Xi+2.lo
|
||||
vpmsumd $Xl3,$t0,$H21l # H^2.lo·Xi+2.lo+H.lo·Xi+3.lo
|
||||
vpmsumd $Xm3,$IN3,$H # H.hi·Xi+3.lo +H.lo·Xi+3.hi
|
||||
vpmsumd $Xh3,$t1,$H21h # H^2.hi·Xi+2.hi+H.hi·Xi+3.hi
|
||||
|
||||
vxor $Xm2,$Xm2,$Xm1
|
||||
vxor $Xl3,$Xl3,$Xl1
|
||||
vxor $Xm3,$Xm3,$Xm2
|
||||
vxor $Xh3,$Xh3,$Xh1
|
||||
|
||||
blt Ltail_4x
|
||||
|
||||
Loop_4x:
|
||||
lvx_u $IN0,0,$inp
|
||||
lvx_u $IN1,r8,$inp
|
||||
subic. $len,$len,4
|
||||
lvx_u $IN2,r9,$inp
|
||||
lvx_u $IN3,r10,$inp
|
||||
addi $inp,$inp,0x40
|
||||
le?vperm $IN1,$IN1,$IN1,$lemask
|
||||
le?vperm $IN2,$IN2,$IN2,$lemask
|
||||
le?vperm $IN3,$IN3,$IN3,$lemask
|
||||
le?vperm $IN0,$IN0,$IN0,$lemask
|
||||
|
||||
vpmsumd $Xl,$Xh,$H4l # H^4.lo·Xi.lo
|
||||
vpmsumd $Xm,$Xh,$H4 # H^4.hi·Xi.lo+H^4.lo·Xi.hi
|
||||
vpmsumd $Xh,$Xh,$H4h # H^4.hi·Xi.hi
|
||||
vpmsumd $Xl1,$IN1,$H3l
|
||||
vpmsumd $Xm1,$IN1,$H3
|
||||
vpmsumd $Xh1,$IN1,$H3h
|
||||
|
||||
vxor $Xl,$Xl,$Xl3
|
||||
vxor $Xm,$Xm,$Xm3
|
||||
vxor $Xh,$Xh,$Xh3
|
||||
vperm $t0,$IN2,$IN3,$loperm
|
||||
vperm $t1,$IN2,$IN3,$hiperm
|
||||
|
||||
vpmsumd $t2,$Xl,$xC2 # 1st reduction phase
|
||||
vpmsumd $Xl3,$t0,$H21l # H.lo·Xi+3.lo +H^2.lo·Xi+2.lo
|
||||
vpmsumd $Xh3,$t1,$H21h # H.hi·Xi+3.hi +H^2.hi·Xi+2.hi
|
||||
|
||||
vsldoi $t0,$Xm,$zero,8
|
||||
vsldoi $t1,$zero,$Xm,8
|
||||
vxor $Xl,$Xl,$t0
|
||||
vxor $Xh,$Xh,$t1
|
||||
|
||||
vsldoi $Xl,$Xl,$Xl,8
|
||||
vxor $Xl,$Xl,$t2
|
||||
|
||||
vsldoi $t1,$Xl,$Xl,8 # 2nd reduction phase
|
||||
vpmsumd $Xm2,$IN2,$H2 # H^2.hi·Xi+2.lo+H^2.lo·Xi+2.hi
|
||||
vpmsumd $Xm3,$IN3,$H # H.hi·Xi+3.lo +H.lo·Xi+3.hi
|
||||
vpmsumd $Xl,$Xl,$xC2
|
||||
|
||||
vxor $Xl3,$Xl3,$Xl1
|
||||
vxor $Xh3,$Xh3,$Xh1
|
||||
vxor $Xh,$Xh,$IN0
|
||||
vxor $Xm2,$Xm2,$Xm1
|
||||
vxor $Xh,$Xh,$t1
|
||||
vxor $Xm3,$Xm3,$Xm2
|
||||
vxor $Xh,$Xh,$Xl
|
||||
bge Loop_4x
|
||||
|
||||
Ltail_4x:
|
||||
vpmsumd $Xl,$Xh,$H4l # H^4.lo·Xi.lo
|
||||
vpmsumd $Xm,$Xh,$H4 # H^4.hi·Xi.lo+H^4.lo·Xi.hi
|
||||
vpmsumd $Xh,$Xh,$H4h # H^4.hi·Xi.hi
|
||||
|
||||
vxor $Xl,$Xl,$Xl3
|
||||
vxor $Xm,$Xm,$Xm3
|
||||
|
||||
vpmsumd $t2,$Xl,$xC2 # 1st reduction phase
|
||||
|
||||
vsldoi $t0,$Xm,$zero,8
|
||||
vsldoi $t1,$zero,$Xm,8
|
||||
vxor $Xh,$Xh,$Xh3
|
||||
vxor $Xl,$Xl,$t0
|
||||
vxor $Xh,$Xh,$t1
|
||||
|
||||
vsldoi $Xl,$Xl,$Xl,8
|
||||
vxor $Xl,$Xl,$t2
|
||||
|
||||
vsldoi $t1,$Xl,$Xl,8 # 2nd reduction phase
|
||||
vpmsumd $Xl,$Xl,$xC2
|
||||
vxor $t1,$t1,$Xh
|
||||
vxor $Xl,$Xl,$t1
|
||||
|
||||
addic. $len,$len,4
|
||||
beq Ldone_4x
|
||||
|
||||
lvx_u $IN0,0,$inp
|
||||
${UCMP}i $len,2
|
||||
li $len,-4
|
||||
blt Lone
|
||||
lvx_u $IN1,r8,$inp
|
||||
beq Ltwo
|
||||
|
||||
Lthree:
|
||||
lvx_u $IN2,r9,$inp
|
||||
le?vperm $IN0,$IN0,$IN0,$lemask
|
||||
le?vperm $IN1,$IN1,$IN1,$lemask
|
||||
le?vperm $IN2,$IN2,$IN2,$lemask
|
||||
|
||||
vxor $Xh,$IN0,$Xl
|
||||
vmr $H4l,$H3l
|
||||
vmr $H4, $H3
|
||||
vmr $H4h,$H3h
|
||||
|
||||
vperm $t0,$IN1,$IN2,$loperm
|
||||
vperm $t1,$IN1,$IN2,$hiperm
|
||||
vpmsumd $Xm2,$IN1,$H2 # H^2.lo·Xi+1.hi+H^2.hi·Xi+1.lo
|
||||
vpmsumd $Xm3,$IN2,$H # H.hi·Xi+2.lo +H.lo·Xi+2.hi
|
||||
vpmsumd $Xl3,$t0,$H21l # H^2.lo·Xi+1.lo+H.lo·Xi+2.lo
|
||||
vpmsumd $Xh3,$t1,$H21h # H^2.hi·Xi+1.hi+H.hi·Xi+2.hi
|
||||
|
||||
vxor $Xm3,$Xm3,$Xm2
|
||||
b Ltail_4x
|
||||
|
||||
.align 4
|
||||
Ltwo:
|
||||
le?vperm $IN0,$IN0,$IN0,$lemask
|
||||
le?vperm $IN1,$IN1,$IN1,$lemask
|
||||
|
||||
vxor $Xh,$IN0,$Xl
|
||||
vperm $t0,$zero,$IN1,$loperm
|
||||
vperm $t1,$zero,$IN1,$hiperm
|
||||
|
||||
vsldoi $H4l,$zero,$H2,8
|
||||
vmr $H4, $H2
|
||||
vsldoi $H4h,$H2,$zero,8
|
||||
|
||||
vpmsumd $Xl3,$t0, $H21l # H.lo·Xi+1.lo
|
||||
vpmsumd $Xm3,$IN1,$H # H.hi·Xi+1.lo+H.lo·Xi+2.hi
|
||||
vpmsumd $Xh3,$t1, $H21h # H.hi·Xi+1.hi
|
||||
|
||||
b Ltail_4x
|
||||
|
||||
.align 4
|
||||
Lone:
|
||||
le?vperm $IN0,$IN0,$IN0,$lemask
|
||||
|
||||
vsldoi $H4l,$zero,$H,8
|
||||
vmr $H4, $H
|
||||
vsldoi $H4h,$H,$zero,8
|
||||
|
||||
vxor $Xh,$IN0,$Xl
|
||||
vxor $Xl3,$Xl3,$Xl3
|
||||
vxor $Xm3,$Xm3,$Xm3
|
||||
vxor $Xh3,$Xh3,$Xh3
|
||||
|
||||
b Ltail_4x
|
||||
|
||||
Ldone_4x:
|
||||
le?vperm $Xl,$Xl,$Xl,$lemask
|
||||
stvx_u $Xl,0,$Xip # write out Xi
|
||||
|
||||
li r10,`15+6*$SIZE_T`
|
||||
li r11,`31+6*$SIZE_T`
|
||||
mtspr 256,$vrsave
|
||||
lvx v20,r10,$sp
|
||||
addi r10,r10,32
|
||||
lvx v21,r11,$sp
|
||||
addi r11,r11,32
|
||||
lvx v22,r10,$sp
|
||||
addi r10,r10,32
|
||||
lvx v23,r11,$sp
|
||||
addi r11,r11,32
|
||||
lvx v24,r10,$sp
|
||||
addi r10,r10,32
|
||||
lvx v25,r11,$sp
|
||||
addi r11,r11,32
|
||||
lvx v26,r10,$sp
|
||||
addi r10,r10,32
|
||||
lvx v27,r11,$sp
|
||||
addi r11,r11,32
|
||||
lvx v28,r10,$sp
|
||||
addi r10,r10,32
|
||||
lvx v29,r11,$sp
|
||||
addi r11,r11,32
|
||||
lvx v30,r10,$sp
|
||||
lvx v31,r11,$sp
|
||||
addi $sp,$sp,$FRAME
|
||||
blr
|
||||
.long 0
|
||||
.byte 0,12,0x04,0,0x80,0,4,0
|
||||
.long 0
|
||||
___
|
||||
}
|
||||
$code.=<<___;
|
||||
.size .gcm_ghash_p8,.-.gcm_ghash_p8
|
||||
|
||||
.asciz "GHASH for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
|
||||
.align 2
|
||||
___
|
||||
|
||||
foreach (split("\n",$code)) {
|
||||
s/\`([^\`]*)\`/eval $1/geo;
|
||||
|
||||
if ($flavour =~ /le$/o) { # little-endian
|
||||
s/le\?//o or
|
||||
s/be\?/#be#/o;
|
||||
} else {
|
||||
s/le\?/#le#/o or
|
||||
s/be\?//o;
|
||||
}
|
||||
print $_,"\n";
|
||||
}
|
||||
|
||||
close STDOUT; # enforce flush
|
|
@@ -683,6 +683,14 @@ void gcm_init_v8(u128 Htable[16],const u64 Xi[2]);
void gcm_gmult_v8(u64 Xi[2],const u128 Htable[16]);
void gcm_ghash_v8(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
# endif
+# elif defined(OPENSSL_CPUID_OBJ) && (defined(__powerpc__) || defined(__ppc__) || defined(_ARCH_PPC))
+# define GHASH_ASM_PPC
+# define GCM_FUNCREF_4BIT
+extern int OPENSSL_ppccap_P;
+void gcm_init_p8(u128 Htable[16], const u64 Xi[2]);
+void gcm_gmult_p8(u64 Xi[2], const u128 Htable[16]);
+void gcm_ghash_p8(u64 Xi[2], const u128 Htable[16], const u8 *inp,
+size_t len);
# elif defined(_TMS320C6400_PLUS)
# define GHASH_ASM_C64Xplus
# endif
@@ -767,6 +775,16 @@ void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx,void *key,block128_f block)
ctx->gmult = gcm_gmult_4bit;
ctx->ghash = gcm_ghash_4bit;
}
+# elif defined(GHASH_ASM_PPC)
+if (OPENSSL_ppccap_P & (1<<2)) {
+gcm_init_p8(ctx->Htable, ctx->H.u);
+ctx->gmult = gcm_gmult_p8;
+ctx->ghash = gcm_ghash_p8;
+} else {
+gcm_init_4bit(ctx->Htable, ctx->H.u);
+ctx->gmult = gcm_gmult_4bit;
+ctx->ghash = gcm_ghash_4bit;
+}
# elif defined(GHASH_ASM_C64Xplus)
/* C64x+ assembler doesn't use tables, skip gcm_init_4bit.
* This is likely to trigger "function never referenced"
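CRYPTO_gcm128_init() above tests the capability bit once and installs the p8 routines as function pointers, so later GHASH calls dispatch without re-checking. A reduced sketch of that pattern, using only the gcm_*_p8 and gcm_*_4bit functions named in this diff; the gcm_sketch struct and gcm_sketch_init() are simplified stand-ins for GCM128_CONTEXT and CRYPTO_gcm128_init(), not the real definitions:

typedef unsigned long long u64;
typedef struct { u64 hi, lo; } u128;

extern int OPENSSL_ppccap_P;
void gcm_init_p8(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_p8(u64 Xi[2], const u128 Htable[16]);
void gcm_init_4bit(u128 Htable[16], const u64 H[2]);
void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]);

struct gcm_sketch {
    u64 Xi[2];                 /* running hash value */
    u64 H[2];                  /* hash key */
    u128 Htable[16];           /* precomputed table */
    void (*gmult)(u64 Xi[2], const u128 Htable[16]);
};

static void gcm_sketch_init(struct gcm_sketch *ctx)
{
    if (OPENSSL_ppccap_P & (1 << 2)) {      /* PPC_CRYPTO207: POWER8 vpmsumd path */
        gcm_init_p8(ctx->Htable, ctx->H);
        ctx->gmult = gcm_gmult_p8;
    } else {                                /* existing table-driven fallback */
        gcm_init_4bit(ctx->Htable, ctx->H);
        ctx->gmult = gcm_gmult_4bit;
    }
}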
@@ -27,7 +27,8 @@ my $globl = sub {
/osx/ && do { $name = "_$name";
last;
};
-/linux.*32/ && do { $ret .= ".globl $name\n";
+/linux.*(32|64le)/
+&& do { $ret .= ".globl $name\n";
$ret .= ".type $name,\@function";
last;
};
@@ -37,7 +38,6 @@ my $globl = sub {
$ret .= ".align 3\n";
$ret .= "$name:\n";
$ret .= ".quad .$name,.TOC.\@tocbase,0\n";
-$ret .= ".size $name,24\n";
$ret .= ".previous\n";

$name = ".$name";
@@ -50,7 +50,9 @@ my $globl = sub {
$ret;
};
my $text = sub {
-($flavour =~ /aix/) ? ".csect" : ".text";
+my $ret = ($flavour =~ /aix/) ? ".csect\t.text[PR],7" : ".text";
+$ret = ".abiversion 2\n".$ret if ($flavour =~ /linux.*64le/);
+$ret;
};
my $machine = sub {
my $junk = shift;
@@ -62,9 +64,12 @@ my $machine = sub {
".machine $arch";
};
my $size = sub {
-if ($flavour =~ /linux.*32/)
+if ($flavour =~ /linux/)
{ shift;
-".size " . join(",",@_);
+my $name = shift; $name =~ s|^[\.\_]||;
+my $ret = ".size $name,.-".($flavour=~/64$/?".":"").$name;
+$ret .= "\n.size .$name,.-.$name" if ($flavour=~/64$/);
+$ret;
}
else
{ ""; }
@@ -77,6 +82,25 @@ my $asciz = sub {
else
{ ""; }
};
+my $quad = sub {
+shift;
+my @ret;
+my ($hi,$lo);
+for (@_) {
+if (/^0x([0-9a-f]*?)([0-9a-f]{1,8})$/io)
+{ $hi=$1?"0x$1":"0"; $lo="0x$2"; }
+elsif (/^([0-9]+)$/o)
+{ $hi=$1>>32; $lo=$1&0xffffffff; } # error-prone with 32-bit perl
+else
+{ $hi=undef; $lo=$_; }

+if (defined($hi))
+{ push(@ret,$flavour=~/le$/o?".long\t$lo,$hi":".long\t$hi,$lo"); }
+else
+{ push(@ret,".quad $lo"); }
+}
+join("\n",@ret);
+};

################################################################
# simplified mnemonics not handled by at least one assembler
@@ -122,6 +146,66 @@ my $extrdi = sub {
$b = ($b+$n)&63; $n = 64-$n;
" rldicl $ra,$rs,$b,$n";
};
+my $vmr = sub {
+my ($f,$vx,$vy) = @_;
+" vor $vx,$vy,$vy";
+};

+# Some ABIs specify vrsave, special-purpose register #256, as reserved
+# for system use.
+my $no_vrsave = ($flavour =~ /aix|linux64le/);
+my $mtspr = sub {
+my ($f,$idx,$ra) = @_;
+if ($idx == 256 && $no_vrsave) {
+" or $ra,$ra,$ra";
+} else {
+" mtspr $idx,$ra";
+}
+};
+my $mfspr = sub {
+my ($f,$rd,$idx) = @_;
+if ($idx == 256 && $no_vrsave) {
+" li $rd,-1";
+} else {
+" mfspr $rd,$idx";
+}
+};

+# PowerISA 2.06 stuff
+sub vsxmem_op {
+my ($f, $vrt, $ra, $rb, $op) = @_;
+" .long ".sprintf "0x%X",(31<<26)|($vrt<<21)|($ra<<16)|($rb<<11)|($op*2+1);
+}
+# made-up unaligned memory reference AltiVec/VMX instructions
+my $lvx_u = sub { vsxmem_op(@_, 844); }; # lxvd2x
+my $stvx_u = sub { vsxmem_op(@_, 972); }; # stxvd2x
+my $lvdx_u = sub { vsxmem_op(@_, 588); }; # lxsdx
+my $stvdx_u = sub { vsxmem_op(@_, 716); }; # stxsdx
+my $lvx_4w = sub { vsxmem_op(@_, 780); }; # lxvw4x
+my $stvx_4w = sub { vsxmem_op(@_, 908); }; # stxvw4x

+# PowerISA 2.07 stuff
+sub vcrypto_op {
+my ($f, $vrt, $vra, $vrb, $op) = @_;
+" .long ".sprintf "0x%X",(4<<26)|($vrt<<21)|($vra<<16)|($vrb<<11)|$op;
+}
+my $vcipher = sub { vcrypto_op(@_, 1288); };
+my $vcipherlast = sub { vcrypto_op(@_, 1289); };
+my $vncipher = sub { vcrypto_op(@_, 1352); };
+my $vncipherlast= sub { vcrypto_op(@_, 1353); };
+my $vsbox = sub { vcrypto_op(@_, 0, 1480); };
+my $vshasigmad = sub { my ($st,$six)=splice(@_,-2); vcrypto_op(@_, $st<<4|$six, 1730); };
+my $vshasigmaw = sub { my ($st,$six)=splice(@_,-2); vcrypto_op(@_, $st<<4|$six, 1666); };
+my $vpmsumb = sub { vcrypto_op(@_, 1032); };
+my $vpmsumd = sub { vcrypto_op(@_, 1224); };
+my $vpmsumh = sub { vcrypto_op(@_, 1096); };
+my $vpmsumw = sub { vcrypto_op(@_, 1160); };
+my $vaddudm = sub { vcrypto_op(@_, 192); };

+my $mtsle = sub {
+my ($f, $arg) = @_;
+" .long ".sprintf "0x%X",(31<<26)|($arg<<21)|(147*2);
+};

while($line=<>) {

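vcrypto_op() above emits PowerISA 2.07 crypto instructions as raw .long words so that assemblers which do not yet know the mnemonics can still build the modules. A small self-contained check of that encoding; vcrypto_op() below is a C re-implementation of the Perl helper for illustration only, and the 0x10000508 reference value is the "vcipher v0,v0,v0" word hard-coded in the OPENSSL_crypto207_probe routine added later in this commit:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* VX-form encoding used by the Perl vcrypto_op() helper. */
static uint32_t vcrypto_op(unsigned vrt, unsigned vra, unsigned vrb, unsigned op)
{
    return (4u << 26) | (vrt << 21) | (vra << 16) | (vrb << 11) | op;
}

int main(void)
{
    /* vcipher v0,v0,v0 (opcode 1288) -> the probe word used by OPENSSL_crypto207_probe */
    assert(vcrypto_op(0, 0, 0, 1288) == 0x10000508);
    /* vpmsumd v0,v1,v2 (opcode 1224), the GHASH workhorse */
    printf("vpmsumd v0,v1,v2 -> .long 0x%08X\n", vcrypto_op(0, 1, 2, 1224));   /* 0x100114C8 */
    return 0;
}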
@@ -138,7 +222,10 @@ while($line=<>) {
{
$line =~ s|(^[\.\w]+)\:\s*||;
my $label = $1;
-printf "%s:",($GLOBALS{$label} or $label) if ($label);
+if ($label) {
+printf "%s:",($GLOBALS{$label} or $label);
+printf "\n.localentry\t$GLOBALS{$label},0" if ($GLOBALS{$label} && $flavour =~ /linux.*64le/);
+}
}

{
@@ -147,7 +234,7 @@ while($line=<>) {
my $mnemonic = $2;
my $f = $3;
my $opcode = eval("\$$mnemonic");
-$line =~ s|\bc?[rf]([0-9]+)\b|$1|g if ($c ne "." and $flavour !~ /osx/);
+$line =~ s/\b(c?[rf]|v|vs)([0-9]+)\b/$2/g if ($c ne "." and $flavour !~ /osx/);
if (ref($opcode) eq 'CODE') { $line = &$opcode($f,split(',',$line)); }
elsif ($mnemonic) { $line = $c.$mnemonic.$f."\t".$line; }
}
@@ -3,13 +3,24 @@
#include <string.h>
#include <setjmp.h>
#include <signal.h>
#include <unistd.h>
+#if defined(__linux) || defined(_AIX)
+# include <sys/utsname.h>
+#endif
+#if defined(_AIX53) /* defined even on post-5.3 */
+# include <sys/systemcfg.h>
+# if !defined(__power_set)
+# define __power_set(a) (_system_configuration.implementation & (a))
+# endif
+#endif
#include <crypto.h>
#include <openssl/bn.h>

#define PPC_FPU64 (1<<0)
#define PPC_ALTIVEC (1<<1)
+#define PPC_CRYPTO207 (1<<2)

-static int OPENSSL_ppccap_P = 0;
+int OPENSSL_ppccap_P = 0;

static sigset_t all_masked;
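OPENSSL_ppccap_P now carries three feature bits and becomes a plain global so the assembly-backed modules elsewhere in this commit can test it; everything POWER8-specific keys off PPC_CRYPTO207, the same (1<<2) tested in the AES and GHASH glue code above. A trivial sketch that decodes the word; report_ppccap() is purely illustrative and not part of the patch:

#include <stdio.h>

#define PPC_FPU64     (1 << 0)
#define PPC_ALTIVEC   (1 << 1)
#define PPC_CRYPTO207 (1 << 2)

extern int OPENSSL_ppccap_P;

static void report_ppccap(void)
{
    printf("64-bit FPU path     : %s\n", (OPENSSL_ppccap_P & PPC_FPU64)     ? "yes" : "no");
    printf("AltiVec/VMX         : %s\n", (OPENSSL_ppccap_P & PPC_ALTIVEC)   ? "yes" : "no");
    printf("PowerISA 2.07 crypto: %s\n", (OPENSSL_ppccap_P & PPC_CRYPTO207) ? "yes" : "no");
}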
@@ -49,10 +60,28 @@ int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_U
}
#endif

+void sha256_block_p8(void *ctx, const void *inp, size_t len);
+void sha256_block_ppc(void *ctx, const void *inp, size_t len);
+void sha256_block_data_order(void *ctx, const void *inp, size_t len)
+{
+OPENSSL_ppccap_P & PPC_CRYPTO207 ? sha256_block_p8(ctx, inp, len) :
+sha256_block_ppc(ctx, inp, len);
+}

+void sha512_block_p8(void *ctx, const void *inp, size_t len);
+void sha512_block_ppc(void *ctx, const void *inp, size_t len);
+void sha512_block_data_order(void *ctx, const void *inp, size_t len)
+{
+OPENSSL_ppccap_P & PPC_CRYPTO207 ? sha512_block_p8(ctx, inp, len) :
+sha512_block_ppc(ctx, inp, len);
+}

static sigjmp_buf ill_jmp;
static void ill_handler (int sig) { siglongjmp(ill_jmp,sig); }

void OPENSSL_ppc64_probe(void);
void OPENSSL_altivec_probe(void);
+void OPENSSL_crypto207_probe(void);

void OPENSSL_cpuid_setup(void)
{
@@ -82,6 +111,45 @@ void OPENSSL_cpuid_setup(void)

OPENSSL_ppccap_P = 0;

+#if defined(_AIX)
+if (sizeof(size_t) == 4) {
+struct utsname uts;
+# if defined(_SC_AIX_KERNEL_BITMODE)
+if (sysconf(_SC_AIX_KERNEL_BITMODE) != 64)
+return;
+# endif
+if (uname(&uts) != 0 || atoi(uts.version) < 6)
+return;
+}

+# if defined(__power_set)
+/*
+* Value used in __power_set is a single-bit 1<<n one denoting
+* specific processor class. Incidentally 0xffffffff<<n can be
+* used to denote specific processor and its successors.
+*/
+if (sizeof(size_t) == 4) {
+/* In 32-bit case PPC_FPU64 is always fastest [if option] */
+if (__power_set(0xffffffffU<<13)) /* POWER5 and later */
+OPENSSL_ppccap_P |= PPC_FPU64;
+} else {
+/* In 64-bit case PPC_FPU64 is fastest only on POWER6 */
+# if 0 /* to keep compatibility with previous validations */
+if (__power_set(0x1U<<14)) /* POWER6 */
+OPENSSL_ppccap_P |= PPC_FPU64;
+# endif
+}

+if (__power_set(0xffffffffU<<14)) /* POWER6 and later */
+OPENSSL_ppccap_P |= PPC_ALTIVEC;

+if (__power_set(0xffffffffU<<16)) /* POWER8 and later */
+OPENSSL_ppccap_P |= PPC_CRYPTO207;

+return;
+# endif
+#endif

memset(&ill_act,0,sizeof(ill_act));
ill_act.sa_handler = ill_handler;
ill_act.sa_mask = all_masked;
@@ -108,6 +176,11 @@ void OPENSSL_cpuid_setup(void)
{
OPENSSL_altivec_probe();
OPENSSL_ppccap_P |= PPC_ALTIVEC;
+if (sigsetjmp(ill_jmp, 1) == 0)
+{
+OPENSSL_crypto207_probe();
+OPENSSL_ppccap_P |= PPC_CRYPTO207;
+}
}

sigaction (SIGILL,&ill_oact,NULL);
@@ -40,6 +40,16 @@ $code=<<___;
.long 0
.byte 0,12,0x14,0,0,0,0,0

+.globl .OPENSSL_crypto207_probe
+.align 4
+.OPENSSL_crypto207_probe:
+.long 0x7C000E99 # lvx_u v0,0,r1
+.long 0x10000508 # vcipher v0,v0,v0
+blr
+.long 0
+.byte 0,12,0x14,0,0,0,0,0
+.size .OPENSSL_crypto207_probe,.-.OPENSSL_crypto207_probe

.globl .OPENSSL_wipe_cpu
.align 4
.OPENSSL_wipe_cpu:
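.OPENSSL_crypto207_probe above is deliberately tiny: one unaligned vector load and one vcipher. On the non-AIX path, OPENSSL_cpuid_setup() simply executes it under a SIGILL handler and records whether it trapped. A standalone sketch of that pattern, assuming only the probe symbol from this commit; have_crypto207() is a hypothetical wrapper and omits the signal-mask handling the real setup code performs:

#include <setjmp.h>
#include <signal.h>
#include <string.h>

extern void OPENSSL_crypto207_probe(void);   /* lvx_u + vcipher, then blr */

static sigjmp_buf ill_jmp;
static void ill_handler(int sig) { siglongjmp(ill_jmp, sig); }

static int have_crypto207(void)
{
    struct sigaction ill_act, ill_oact;
    int ok = 0;

    memset(&ill_act, 0, sizeof(ill_act));
    ill_act.sa_handler = ill_handler;
    sigaction(SIGILL, &ill_act, &ill_oact);

    if (sigsetjmp(ill_jmp, 1) == 0) {
        OPENSSL_crypto207_probe();           /* raises SIGILL if 2.07 crypto is absent */
        ok = 1;
    }

    sigaction(SIGILL, &ill_oact, NULL);
    return ok;
}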
@@ -73,6 +73,8 @@ sha512-sparcv9.s:asm/sha512-sparcv9.pl; $(PERL) asm/sha512-sparcv9.pl $@ $(CFLAG
sha1-ppc.s: asm/sha1-ppc.pl; $(PERL) asm/sha1-ppc.pl $(PERLASM_SCHEME) $@
sha256-ppc.s: asm/sha512-ppc.pl; $(PERL) asm/sha512-ppc.pl $(PERLASM_SCHEME) $@
sha512-ppc.s: asm/sha512-ppc.pl; $(PERL) asm/sha512-ppc.pl $(PERLASM_SCHEME) $@
+sha256p8-ppc.s: asm/sha512p8-ppc.pl; $(PERL) asm/sha512p8-ppc.pl $(PERLASM_SCHEME) $@
+sha512p8-ppc.s: asm/sha512p8-ppc.pl; $(PERL) asm/sha512p8-ppc.pl $(PERLASM_SCHEME) $@

sha1-parisc.s: asm/sha1-parisc.pl; $(PERL) asm/sha1-parisc.pl $(PERLASM_SCHEME) $@
sha256-parisc.s:asm/sha512-parisc.pl; $(PERL) asm/sha512-parisc.pl $(PERLASM_SCHEME) $@
@@ -210,7 +210,7 @@ Lunaligned:
srwi. $t1,$t1,6 ; t1/=64
beq Lcross_page
$UCMP $num,$t1
-ble- Laligned ; didn't cross the page boundary
+ble Laligned ; didn't cross the page boundary
mtctr $t1
subfc $num,$t1,$num
bl Lsha1_block_private
@@ -238,7 +238,7 @@ Lmemcpy:
bl Lsha1_block_private
$POP $inp,`$FRAME-$SIZE_T*18`($sp)
addic. $num,$num,-1
-bne- Lunaligned
+bne Lunaligned

Ldone:
$POP r0,`$FRAME+$LRSAVE`($sp)
@@ -312,7 +312,7 @@ $code.=<<___;
stw r20,16($ctx)
mr $E,r20
addi $inp,$inp,`16*4`
-bdnz- Lsha1_block_private
+bdnz Lsha1_block_private
blr
.long 0
.byte 0,12,0x14,0,0,0,0,0
@@ -64,7 +64,7 @@ die "can't locate ppc-xlate.pl";
open STDOUT,"| $^X $xlate $flavour $output" || die "can't call $xlate: $!";

if ($output =~ /512/) {
-$func="sha512_block_data_order";
+$func="sha512_block_ppc";
$SZ=8;
@Sigma0=(28,34,39);
@Sigma1=(14,18,41);
@@ -76,7 +76,7 @@ if ($output =~ /512/) {
$ROR="rotrdi";
$SHR="srdi";
} else {
-$func="sha256_block_data_order";
+$func="sha256_block_ppc";
$SZ=4;
@Sigma0=( 2,13,22);
@Sigma1=( 6,11,25);
@@ -243,7 +243,7 @@ Lunaligned:
andi. $t1,$t1,`4096-16*$SZ` ; distance to closest page boundary
beq Lcross_page
$UCMP $num,$t1
-ble- Laligned ; didn't cross the page boundary
+ble Laligned ; didn't cross the page boundary
subfc $num,$t1,$num
add $t1,$inp,$t1
$PUSH $num,`$FRAME-$SIZE_T*25`($sp) ; save real remaining num
@@ -279,7 +279,7 @@ Lmemcpy:
$POP $inp,`$FRAME-$SIZE_T*26`($sp) ; restore real inp
$POP $num,`$FRAME-$SIZE_T*25`($sp) ; restore real num
addic. $num,$num,`-16*$SZ` ; num--
-bne- Lunaligned
+bne Lunaligned

Ldone:
$POP r0,`$FRAME+$LRSAVE`($sp)
@@ -339,7 +339,7 @@ for(;$i<32;$i++) {
unshift(@V,pop(@V));
}
$code.=<<___;
-bdnz- Lrounds
+bdnz Lrounds

$POP $ctx,`$FRAME-$SIZE_T*22`($sp)
$POP $inp,`$FRAME-$SIZE_T*23`($sp) ; inp pointer
crypto/sha/asm/sha512p8-ppc.pl (new executable file, 431 lines)
@@ -0,0 +1,431 @@
|
|||
#!/usr/bin/env perl
|
||||
|
||||
# ====================================================================
|
||||
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
|
||||
# project. The module is, however, dual licensed under OpenSSL and
|
||||
# CRYPTOGAMS licenses depending on where you obtain it. For further
|
||||
# details see http://www.openssl.org/~appro/cryptogams/.
|
||||
# ====================================================================
|
||||
|
||||
# SHA256/512 for PowerISA v2.07.
|
||||
#
|
||||
# Accurate performance measurements are problematic, because it's
|
||||
# always a virtualized setup with a possibly throttled processor.
|
||||
# Relative comparison is therefore more informative. This module is
|
||||
# ~60% faster than integer-only sha512-ppc.pl. To anchor to something
|
||||
# else, SHA256 is 24% slower than sha1-ppc.pl and 2.5x slower than
|
||||
# hardware-assisted aes-128-cbc encrypt. SHA512 is 20% faster than
|
||||
# sha1-ppc.pl and 1.6x slower than aes-128-cbc. Another interesting
|
||||
# result is degree of computational resources' utilization. POWER8 is
|
||||
# "massively multi-threaded chip" and difference between single- and
|
||||
# maximum multi-process benchmark results tells that utilization is
|
||||
# whopping 94%. For sha512-ppc.pl we get [not unimpressive] 84% and
|
||||
# for sha1-ppc.pl - 73%. 100% means that multi-process result equals
|
||||
# to single-process one, given that all threads end up on the same
|
||||
# physical core.
|
||||
#
|
||||
#######################################################################
|
||||
#
|
||||
# SHA256/pre-2.07(*) SHA512/pre-2.07(*) SHA1(*)
|
||||
# POWER8 9.3 /14.8 5.8 /9.5 7.1
|
||||
#
|
||||
# (*) presented for reference/comparison purposes;
|
||||
|
||||
$flavour=shift;
|
||||
$output =shift;
|
||||
|
||||
if ($flavour =~ /64/) {
|
||||
$SIZE_T=8;
|
||||
$LRSAVE=2*$SIZE_T;
|
||||
$STU="stdu";
|
||||
$POP="ld";
|
||||
$PUSH="std";
|
||||
} elsif ($flavour =~ /32/) {
|
||||
$SIZE_T=4;
|
||||
$LRSAVE=$SIZE_T;
|
||||
$STU="stwu";
|
||||
$POP="lwz";
|
||||
$PUSH="stw";
|
||||
} else { die "nonsense $flavour"; }
|
||||
|
||||
$LENDIAN=($flavour=~/le/);
|
||||
|
||||
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
|
||||
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
|
||||
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
|
||||
die "can't locate ppc-xlate.pl";
|
||||
|
||||
open STDOUT,"| $^X $xlate $flavour $output" || die "can't call $xlate: $!";
|
||||
|
||||
if ($output =~ /512/) {
|
||||
$bits=512;
|
||||
$SZ=8;
|
||||
$sz="d";
|
||||
$rounds=80;
|
||||
} else {
|
||||
$bits=256;
|
||||
$SZ=4;
|
||||
$sz="w";
|
||||
$rounds=64;
|
||||
}
|
||||
|
||||
$func="sha${bits}_block_p8";
|
||||
$FRAME=8*$SIZE_T;
|
||||
|
||||
$sp ="r1";
|
||||
$toc="r2";
|
||||
$ctx="r3";
|
||||
$inp="r4";
|
||||
$num="r5";
|
||||
$Tbl="r6";
|
||||
$idx="r7";
|
||||
$lrsave="r8";
|
||||
$offload="r11";
|
||||
$vrsave="r12";
|
||||
($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,10,26..31));
|
||||
$x00=0 if ($flavour =~ /osx/);
|
||||
|
||||
@V=($A,$B,$C,$D,$E,$F,$G,$H)=map("v$_",(0..7));
|
||||
@X=map("v$_",(8..23));
|
||||
($Ki,$Func,$S0,$S1,$s0,$s1,$lemask)=map("v$_",(24..31));
|
||||
|
||||
sub ROUND {
|
||||
my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;
|
||||
my $j=($i+1)%16;
|
||||
|
||||
$code.=<<___ if ($i<15 && ($i%(16/$SZ))==(16/$SZ-1));
|
||||
lvx_u @X[$i+1],0,$inp ; load X[i] in advance
|
||||
addi $inp,$inp,16
|
||||
___
|
||||
$code.=<<___ if ($i<16 && ($i%(16/$SZ)));
|
||||
vsldoi @X[$i],@X[$i-1],@X[$i-1],$SZ
|
||||
___
|
||||
$code.=<<___ if ($LENDIAN && $i<16 && ($i%(16/$SZ))==0);
|
||||
vperm @X[$i],@X[$i],@X[$i],$lemask
|
||||
___
|
||||
$code.=<<___;
|
||||
`"vshasigma${sz} $s0,@X[($j+1)%16],0,0" if ($i>=15)`
|
||||
vsel $Func,$g,$f,$e ; Ch(e,f,g)
|
||||
vshasigma${sz} $S1,$e,1,15 ; Sigma1(e)
|
||||
vaddu${sz}m $h,$h,@X[$i%16] ; h+=X[i]
|
||||
vshasigma${sz} $S0,$a,1,0 ; Sigma0(a)
|
||||
`"vshasigma${sz} $s1,@X[($j+14)%16],0,15" if ($i>=15)`
|
||||
vaddu${sz}m $h,$h,$Func ; h+=Ch(e,f,g)
|
||||
vxor $Func,$a,$b
|
||||
`"vaddu${sz}m @X[$j],@X[$j],@X[($j+9)%16]" if ($i>=15)`
|
||||
vaddu${sz}m $h,$h,$S1 ; h+=Sigma1(e)
|
||||
vsel $Func,$b,$c,$Func ; Maj(a,b,c)
|
||||
vaddu${sz}m $g,$g,$Ki ; future h+=K[i]
|
||||
vaddu${sz}m $d,$d,$h ; d+=h
|
||||
vaddu${sz}m $S0,$S0,$Func ; Sigma0(a)+Maj(a,b,c)
|
||||
`"vaddu${sz}m @X[$j],@X[$j],$s0" if ($i>=15)`
|
||||
lvx $Ki,$idx,$Tbl ; load next K[i]
|
||||
addi $idx,$idx,16
|
||||
vaddu${sz}m $h,$h,$S0 ; h+=Sigma0(a)+Maj(a,b,c)
|
||||
`"vaddu${sz}m @X[$j],@X[$j],$s1" if ($i>=15)`
|
||||
___
|
||||
}
|
||||
|
||||
$code=<<___;
|
||||
.machine "any"
|
||||
.text
|
||||
|
||||
.globl $func
|
||||
.align 6
|
||||
$func:
|
||||
$STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
|
||||
mflr $lrsave
|
||||
li r10,`$FRAME+8*16+15`
|
||||
li r11,`$FRAME+8*16+31`
|
||||
stvx v20,r10,$sp # ABI says so
|
||||
addi r10,r10,32
|
||||
mfspr $vrsave,256
|
||||
stvx v21,r11,$sp
|
||||
addi r11,r11,32
|
||||
stvx v22,r10,$sp
|
||||
addi r10,r10,32
|
||||
stvx v23,r11,$sp
|
||||
addi r11,r11,32
|
||||
stvx v24,r10,$sp
|
||||
addi r10,r10,32
|
||||
stvx v25,r11,$sp
|
||||
addi r11,r11,32
|
||||
stvx v26,r10,$sp
|
||||
addi r10,r10,32
|
||||
stvx v27,r11,$sp
|
||||
addi r11,r11,32
|
||||
stvx v28,r10,$sp
|
||||
addi r10,r10,32
|
||||
stvx v29,r11,$sp
|
||||
addi r11,r11,32
|
||||
stvx v30,r10,$sp
|
||||
stvx v31,r11,$sp
|
||||
li r11,-1
|
||||
stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave
|
||||
li $x10,0x10
|
||||
$PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp)
|
||||
li $x20,0x20
|
||||
$PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp)
|
||||
li $x30,0x30
|
||||
$PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp)
|
||||
li $x40,0x40
|
||||
$PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp)
|
||||
li $x50,0x50
|
||||
$PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp)
|
||||
li $x60,0x60
|
||||
$PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp)
|
||||
li $x70,0x70
|
||||
$PUSH $lrsave,`$FRAME+21*16+6*$SIZE_T+$LRSAVE`($sp)
|
||||
mtspr 256,r11
|
||||
|
||||
bl LPICmeup
|
||||
addi $offload,$sp,$FRAME+15
|
||||
___
|
||||
$code.=<<___ if ($LENDIAN);
|
||||
li $idx,8
|
||||
lvsl $lemask,0,$idx
|
||||
vspltisb $Ki,0x0f
|
||||
vxor $lemask,$lemask,$Ki
|
||||
___
|
||||
$code.=<<___ if ($SZ==4);
|
||||
lvx_4w $A,$x00,$ctx
|
||||
lvx_4w $E,$x10,$ctx
|
||||
vsldoi $B,$A,$A,4 # unpack
|
||||
vsldoi $C,$A,$A,8
|
||||
vsldoi $D,$A,$A,12
|
||||
vsldoi $F,$E,$E,4
|
||||
vsldoi $G,$E,$E,8
|
||||
vsldoi $H,$E,$E,12
|
||||
___
|
||||
$code.=<<___ if ($SZ==8);
|
||||
lvx_u $A,$x00,$ctx
|
||||
lvx_u $C,$x10,$ctx
|
||||
lvx_u $E,$x20,$ctx
|
||||
vsldoi $B,$A,$A,8 # unpack
|
||||
lvx_u $G,$x30,$ctx
|
||||
vsldoi $D,$C,$C,8
|
||||
vsldoi $F,$E,$E,8
|
||||
vsldoi $H,$G,$G,8
|
||||
___
|
||||
$code.=<<___;
|
||||
li r0,`($rounds-16)/16` # inner loop counter
|
||||
b Loop
|
||||
.align 5
|
||||
Loop:
|
||||
lvx $Ki,$x00,$Tbl
|
||||
li $idx,16
|
||||
lvx_u @X[0],0,$inp
|
||||
addi $inp,$inp,16
|
||||
stvx $A,$x00,$offload # offload $A-$H
|
||||
stvx $B,$x10,$offload
|
||||
stvx $C,$x20,$offload
|
||||
stvx $D,$x30,$offload
|
||||
stvx $E,$x40,$offload
|
||||
stvx $F,$x50,$offload
|
||||
stvx $G,$x60,$offload
|
||||
stvx $H,$x70,$offload
|
||||
vaddu${sz}m $H,$H,$Ki # h+K[i]
|
||||
lvx $Ki,$idx,$Tbl
|
||||
addi $idx,$idx,16
|
||||
___
|
||||
for ($i=0;$i<16;$i++) { &ROUND($i,@V); unshift(@V,pop(@V)); }
|
||||
$code.=<<___;
|
||||
mtctr r0
|
||||
b L16_xx
|
||||
.align 5
|
||||
L16_xx:
|
||||
___
|
||||
for (;$i<32;$i++) { &ROUND($i,@V); unshift(@V,pop(@V)); }
|
||||
$code.=<<___;
|
||||
bdnz L16_xx
|
||||
|
||||
lvx @X[2],$x00,$offload
|
||||
subic. $num,$num,1
|
||||
lvx @X[3],$x10,$offload
|
||||
vaddu${sz}m $A,$A,@X[2]
|
||||
lvx @X[4],$x20,$offload
|
||||
vaddu${sz}m $B,$B,@X[3]
|
||||
lvx @X[5],$x30,$offload
|
||||
vaddu${sz}m $C,$C,@X[4]
|
||||
lvx @X[6],$x40,$offload
|
||||
vaddu${sz}m $D,$D,@X[5]
|
||||
lvx @X[7],$x50,$offload
|
||||
vaddu${sz}m $E,$E,@X[6]
|
||||
lvx @X[8],$x60,$offload
|
||||
vaddu${sz}m $F,$F,@X[7]
|
||||
lvx @X[9],$x70,$offload
|
||||
vaddu${sz}m $G,$G,@X[8]
|
||||
vaddu${sz}m $H,$H,@X[9]
|
||||
bne Loop
|
||||
___
|
||||
$code.=<<___ if ($SZ==4);
|
||||
lvx @X[0],$idx,$Tbl
|
||||
addi $idx,$idx,16
|
||||
vperm $A,$A,$B,$Ki # pack the answer
|
||||
lvx @X[1],$idx,$Tbl
|
||||
vperm $E,$E,$F,$Ki
|
||||
vperm $A,$A,$C,@X[0]
|
||||
vperm $E,$E,$G,@X[0]
|
||||
vperm $A,$A,$D,@X[1]
|
||||
vperm $E,$E,$H,@X[1]
|
||||
stvx_4w $A,$x00,$ctx
|
||||
stvx_4w $E,$x10,$ctx
|
||||
___
|
||||
$code.=<<___ if ($SZ==8);
|
||||
vperm $A,$A,$B,$Ki # pack the answer
|
||||
vperm $C,$C,$D,$Ki
|
||||
vperm $E,$E,$F,$Ki
|
||||
vperm $G,$G,$H,$Ki
|
||||
stvx_u $A,$x00,$ctx
|
||||
stvx_u $C,$x10,$ctx
|
||||
stvx_u $E,$x20,$ctx
|
||||
stvx_u $G,$x30,$ctx
|
||||
___
|
||||
$code.=<<___;
|
||||
li r10,`$FRAME+8*16+15`
|
||||
mtlr $lrsave
|
||||
li r11,`$FRAME+8*16+31`
|
||||
mtspr 256,$vrsave
|
||||
lvx v20,r10,$sp # ABI says so
|
||||
addi r10,r10,32
|
||||
lvx v21,r11,$sp
|
||||
addi r11,r11,32
|
||||
lvx v22,r10,$sp
|
||||
addi r10,r10,32
|
||||
lvx v23,r11,$sp
|
||||
addi r11,r11,32
|
||||
lvx v24,r10,$sp
|
||||
addi r10,r10,32
|
||||
lvx v25,r11,$sp
|
||||
addi r11,r11,32
|
||||
lvx v26,r10,$sp
|
||||
addi r10,r10,32
|
||||
lvx v27,r11,$sp
|
||||
addi r11,r11,32
|
||||
lvx v28,r10,$sp
|
||||
addi r10,r10,32
|
||||
lvx v29,r11,$sp
|
||||
addi r11,r11,32
|
||||
lvx v30,r10,$sp
|
||||
lvx v31,r11,$sp
|
||||
$POP r26,`$FRAME+21*16+0*$SIZE_T`($sp)
|
||||
$POP r27,`$FRAME+21*16+1*$SIZE_T`($sp)
|
||||
$POP r28,`$FRAME+21*16+2*$SIZE_T`($sp)
|
||||
$POP r29,`$FRAME+21*16+3*$SIZE_T`($sp)
|
||||
$POP r30,`$FRAME+21*16+4*$SIZE_T`($sp)
|
||||
$POP r31,`$FRAME+21*16+5*$SIZE_T`($sp)
|
||||
addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T`
|
||||
blr
|
||||
.long 0
|
||||
.byte 0,12,4,1,0x80,6,3,0
|
||||
.long 0
|
||||
.size $func,.-$func
|
||||
___
|
||||
|
||||
# Ugly hack here, because PPC assembler syntax seems to vary too
|
||||
# much from platform to platform...
|
||||
$code.=<<___;
|
||||
.align 6
|
||||
LPICmeup:
|
||||
mflr r0
|
||||
bcl 20,31,\$+4
|
||||
mflr $Tbl ; vvvvvv "distance" between . and 1st data entry
|
||||
addi $Tbl,$Tbl,`64-8`
|
||||
mtlr r0
|
||||
blr
|
||||
.long 0
|
||||
.byte 0,12,0x14,0,0,0,0,0
|
||||
.space `64-9*4`
|
||||
___
|
||||
|
||||
if ($SZ==8) {
|
||||
local *table = sub {
|
||||
foreach(@_) { $code.=".quad $_,$_\n"; }
|
||||
};
|
||||
table(
|
||||
"0x428a2f98d728ae22","0x7137449123ef65cd",
|
||||
"0xb5c0fbcfec4d3b2f","0xe9b5dba58189dbbc",
|
||||
"0x3956c25bf348b538","0x59f111f1b605d019",
|
||||
"0x923f82a4af194f9b","0xab1c5ed5da6d8118",
|
||||
"0xd807aa98a3030242","0x12835b0145706fbe",
|
||||
"0x243185be4ee4b28c","0x550c7dc3d5ffb4e2",
|
||||
"0x72be5d74f27b896f","0x80deb1fe3b1696b1",
|
||||
"0x9bdc06a725c71235","0xc19bf174cf692694",
|
||||
"0xe49b69c19ef14ad2","0xefbe4786384f25e3",
|
||||
"0x0fc19dc68b8cd5b5","0x240ca1cc77ac9c65",
|
||||
"0x2de92c6f592b0275","0x4a7484aa6ea6e483",
|
||||
"0x5cb0a9dcbd41fbd4","0x76f988da831153b5",
|
||||
"0x983e5152ee66dfab","0xa831c66d2db43210",
|
||||
"0xb00327c898fb213f","0xbf597fc7beef0ee4",
|
||||
"0xc6e00bf33da88fc2","0xd5a79147930aa725",
|
||||
"0x06ca6351e003826f","0x142929670a0e6e70",
|
||||
"0x27b70a8546d22ffc","0x2e1b21385c26c926",
|
||||
"0x4d2c6dfc5ac42aed","0x53380d139d95b3df",
|
||||
"0x650a73548baf63de","0x766a0abb3c77b2a8",
|
||||
"0x81c2c92e47edaee6","0x92722c851482353b",
|
||||
"0xa2bfe8a14cf10364","0xa81a664bbc423001",
|
||||
"0xc24b8b70d0f89791","0xc76c51a30654be30",
|
||||
"0xd192e819d6ef5218","0xd69906245565a910",
|
||||
"0xf40e35855771202a","0x106aa07032bbd1b8",
|
||||
"0x19a4c116b8d2d0c8","0x1e376c085141ab53",
|
||||
"0x2748774cdf8eeb99","0x34b0bcb5e19b48a8",
|
||||
"0x391c0cb3c5c95a63","0x4ed8aa4ae3418acb",
|
||||
"0x5b9cca4f7763e373","0x682e6ff3d6b2b8a3",
|
||||
"0x748f82ee5defb2fc","0x78a5636f43172f60",
|
||||
"0x84c87814a1f0ab72","0x8cc702081a6439ec",
|
||||
"0x90befffa23631e28","0xa4506cebde82bde9",
|
||||
"0xbef9a3f7b2c67915","0xc67178f2e372532b",
|
||||
"0xca273eceea26619c","0xd186b8c721c0c207",
|
||||
"0xeada7dd6cde0eb1e","0xf57d4f7fee6ed178",
|
||||
"0x06f067aa72176fba","0x0a637dc5a2c898a6",
|
||||
"0x113f9804bef90dae","0x1b710b35131c471b",
|
||||
"0x28db77f523047d84","0x32caab7b40c72493",
|
||||
"0x3c9ebe0a15c9bebc","0x431d67c49c100d4c",
|
||||
"0x4cc5d4becb3e42b6","0x597f299cfc657e2a",
|
||||
"0x5fcb6fab3ad6faec","0x6c44198c4a475817","0");
|
||||
$code.=<<___ if (!$LENDIAN);
|
||||
.quad 0x0001020304050607,0x1011121314151617
|
||||
___
|
||||
$code.=<<___ if ($LENDIAN); # quad-swapped
|
||||
.quad 0x1011121314151617,0x0001020304050607
|
||||
___
|
||||
} else {
|
||||
local *table = sub {
|
||||
foreach(@_) { $code.=".long $_,$_,$_,$_\n"; }
|
||||
};
|
||||
table(
|
||||
"0x428a2f98","0x71374491","0xb5c0fbcf","0xe9b5dba5",
|
||||
"0x3956c25b","0x59f111f1","0x923f82a4","0xab1c5ed5",
|
||||
"0xd807aa98","0x12835b01","0x243185be","0x550c7dc3",
|
||||
"0x72be5d74","0x80deb1fe","0x9bdc06a7","0xc19bf174",
|
||||
"0xe49b69c1","0xefbe4786","0x0fc19dc6","0x240ca1cc",
|
||||
"0x2de92c6f","0x4a7484aa","0x5cb0a9dc","0x76f988da",
|
||||
"0x983e5152","0xa831c66d","0xb00327c8","0xbf597fc7",
|
||||
"0xc6e00bf3","0xd5a79147","0x06ca6351","0x14292967",
|
||||
"0x27b70a85","0x2e1b2138","0x4d2c6dfc","0x53380d13",
|
||||
"0x650a7354","0x766a0abb","0x81c2c92e","0x92722c85",
|
||||
"0xa2bfe8a1","0xa81a664b","0xc24b8b70","0xc76c51a3",
|
||||
"0xd192e819","0xd6990624","0xf40e3585","0x106aa070",
|
||||
"0x19a4c116","0x1e376c08","0x2748774c","0x34b0bcb5",
|
||||
"0x391c0cb3","0x4ed8aa4a","0x5b9cca4f","0x682e6ff3",
|
||||
"0x748f82ee","0x78a5636f","0x84c87814","0x8cc70208",
|
||||
"0x90befffa","0xa4506ceb","0xbef9a3f7","0xc67178f2","0");
|
||||
$code.=<<___ if (!$LENDIAN);
|
||||
.long 0x00010203,0x10111213,0x10111213,0x10111213
|
||||
.long 0x00010203,0x04050607,0x10111213,0x10111213
|
||||
.long 0x00010203,0x04050607,0x08090a0b,0x10111213
|
||||
___
|
||||
$code.=<<___ if ($LENDIAN); # word-swapped
|
||||
.long 0x10111213,0x10111213,0x10111213,0x00010203
|
||||
.long 0x10111213,0x10111213,0x04050607,0x00010203
|
||||
.long 0x10111213,0x08090a0b,0x04050607,0x00010203
|
||||
___
|
||||
}
|
||||
$code.=<<___;
|
||||
.asciz "SHA${bits} for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
|
||||
.align 2
|
||||
___
|
||||
|
||||
$code =~ s/\`([^\`]*)\`/eval $1/gem;
|
||||
print $code;
|
||||
close STDOUT;
|
|
@@ -140,6 +140,9 @@ void FINGERPRINT_premain(void)
}
#endif
} while(0);
+#if defined(__powerpc__) || defined(__ppc__) || defined(_ARCH_PPC)
+fips_openssl_cpuid_setup();
+#endif
}

#else
@@ -1 +1 @@
-HMAC-SHA1(fips_premain.c)= 65b20c3cec235cec85af848e1cd2dfdfa101804a
+HMAC-SHA1(fips_premain.c)= 2bfb57ef540bdba29220a45d65e1b4080de9adc1
@@ -712,6 +712,23 @@
#define _bn_GF2m_mul_2x2 _fips_bn_GF2m_mul_2x2
#define _OPENSSL_cleanse _FIPS_openssl_cleanse
#endif
+#define aes_p8_encrypt fips_aes_p8_encrypt
+#define aes_p8_decrypt fips_aes_p8_decrypt
+#define aes_p8_set_encrypt_key fips_aes_p8_set_encrypt_key
+#define aes_p8_set_decrypt_key fips_aes_p8_set_decrypt_key
+#define aes_p8_cbc_encrypt fips_aes_p8_cbc_encrypt
+#define aes_p8_ctr32_encrypt_blocks fips_aes_p8_ctr32_encrypt_blocks
+#define aes_p8_xts_encrypt fips_aes_p8_xts_encrypt
+#define aes_p8_xts_decrypt fips_aes_p8_xts_decrypt
+#define gcm_init_p8 fips_gcm_init_p8
+#define gcm_gmult_p8 fips_gcm_gmult_p8
+#define gcm_ghash_p8 fips_gcm_ghash_p8
+#define sha256_block_p8 fips_sha256_block_p8
+#define sha512_block_p8 fips_sha512_block_p8
+#define sha256_block_ppc fips_sha256_block_ppc
+#define sha512_block_ppc fips_sha512_block_ppc
+#define OPENSSL_ppccap_P fips_openssl_ppccap_p
+#define OPENSSL_crypto207_probe fips_openssl_crypto207_probe

#if defined(_MSC_VER)
# pragma const_seg("fipsro$b")