Add some C64x assembly modules [by minor adjustments of C64x+ modules].

The AES, SHA256 and SHA512 modules can actually replace the
corresponding C64x+ modules, because the C64x+ instructions don't
provide a "killer-argument" advantage in these modules. As for SHA1,
even though its performance is exactly the same, the C64x+ module is
more responsive to interrupts, i.e. it doesn't inhibit them for as
long as the C64x module does.

Reviewed-by: Rich Salz <rsalz@openssl.org>
Reviewed-by: Tim Hudson <tjh@openssl.org>
Reviewed-by: Stephen Henson <steve@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/4265)
Andy Polyakov 2016-11-25 13:11:09 +01:00, committed by Dr. Stephen Henson
parent fe36a69847
commit 5526e5791f
6 changed files with 3011 additions and 0 deletions

crypto/aes/asm/aes-c64x.pl (new file, 1375 lines)

File diff suppressed because it is too large.

crypto/c64xcpuid.pl (new file, 326 lines)

@@ -0,0 +1,326 @@
#! /usr/bin/env perl
# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
$code.=<<___;
.text
.if .ASSEMBLER_VERSION<7000000
.asg 0,__TI_EABI__
.endif
.if __TI_EABI__
.asg OPENSSL_rdtsc,_OPENSSL_rdtsc
.asg OPENSSL_cleanse,_OPENSSL_cleanse
.asg CRYPTO_memcmp,_CRYPTO_memcmp
.asg OPENSSL_atomic_add,_OPENSSL_atomic_add
.asg OPENSSL_wipe_cpu,_OPENSSL_wipe_cpu
.asg OPENSSL_instrument_bus,_OPENSSL_instrument_bus
.asg OPENSSL_instrument_bus2,_OPENSSL_instrument_bus2
.endif
.asg B3,RA
.asg 0x01AC0000,TIMER_BASE ; Timer 2
.global _OPENSSL_rdtsc
_OPENSSL_rdtsc:
.asmfunc
MVKL TIMER_BASE,A5
MVKH TIMER_BASE,A5
LDW *A5[0],A2 ; load CTL
LDW *A5[2],A4 ; load CTN
NOP 2
.if .BIG_ENDIAN
MVK 0x2c0,A7 ; internal clock source, don't hold, go
|| MVK -1,A6 ; maximum period
.else
MVK 0x2c0,A6 ; internal clock source, don't hold, go
|| MVK -1,A7 ; maximum period
.endif
[!A2] STDW A7:A6,*A5[0] ; fire it up
|| BNOP RA,5
.endasmfunc
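
The routine above amounts to the following minimal C sketch, assuming the Timer 2 register layout implied by the assembly (CTL at word offset 0, PRD at word 1, CNT at word 2); OPENSSL_rdtsc_ref is an illustrative name:

#include <stdint.h>

#define TIMER_BASE 0x01AC0000u /* Timer 2, as in the .asg above */

uint32_t OPENSSL_rdtsc_ref(void)
{
    volatile uint32_t *timer = (volatile uint32_t *)TIMER_BASE;
    uint32_t ctl = timer[0];    /* load CTL */
    uint32_t cnt = timer[2];    /* load CNT */

    if (ctl == 0) {             /* timer idle: fire it up */
        timer[1] = 0xffffffffu; /* PRD: maximum period */
        timer[0] = 0x2c0;       /* internal clock source, don't hold, go */
    }
    return cnt;
}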
.global _OPENSSL_cleanse
_OPENSSL_cleanse:
.asmfunc
ZERO A3:A2
|| ZERO B2
|| SHRU B4,3,B0 ; is length >= 8
|| ADD 1,A4,B6
[!B0] BNOP RA
|| [B0] SUB B0,1,B2
|| ZERO A1
|| ZERO B1
[B2] BDEC cleanse_loop?,B2
||[!B0] CMPLT 0,B4,A1
||[!B0] CMPLT 1,B4,B1
|| ZERO B5
[A1] STB A2,*A4++[2]
|| [B1] STB B5,*B6++[2]
|| [B2] BDEC cleanse_loop?,B2
||[!B0] CMPLT 2,B4,A1
||[!B0] CMPLT 3,B4,B1
[A1] STB A2,*A4++[2]
|| [B1] STB B5,*B6++[2]
|| [B2] BDEC cleanse_loop?,B2
||[!B0] CMPLT 4,B4,A1
||[!B0] CMPLT 5,B4,B1
[A1] STB A2,*A4++[2]
|| [B1] STB B5,*B6++[2]
|| [B2] BDEC cleanse_loop?,B2
||[!B0] CMPLT 6,B4,A1
[A1] STB A2,*A4++[2]
|| [B2] BDEC cleanse_loop?,B2
cleanse_loop?:
STNDW A3:A2,*A4++
|| SUB B4,8,B4
|| [B2] BDEC cleanse_loop?,B2
MV B4,B0 ; remaining bytes
|| ADD 1,A4,B6
|| BNOP RA
[B0] CMPLT 0,B0,A1
|| [B0] CMPLT 1,B0,B1
[A1] STB A2,*A4++[2]
|| [B1] STB B5,*B6++[2]
|| [B0] CMPLT 2,B0,A1
|| [B0] CMPLT 3,B0,B1
[A1] STB A2,*A4++[2]
|| [B1] STB B5,*B6++[2]
|| [B0] CMPLT 4,B0,A1
|| [B0] CMPLT 5,B0,B1
[A1] STB A2,*A4++[2]
|| [B1] STB B5,*B6++[2]
|| [B0] CMPLT 6,B0,A1
[A1] STB A2,*A4++[2]
.endasmfunc
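
Functionally the routine is just a zeroization loop; the assembly splits the head and tail into two interleaved byte streams (even and odd addresses, stride 2) and covers the bulk with 8-byte STNDW stores. A hedged C equivalent of the semantics only:

#include <stddef.h>

void OPENSSL_cleanse_ref(void *ptr, size_t len)
{
    volatile unsigned char *p = ptr; /* volatile keeps stores from being elided */

    while (len--)
        *p++ = 0;
}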
.if 0
.global _CRYPTO_memcmp
_CRYPTO_memcmp:
.asmfunc
MV A6,B0
[!B0] BNOP RA
||[!B0] ZERO A4
|| [B0] ZERO A1:A0
[B0] LDBU *A4++,A5
|| [B0] LDBU *B4++,B5
|| [B0] BDEC memcmp_loop?,B0
[B0] LDBU *A4++,A5
|| [B0] LDBU *B4++,B5
|| [B0] BDEC memcmp_loop?,B0
[B0] LDBU *A4++,A5
|| [B0] LDBU *B4++,B5
|| [B0] BDEC memcmp_loop?,B0
[B0] LDBU *A4++,A5
|| [B0] LDBU *B4++,B5
|| [B0] BDEC memcmp_loop?,B0
[B0] LDBU *A4++,A5
|| [B0] LDBU *B4++,B5
|| [B0] BDEC memcmp_loop?,B0
XOR A5,B5,A1
|| [B0] LDBU *A4++,A5
|| [B0] LDBU *B4++,B5
|| [B0] BDEC memcmp_loop?,B0
memcmp_loop?:
OR A1,A0,A0
|| XOR A5,B5,A1
|| [B0] LDBU *A4++,A5
|| [B0] LDBU *B4++,B5
|| [B0] BDEC memcmp_loop?,B0
BNOP RA,3
ZERO A4
[A0] MVK 1,A4
.endasmfunc
.endif
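
Even though the block above is disabled with .if 0, it implements the usual constant-time comparison: XOR every byte pair and OR the results together, so running time does not depend on where the first difference occurs. A C reference of the same idea:

#include <stddef.h>

int CRYPTO_memcmp_ref(const void *in_a, const void *in_b, size_t len)
{
    const unsigned char *a = in_a, *b = in_b;
    unsigned char acc = 0;
    size_t i;

    for (i = 0; i < len; i++)
        acc |= a[i] ^ b[i];   /* accumulate differences, no early exit */
    return acc != 0;          /* 0 if equal, 1 otherwise, as in the asm */
}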
.global _OPENSSL_atomic_add
_OPENSSL_atomic_add:
.asmfunc
BNOP atomic_store? ; pre-C64x+ systems are uni-processor, it's
|| LDW *A4,B5 ; enough to hold interrupts off through
; the load-update-store cycle to achieve
; atomicity
NOP
BNOP RA,3 ; and this branch stretches even over store
ADD B4,B5,B5
atomic_store?:
STW B5,*A4
|| MV B5,A4
.endasmfunc
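
As the comment says, atomicity here relies on the delay slots of the taken branch covering the whole load-add-store window, during which interrupts are not serviced on a uni-processor. What the routine computes is simply:

int OPENSSL_atomic_add_ref(int *val, int amount)
{
    return *val += amount;    /* NOT atomic in plain C; the asm gets
                               * atomicity from branch delay slots */
}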
.global _OPENSSL_wipe_cpu
_OPENSSL_wipe_cpu:
.asmfunc
ZERO A0
|| ZERO B0
|| ZERO A1
|| ZERO B1
ZERO A3:A2
|| MVD B0,B2
|| ZERO A4
|| ZERO B4
|| ZERO A5
|| ZERO B5
|| BNOP RA
ZERO A7:A6
|| ZERO B7:B6
|| ZERO A8
|| ZERO B8
|| ZERO A9
|| ZERO B9
ZERO A17:A16
|| ZERO B17:B16
|| ZERO A18
|| ZERO B18
|| ZERO A19
|| ZERO B19
ZERO A21:A20
|| ZERO B21:B20
|| ZERO A22
|| ZERO B22
|| ZERO A23
|| ZERO B23
ZERO A25:A24
|| ZERO B25:B24
|| ZERO A26
|| ZERO B26
|| ZERO A27
|| ZERO B27
ZERO A29:A28
|| ZERO B29:B28
|| ZERO A30
|| ZERO B30
|| ZERO A31
|| ZERO B31
.endasmfunc
CLFLUSH .macro CONTROL,ADDR,LEN
B passthrough?
|| STW ADDR,*CONTROL[0]
STW LEN,*CONTROL[1]
spinlock?:
LDW *CONTROL[1],A0
NOP 3
passthrough?:
NOP
[A0] BNOP spinlock?,5
.endm
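
The macro drives a cache write-back/invalidate engine through a pair of memory-mapped registers, the base-address and word-count registers the code labels L1DWIBAR/L2WIBAR, and spins until the count reads back as zero. A C sketch of the same protocol, with clflush_ref as an illustrative name:

#include <stdint.h>

static void clflush_ref(volatile uint32_t *control, uint32_t addr, uint32_t len)
{
    control[0] = addr;        /* block base address */
    control[1] = len;         /* word count; writing it starts the operation */
    while (control[1] != 0)   /* hardware counts down to zero when done */
        ;
}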
.global _OPENSSL_instrument_bus
_OPENSSL_instrument_bus:
.asmfunc
MV B4,B0 ; reassign sizeof(output)
|| MV A4,B4 ; reassign output
|| MVK 0x00004030,A3
|| MVKL TIMER_BASE,B16
MV B0,A4 ; return value
|| MVK 1,A1
|| MVKH 0x01840000,A3 ; L1DWIBAR
|| MVKH TIMER_BASE,B16
LDW *B16[2],B8 ; collect 1st tick
|| MVK 0x00004010,A5
NOP 4
MV B8,B9 ; lasttick = tick
|| MVK 0,B7 ; lastdiff = 0
|| MVKH 0x01840000,A5 ; L2WIBAR
CLFLUSH A3,B4,A1 ; write-back and invalidate L1D line
CLFLUSH A5,B4,A1 ; write-back and invalidate L2 line
LDW *B4,B5
NOP 4
ADD B7,B5,B5
STW B5,*B4
bus_loop1?:
LDW *B16[2],B8
|| [B0] SUB B0,1,B0
NOP 4
SUB B8,B9,B7 ; lastdiff = tick - lasttick
|| MV B8,B9 ; lasttick = tick
CLFLUSH A3,B4,A1 ; write-back and invalidate L1D line
CLFLUSH A5,B4,A1 ; write-back and invalidate L2 line
LDW *B4,B5
NOP 4
ADD B7,B5,B5
STW B5,*B4 ; [!B1] is removed to flatten samples
|| ADDK 4,B4
|| [B0] BNOP bus_loop1?,5
BNOP RA,5
.endasmfunc
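
The loop harvests entropy from memory-subsystem jitter: each iteration evicts the cache line behind the output word, performs a read-modify-write that must travel the bus, and folds in the tick delta measured by the timer. A hedged C sketch of the sampling logic, where read_timer and clflush_line stand in for the timer read and the CLFLUSH macro above:

#include <stddef.h>
#include <stdint.h>

extern uint32_t read_timer(void);    /* e.g. OPENSSL_rdtsc above */
extern void clflush_line(void *p);   /* CLFLUSH equivalent */

size_t OPENSSL_instrument_bus_ref(unsigned int *out, size_t cnt)
{
    uint32_t lasttick = read_timer(), lastdiff = 0, tick;
    size_t i;

    for (i = 0; i < cnt; i++) {
        clflush_line(&out[i]);       /* force the next access onto the bus */
        out[i] += lastdiff;          /* read-modify-write through memory */
        tick = read_timer();
        lastdiff = tick - lasttick;  /* jitter in this delta is the entropy */
        lasttick = tick;
    }
    return cnt;
}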
.global _OPENSSL_instrument_bus2
_OPENSSL_instrument_bus2:
.asmfunc
MV A6,B0 ; reassign max
|| MV A4,B4 ; reassign sizeof(output)
|| MVK 0x00004030,A3
|| MVKL TIMER_BASE,B16
MV A4,B4 ; reassign output
|| MVK 0,A4 ; return value
|| MVK 1,A1
|| MVKH 0x01840000,A3 ; L1DWIBAR
|| MVKH TIMER_BASE,B16
LDW *B16[2],B8 ; collect 1st tick
|| MVK 0x00004010,A5
NOP 4
MV B8,B9 ; lasttick = tick
|| MVK 0,B7 ; lastdiff = 0
|| MVKH 0x01840000,A5 ; L2WIBAR
CLFLUSH A3,B4,A1 ; write-back and invalidate L1D line
CLFLUSH A5,B4,A1 ; write-back and invalidate L2 line
LDW *B4,B5
NOP 4
ADD B7,B5,B5
STW B5,*B4
LDW *B16[2],B8 ; collect 1st diff
NOP 4
SUB B8,B9,B7 ; lastdiff = tick - lasttick
|| MV B8,B9 ; lasttick = tick
|| SUB B0,1,B0
bus_loop2?:
CLFLUSH A3,B4,A1 ; write-back and invalidate L1D line
CLFLUSH A5,B4,A1 ; write-back and invalidate L2 line
LDW *B4,B5
NOP 4
ADD B7,B5,B5
STW B5,*B4 ; [!B1] is removed to flatten samples
||[!B0] BNOP bus_loop2_done?,2
|| SUB B0,1,B0
LDW *B16[2],B8
NOP 4
SUB B8,B9,B8
|| MV B8,B9
CMPEQ B8,B7,B2
|| MV B8,B7
[!B2] ADDAW B4,1,B4
||[!B2] ADDK 1,A4
CMPEQ A4,A6,A2
[!A2] BNOP bus_loop2?,5
bus_loop2_done?:
BNOP RA,5
.endasmfunc
.if __TI_EABI__
.sect ".init_array"
.else
.sect ".pinit"
.endif
.align 4
.long _OPENSSL_rdtsc ; auto-start timer
___
print $code;
close STDOUT;


@@ -0,0 +1,230 @@
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# SHA1 for C64x.
#
# November 2016
#
# This is a fully-unrolled SHA1 implementation. It's 25% faster than
# the one with compact loops, doesn't use an in-memory ring buffer, as
# everything is accommodated in registers, and has "perfect" interrupt
# agility. The drawback is obviously the code size...
while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
($CTX,$INP,$NUM) = ("A4","B4","A6"); # arguments
($A,$B,$C,$D,$E, $Arot,$F,$F0,$K) = map("A$_",(16..20, 21..24));
@V = ($A,$B,$C,$D,$E);
@X = map("B$_",(16..31));
($Actx,$Bctx,$Cctx,$Dctx,$Ectx) = map("A$_",(3,6..9)); # zaps $NUM
sub BODY_00_19 {
my ($i,$a,$b,$c,$d,$e) = @_;
my $j = ($i+1)&15;
$code.=<<___ if ($i<14);
ROTL $a,5,$Arot ;; $i
|| AND $c,$b,$F
|| ANDN $d,$b,$F0
|| ADD $K,$e,$e ; E+=K
|| LDNW *${INP}++,@X[$i+2]
OR $F0,$F,$F ; F_00_19(B,C,D)
|| ROTL $b,30,$b
|| SWAP2 @X[$i+1],@X[$i+1]
|| ADD @X[$i],$e,$e ; E+=X[i]
ADD $Arot,$e,$e ; E+=rot(A,5)
|| SWAP4 @X[$i+1],@X[$i+1]
ADD $F,$e,$e ; E+=F_00_19(B,C,D)
___
$code.=<<___ if ($i==14);
ROTL $a,5,$Arot ;; $i
|| AND $c,$b,$F
|| ANDN $d,$b,$F0
|| ADD $K,$e,$e ; E+=K
OR $F0,$F,$F ; F_00_19(B,C,D)
|| ROTL $b,30,$b
|| ADD @X[$i],$e,$e ; E+=X[i]
|| SWAP2 @X[$i+1],@X[$i+1]
ADD $Arot,$e,$e ; E+=rot(A,5)
|| SWAP4 @X[$i+1],@X[$i+1]
ADD $F,$e,$e ; E+=F_00_19(B,C,D)
___
$code.=<<___ if ($i==15);
|| XOR @X[($j+2)&15],@X[$j],@X[$j]
ROTL $a,5,$Arot ;; $i
|| AND $c,$b,$F
|| ANDN $d,$b,$F0
|| ADD $K,$e,$e ; E+=K
|| XOR @X[($j+8)&15],@X[$j],@X[$j]
OR $F0,$F,$F ; F_00_19(B,C,D)
|| ROTL $b,30,$b
|| ADD @X[$i],$e,$e ; E+=X[i]
|| XOR @X[($j+13)&15],@X[$j],@X[$j]
ADD $Arot,$e,$e ; E+=rot(A,5)
|| ROTL @X[$j],1,@X[$j]
ADD $F,$e,$e ; E+=F_00_19(B,C,D)
___
$code.=<<___ if ($i>15);
|| XOR @X[($j+2)&15],@X[$j],@X[$j]
ROTL $a,5,$Arot ;; $i
|| AND $c,$b,$F
|| ANDN $d,$b,$F0
|| ADD $K,$e,$e ; E+=K
|| XOR @X[($j+8)&15],@X[$j],@X[$j]
OR $F0,$F,$F ; F_00_19(B,C,D)
|| ROTL $b,30,$b
|| ADD @X[$i&15],$e,$e ; E+=X[i]
|| XOR @X[($j+13)&15],@X[$j],@X[$j]
ADD $Arot,$e,$e ; E+=rot(A,5)
|| ROTL @X[$j],1,@X[$j]
ADD $F,$e,$e ; E+=F_00_19(B,C,D)
___
}
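For reference, one round of the schedule emitted above corresponds to the following C, with F_00_19 computed exactly as the AND/ANDN/OR triple in the bundles (a hedged sketch, not the module's own code):

#include <stdint.h>

#define ROTL32(x,n) (((x) << (n)) | ((x) >> (32 - (n))))

static void sha1_round_00_19(uint32_t *a, uint32_t *b, uint32_t *c,
                             uint32_t *d, uint32_t *e, uint32_t xi)
{
    uint32_t f = (*b & *c) | (~*b & *d); /* AND, ANDN, OR in the asm */
    uint32_t t = ROTL32(*a, 5) + f + *e + xi + 0x5a827999;

    *e = *d;
    *d = *c;
    *c = ROTL32(*b, 30);
    *b = *a;
    *a = t;
}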
sub BODY_20_39 {
my ($i,$a,$b,$c,$d,$e) = @_;
my $j = ($i+1)&15;
$code.=<<___ if ($i<79);
|| XOR @X[($j+2)&15],@X[$j],@X[$j]
ROTL $a,5,$Arot ;; $i
|| XOR $c,$b,$F
|| ADD $K,$e,$e ; E+=K
|| XOR @X[($j+8)&15],@X[$j],@X[$j]
XOR $d,$F,$F ; F_20_39(B,C,D)
|| ROTL $b,30,$b
|| ADD @X[$i&15],$e,$e ; E+=X[i]
|| XOR @X[($j+13)&15],@X[$j],@X[$j]
ADD $Arot,$e,$e ; E+=rot(A,5)
|| ROTL @X[$j],1,@X[$j]
ADD $F,$e,$e ; E+=F_20_39(B,C,D)
___
$code.=<<___ if ($i==79);
|| [A0] B loop?
|| [A0] LDNW *${INP}++,@X[0] ; pre-fetch input
ROTL $a,5,$Arot ;; $i
|| XOR $c,$b,$F
|| ADD $K,$e,$e ; E+=K
|| [A0] LDNW *${INP}++,@X[1]
XOR $d,$F,$F ; F_20_39(B,C,D)
|| ROTL $b,30,$b
|| ADD @X[$i&15],$e,$e ; E+=X[i]
ADD $Arot,$e,$e ; E+=rot(A,5)
ADD $F,$e,$e ; E+=F_20_39(B,C,D)
|| ADD $Bctx,$a,$a ; accumulate context
|| ADD $Cctx,$b,$b
ADD $Dctx,$c,$c
|| ADD $Ectx,$d,$d
|| ADD $Actx,$e,$e
;;===== branch to loop? is taken here
___
}
sub BODY_40_59 {
my ($i,$a,$b,$c,$d,$e) = @_;
my $j = ($i+1)&15;
$code.=<<___;
|| XOR @X[($j+2)&15],@X[$j],@X[$j]
ROTL $a,5,$Arot ;; $i
|| AND $c,$b,$F
|| AND $d,$b,$F0
|| ADD $K,$e,$e ; E+=K
|| XOR @X[($j+8)&15],@X[$j],@X[$j]
XOR $F0,$F,$F
|| AND $c,$d,$F0
|| ROTL $b,30,$b
|| XOR @X[($j+13)&15],@X[$j],@X[$j]
|| ADD @X[$i&15],$e,$e ; E+=X[i]
XOR $F0,$F,$F ; F_40_59(B,C,D)
|| ADD $Arot,$e,$e ; E+=rot(A,5)
|| ROTL @X[$j],1,@X[$j]
ADD $F,$e,$e ; E+=F_40_59(B,C,D)
___
}
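The three selector functions implemented by BODY_00_19, BODY_20_39 and BODY_40_59 are, in C (note that BODY_40_59 evaluates the majority function as two XORs over three ANDs, which is what the instruction pairing above reflects):

#include <stdint.h>

static uint32_t F_00_19(uint32_t b, uint32_t c, uint32_t d)
{ return (b & c) | (~b & d); }           /* choose */

static uint32_t F_20_39(uint32_t b, uint32_t c, uint32_t d)
{ return b ^ c ^ d; }                    /* parity */

static uint32_t F_40_59(uint32_t b, uint32_t c, uint32_t d)
{ return (b & c) ^ (b & d) ^ (c & d); }  /* majority */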
$code=<<___;
.text
.if .ASSEMBLER_VERSION<7000000
.asg 0,__TI_EABI__
.endif
.if __TI_EABI__
.asg sha1_block_data_order,_sha1_block_data_order
.endif
.asg B3,RA
.asg A15,FP
.asg B15,SP
.if .BIG_ENDIAN
.asg MV,SWAP2
.asg MV,SWAP4
.endif
.global _sha1_block_data_order
_sha1_block_data_order:
.asmfunc
MV $NUM,A0 ; reassign $NUM
[!A0] BNOP RA ; if ($NUM==0) return;
|| [A0] LDW *${CTX}[0],$A ; load A-E...
[A0] LDW *${CTX}[1],$B
[A0] LDW *${CTX}[2],$C
[A0] LDW *${CTX}[3],$D
[A0] LDW *${CTX}[4],$E
[A0] LDNW *${INP}++,@X[0] ; pre-fetch input
[A0] LDNW *${INP}++,@X[1]
NOP 3
loop?:
SUB A0,1,A0
|| MV $A,$Actx
|| MVD $B,$Bctx
|| SWAP2 @X[0],@X[0]
|| MVKL 0x5a827999,$K
MVKH 0x5a827999,$K ; K_00_19
|| MV $C,$Cctx
|| MV $D,$Dctx
|| MVD $E,$Ectx
|| SWAP4 @X[0],@X[0]
___
for ($i=0;$i<20;$i++) { &BODY_00_19($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
|| MVKL 0x6ed9eba1,$K
MVKH 0x6ed9eba1,$K ; K_20_39
___
for (;$i<40;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
|| MVKL 0x8f1bbcdc,$K
MVKH 0x8f1bbcdc,$K ; K_40_59
___
for (;$i<60;$i++) { &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
|| MVKL 0xca62c1d6,$K
MVKH 0xca62c1d6,$K ; K_60_79
___
for (;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
BNOP RA ; return
STW $A,*${CTX}[0] ; emit A-E...
STW $B,*${CTX}[1]
STW $C,*${CTX}[2]
STW $D,*${CTX}[3]
STW $E,*${CTX}[4]
.endasmfunc
.sect .const
.cstring "SHA1 block transform for C64x, CRYPTOGAMS by <appro\@openssl.org>"
.align 4
___
print $code;
close STDOUT;

crypto/sha/asm/sha1-c64x.pl (new file, 330 lines)

@@ -0,0 +1,330 @@
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# SHA1 for C64x.
#
# November 2016
#
# If compared to compiler-generated code with similar characteristics,
# i.e. compiled with OPENSSL_SMALL_FOOTPRINT and utilizing SPLOOPs,
# this implementation is 25% smaller and >2x faster. In absolute terms
# performance is (a quite impressive) ~6.5 cycles per processed byte.
# Unlike its predecessor, the sha1-c64xplus module, this module has
# worse interrupt agility: while the original added up to 5 cycles of
# delay to interrupt response, this module adds up to 100. The fully
# unrolled implementation adds no delay and is even 25% faster, but is
# almost 5x larger...
#
# !!! Note that this module uses AMR, which means that all interrupt
# service routines are expected to preserve it and, for their own
# well-being, zero it upon entry.
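
For readers unfamiliar with AMR: the module programs it so that pointer arithmetic on the two X-buffer registers wraps inside a 64-byte block, turning a flat 16-word array into a free ring buffer for the message schedule. A portable C sketch of the addressing it gets for free (ring_load/ring_store are illustrative names):

#include <stdint.h>

static uint32_t ring_load(const uint32_t X[16], unsigned i)
{
    return X[i & 15];         /* wraps like the 64-byte circular block */
}

static void ring_store(uint32_t X[16], unsigned i, uint32_t v)
{
    X[i & 15] = v;
}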
while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
($CTX,$INP,$NUM) = ("A4","B4","A6"); # arguments
($A,$B,$C,$D,$E, $Arot,$F,$F0,$T,$K) = map("A$_",(16..20, 21..25));
($X0,$X2,$X8,$X13) = ("A26","B26","A27","B27");
($TX0,$TX1,$TX2,$TX3) = map("B$_",(28..31));
($XPA,$XPB) = ("A5","B5"); # X circular buffer
($Actx,$Bctx,$Cctx,$Dctx,$Ectx) = map("A$_",(3,6..9)); # zaps $NUM
$code=<<___;
.text
.if .ASSEMBLER_VERSION<7000000
.asg 0,__TI_EABI__
.endif
.if __TI_EABI__
.asg sha1_block_data_order,_sha1_block_data_order
.endif
.asg B3,RA
.asg A15,FP
.asg B15,SP
.if .BIG_ENDIAN
.asg MV,SWAP2
.asg MV,SWAP4
.endif
.global _sha1_block_data_order
_sha1_block_data_order:
.asmfunc stack_usage(64)
MV $NUM,A0 ; reassign $NUM
|| MVK -64,B0
[!A0] BNOP RA ; if ($NUM==0) return;
|| [A0] STW FP,*SP--[16] ; save frame pointer and alloca(64)
|| [A0] MV SP,FP
[A0] LDW *${CTX}[0],$A ; load A-E...
|| [A0] AND B0,SP,SP ; align stack at 64 bytes
[A0] LDW *${CTX}[1],$B
|| [A0] SUBAW SP,2,SP ; reserve two words above buffer
[A0] LDW *${CTX}[2],$C
|| [A0] MVK 0x00404,B0
[A0] LDW *${CTX}[3],$D
|| [A0] MVKH 0x50000,B0 ; 0x050404, 64 bytes for $XP[AB]
[A0] LDW *${CTX}[4],$E
|| [A0] MVC B0,AMR ; setup circular addressing
LDNW *${INP}++,$TX1 ; pre-fetch input
NOP 1
loop?:
MVKL 0x5a827999,$K
|| ADDAW SP,2,$XPB
|| SUB A0,1,A0
MVKH 0x5a827999,$K ; K_00_19
|| MV $A,$Actx
|| MV $B,$Bctx
;;==================================================
B body_00_13? ; BODY_00_13
|| MVK 11,B0
|| MV $XPB,$XPA
|| MV $C,$Cctx
|| MV $D,$Dctx
|| MVD $E,$Ectx
body_00_13?:
ROTL $A,5,$Arot
|| AND $C,$B,$F
|| ANDN $D,$B,$F0
|| ADD $K,$E,$T ; T=E+K
XOR $F0,$F,$F ; F_00_19(B,C,D)
|| MV $D,$E ; E=D
|| MV $C,$D ; D=C
|| SWAP2 $TX1,$TX2
|| LDNW *${INP}++,$TX1
ADD $F,$T,$T ; T+=F_00_19(B,C,D)
|| ROTL $B,30,$C ; C=ROL(B,30)
|| SWAP4 $TX2,$TX3 ; byte swap
ADD $Arot,$T,$T ; T+=ROL(A,5)
|| MV $A,$B ; B=A
ADD $TX3,$T,$A ; A=T+Xi
|| STW $TX3,*${XPB}++
|| BDEC body_00_13?,B0
;;==================================================
ROTL $A,5,$Arot ; BODY_14
|| AND $C,$B,$F
|| ANDN $D,$B,$F0
|| ADD $K,$E,$T ; T=E+K
XOR $F0,$F,$F ; F_00_19(B,C,D)
|| MV $D,$E ; E=D
|| MV $C,$D ; D=C
|| SWAP2 $TX1,$TX2
|| LDNW *${INP}++,$TX1
ADD $F,$T,$T ; T+=F_00_19(B,C,D)
|| ROTL $B,30,$C ; C=ROL(B,30)
|| SWAP4 $TX2,$TX2 ; byte swap
|| LDW *${XPA}++,$X0 ; fetches from X ring buffer are
|| LDW *${XPB}[4],$X2 ; 2 iterations ahead
ADD $Arot,$T,$T ; T+=ROL(A,5)
|| MV $A,$B ; B=A
|| LDW *${XPA}[7],$X8
|| MV $TX3,$X13 ; || LDW *${XPB}[15],$X13
|| MV $TX2,$TX3
ADD $TX2,$T,$A ; A=T+Xi
|| STW $TX2,*${XPB}++
;;==================================================
ROTL $A,5,$Arot ; BODY_15
|| AND $C,$B,$F
|| ANDN $D,$B,$F0
|| ADD $K,$E,$T ; T=E+K
XOR $F0,$F,$F ; F_00_19(B,C,D)
|| MV $D,$E ; E=D
|| MV $C,$D ; D=C
|| SWAP2 $TX1,$TX2
ADD $F,$T,$T ; T+=F_00_19(B,C,D)
|| ROTL $B,30,$C ; C=ROL(B,30)
|| SWAP4 $TX2,$TX2 ; byte swap
|| XOR $X0,$X2,$TX0 ; Xupdate XORs are 1 iteration ahead
|| LDW *${XPA}++,$X0
|| LDW *${XPB}[4],$X2
ADD $Arot,$T,$T ; T+=ROL(A,5)
|| MV $A,$B ; B=A
|| XOR $X8,$X13,$TX1
|| LDW *${XPA}[7],$X8
|| MV $TX3,$X13 ; || LDW *${XPB}[15],$X13
|| MV $TX2,$TX3
ADD $TX2,$T,$A ; A=T+Xi
|| STW $TX2,*${XPB}++
|| XOR $TX0,$TX1,$TX1
;;==================================================
|| B body_16_19? ; BODY_16_19
|| MVK 1,B0
body_16_19?:
ROTL $A,5,$Arot
|| AND $C,$B,$F
|| ANDN $D,$B,$F0
|| ADD $K,$E,$T ; T=E+K
|| ROTL $TX1,1,$TX2 ; Xupdate output
XOR $F0,$F,$F ; F_00_19(B,C,D)
|| MV $D,$E ; E=D
|| MV $C,$D ; D=C
ADD $F,$T,$T ; T+=F_00_19(B,C,D)
|| ROTL $B,30,$C ; C=ROL(B,30)
|| XOR $X0,$X2,$TX0
|| LDW *${XPA}++,$X0
|| LDW *${XPB}[4],$X2
ADD $Arot,$T,$T ; T+=ROL(A,5)
|| MV $A,$B ; B=A
|| XOR $X8,$X13,$TX1
|| LDW *${XPA}[7],$X8
|| MV $TX3,$X13 ; || LDW *${XPB}[15],$X13
|| MV $TX2,$TX3
ADD $TX2,$T,$A ; A=T+Xi
|| STW $TX2,*${XPB}++
|| XOR $TX0,$TX1,$TX1
|| BDEC body_16_19?,B0
MVKL 0x6ed9eba1,$K
|| MVK 17,B0
MVKH 0x6ed9eba1,$K ; K_20_39
___
sub BODY_20_39 {
my $label = shift;
$code.=<<___;
;;==================================================
|| B $label ; BODY_20_39
$label:
ROTL $A,5,$Arot
|| XOR $B,$C,$F
|| ADD $K,$E,$T ; T=E+K
|| ROTL $TX1,1,$TX2 ; Xupdate output
XOR $D,$F,$F ; F_20_39(B,C,D)
|| MV $D,$E ; E=D
|| MV $C,$D ; D=C
ADD $F,$T,$T ; T+=F_20_39(B,C,D)
|| ROTL $B,30,$C ; C=ROL(B,30)
|| XOR $X0,$X2,$TX0
|| LDW *${XPA}++,$X0
|| LDW *${XPB}[4],$X2
ADD $Arot,$T,$T ; T+=ROL(A,5)
|| MV $A,$B ; B=A
|| XOR $X8,$X13,$TX1
|| LDW *${XPA}[7],$X8
|| MV $TX3,$X13 ; || LDW *${XPB}[15],$X13
|| MV $TX2,$TX3
ADD $TX2,$T,$A ; A=T+Xi
|| STW $TX2,*${XPB}++ ; last one is redundant
|| XOR $TX0,$TX1,$TX1
|| BDEC $label,B0
___
} &BODY_20_39("body_20_39?");
$code.=<<___;
;;==================================================
MVKL 0x8f1bbcdc,$K
|| MVK 17,B0
MVKH 0x8f1bbcdc,$K ; K_40_59
|| B body_40_59? ; BODY_40_59
|| AND $B,$C,$F
|| AND $B,$D,$F0
body_40_59?:
ROTL $A,5,$Arot
|| XOR $F0,$F,$F
|| AND $C,$D,$F0
|| ADD $K,$E,$T ; T=E+K
|| ROTL $TX1,1,$TX2 ; Xupdate output
XOR $F0,$F,$F ; F_40_59(B,C,D)
|| MV $D,$E ; E=D
|| MV $C,$D ; D=C
ADD $F,$T,$T ; T+=F_40_59(B,C,D)
|| ROTL $B,30,$C ; C=ROL(B,30)
|| XOR $X0,$X2,$TX0
|| LDW *${XPA}++,$X0
|| LDW *${XPB}[4],$X2
ADD $Arot,$T,$T ; T+=ROL(A,5)
|| MV $A,$B ; B=A
|| XOR $X8,$X13,$TX1
|| LDW *${XPA}[7],$X8
|| MV $TX3,$X13 ; || LDW *${XPB}[15],$X13
|| MV $TX2,$TX3
ADD $TX2,$T,$A ; A=T+Xi
|| STW $TX2,*${XPB}++
|| XOR $TX0,$TX1,$TX1
|| AND $B,$C,$F
|| AND $B,$D,$F0
|| BDEC body_40_59?,B0
MVKL 0xca62c1d6,$K
|| MVK 16,B0
MVKH 0xca62c1d6,$K ; K_60_79
___
&BODY_20_39("body_60_78?"); # BODY_60_78
$code.=<<___;
;;==================================================
[A0] B loop?
|| ROTL $A,5,$Arot ; BODY_79
|| XOR $B,$C,$F
|| ROTL $TX1,1,$TX2 ; Xupdate output
[A0] LDNW *${INP}++,$TX1 ; pre-fetch input
|| ADD $K,$E,$T ; T=E+K
|| XOR $D,$F,$F ; F_20_39(B,C,D)
ADD $F,$T,$T ; T+=F_20_39(B,C,D)
|| ADD $Ectx,$D,$E ; E=D,E+=Ectx
|| ADD $Dctx,$C,$D ; D=C,D+=Dctx
|| ROTL $B,30,$C ; C=ROL(B,30)
ADD $Arot,$T,$T ; T+=ROL(A,5)
|| ADD $Bctx,$A,$B ; B=A,B+=Bctx
ADD $TX2,$T,$A ; A=T+Xi
ADD $Actx,$A,$A ; A+=Actx
|| ADD $Cctx,$C,$C ; C+=Cctx
;; end of loop?
BNOP RA ; return
|| MV FP,SP ; restore stack pointer
|| LDW *FP[0],FP ; restore frame pointer
STW $A,*${CTX}[0] ; emit A-E...
|| MVK 0,B0
STW $B,*${CTX}[1]
|| MVC B0,AMR ; clear AMR
STW $C,*${CTX}[2]
STW $D,*${CTX}[3]
STW $E,*${CTX}[4]
.endasmfunc
.sect .const
.cstring "SHA1 block transform for C64x, CRYPTOGAMS by <appro\@openssl.org>"
.align 4
___
print $code;
close STDOUT;

crypto/sha/asm/sha256-c64x.pl (new file, 313 lines)

@@ -0,0 +1,313 @@
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# SHA256 for C64x.
#
# November 2016
#
# Performance is just below 10 cycles per processed byte, which is
# almost 40% faster than compiler-generated code. Unrolling is unlikely
# to give more than ~8% improvement...
#
# !!! Note that this module uses AMR, which means that all interrupt
# service routines are expected to preserve it and, for their own
# well-being, zero it upon entry.
while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
($CTXA,$INP,$NUM) = ("A4","B4","A6"); # arguments
$K256="A3";
($A,$Actx,$B,$Bctx,$C,$Cctx,$D,$Dctx,$T2,$S0,$s1,$t0a,$t1a,$t2a,$X9,$X14)
=map("A$_",(16..31));
($E,$Ectx,$F,$Fctx,$G,$Gctx,$H,$Hctx,$T1,$S1,$s0,$t0e,$t1e,$t2e,$X1,$X15)
=map("B$_",(16..31));
($Xia,$Xib)=("A5","B5"); # circular/ring buffer
$CTXB=$t2e;
($Xn,$X0,$K)=("B7","B8","B9");
($Maj,$Ch)=($T2,"B6");
$code.=<<___;
.text
.if .ASSEMBLER_VERSION<7000000
.asg 0,__TI_EABI__
.endif
.if __TI_EABI__
.nocmp
.asg sha256_block_data_order,_sha256_block_data_order
.endif
.asg B3,RA
.asg A15,FP
.asg B15,SP
.if .BIG_ENDIAN
.asg SWAP2,MV
.asg SWAP4,MV
.endif
.global _sha256_block_data_order
_sha256_block_data_order:
__sha256_block:
.asmfunc stack_usage(64)
MV $NUM,A0 ; reassign $NUM
|| MVK -64,B0
[!A0] BNOP RA ; if ($NUM==0) return;
|| [A0] STW FP,*SP--[16] ; save frame pointer and alloca(64)
|| [A0] MV SP,FP
[A0] ADDKPC _sha256_block_data_order,B2
|| [A0] AND B0,SP,SP ; align stack at 64 bytes
.if __TI_EABI__
[A0] MVK 0x00404,B1
|| [A0] MVKL \$PCR_OFFSET(K256,__sha256_block),$K256
[A0] MVKH 0x50000,B1
|| [A0] MVKH \$PCR_OFFSET(K256,__sha256_block),$K256
.else
[A0] MVK 0x00404,B1
|| [A0] MVKL (K256-__sha256_block),$K256
[A0] MVKH 0x50000,B1
|| [A0] MVKH (K256-__sha256_block),$K256
.endif
[A0] MVC B1,AMR ; setup circular addressing
|| [A0] MV SP,$Xia
[A0] MV SP,$Xib
|| [A0] ADD B2,$K256,$K256
|| [A0] MV $CTXA,$CTXB
|| [A0] SUBAW SP,2,SP ; reserve two words above buffer
LDW *${CTXA}[0],$A ; load ctx
|| LDW *${CTXB}[4],$E
LDW *${CTXA}[1],$B
|| LDW *${CTXB}[5],$F
LDW *${CTXA}[2],$C
|| LDW *${CTXB}[6],$G
LDW *${CTXA}[3],$D
|| LDW *${CTXB}[7],$H
LDNW *$INP++,$Xn ; pre-fetch input
LDW *$K256++,$K ; pre-fetch K256[0]
NOP
ADDAW $Xia,9,$Xia
outerloop?:
SUB A0,1,A0
|| MV $A,$Actx
|| MV $E,$Ectx
|| MVD $B,$Bctx
|| MVD $F,$Fctx
MV $C,$Cctx
|| MV $G,$Gctx
|| MVD $D,$Dctx
|| MVD $H,$Hctx
|| SWAP4 $Xn,$X0
MVK 14,B0 ; loop counter
|| SWAP2 $X0,$X0
loop_00_14?: ; BODY_00_14
LDNW *$INP++,$Xn
|| ROTL $A,30,$S0
|| OR $A,$B,$Maj
|| AND $A,$B,$t2a
|| ROTL $E,26,$S1
|| AND $F,$E,$Ch
|| ANDN $G,$E,$t2e
ROTL $A,19,$t0a
|| AND $C,$Maj,$Maj
|| ROTL $E,21,$t0e
|| XOR $t2e,$Ch,$Ch ; Ch(e,f,g) = (e&f)^(~e&g)
ROTL $A,10,$t1a
|| OR $t2a,$Maj,$Maj ; Maj(a,b,c) = ((a|b)&c)|(a&b)
|| ROTL $E,7,$t1e
|| ADD $K,$H,$T1 ; T1 = h + K256[i]
|| [B0] BDEC loop_00_14?,B0
ADD $X0,$T1,$T1 ; T1 += X[i];
|| STW $X0,*$Xib++
|| XOR $t0a,$S0,$S0
|| XOR $t0e,$S1,$S1
XOR $t1a,$S0,$S0 ; Sigma0(a)
|| XOR $t1e,$S1,$S1 ; Sigma1(e)
|| LDW *$K256++,$K ; pre-fetch K256[i+1]
|| ADD $Ch,$T1,$T1 ; T1 += Ch(e,f,g)
ADD $S1,$T1,$T1 ; T1 += Sigma1(e)
|| ADD $S0,$Maj,$T2 ; T2 = Sigma0(a) + Maj(a,b,c)
|| ROTL $G,0,$H ; h = g
|| MV $F,$G ; g = f
|| MV $X0,$X14
|| SWAP4 $Xn,$X0
SWAP2 $X0,$X0
|| MV $E,$F ; f = e
|| ADD $D,$T1,$E ; e = d + T1
|| MV $C,$D ; d = c
MV $B,$C ; c = b
|| MV $A,$B ; b = a
|| ADD $T1,$T2,$A ; a = T1 + T2
;;===== branch to loop_00_14? is taken here
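
One iteration of loop_00_14? computes a textbook SHA-256 round; the only scheduling-driven twist is the Maj variant ((a|b)&c)|(a&b), which equals the usual (a&b)^(a&c)^(b&c) for all inputs but maps better onto the available OR/AND units. A hedged C rendition of the round (sha256_round is an illustrative name):

#include <stdint.h>

#define ROTR32(x,n) (((x) >> (n)) | ((x) << (32 - (n))))

static void sha256_round(uint32_t s[8], uint32_t Xi, uint32_t Ki)
{
    uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
    uint32_t e = s[4], f = s[5], g = s[6], h = s[7];
    uint32_t S1  = ROTR32(e,6) ^ ROTR32(e,11) ^ ROTR32(e,25); /* ROTL 26,21,7 */
    uint32_t Ch  = (e & f) ^ (~e & g);
    uint32_t T1  = h + Ki + Xi + Ch + S1;
    uint32_t S0  = ROTR32(a,2) ^ ROTR32(a,13) ^ ROTR32(a,22); /* ROTL 30,19,10 */
    uint32_t Maj = ((a | b) & c) | (a & b);  /* OR-flavoured majority */
    uint32_t T2  = S0 + Maj;

    s[7] = g; s[6] = f; s[5] = e; s[4] = d + T1;
    s[3] = c; s[2] = b; s[1] = a; s[0] = T1 + T2;
}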
ROTL $A,30,$S0 ; BODY_15
|| OR $A,$B,$Maj
|| AND $A,$B,$t2a
|| ROTL $E,26,$S1
|| AND $F,$E,$Ch
|| ANDN $G,$E,$t2e
|| LDW *${Xib}[1],$Xn ; modulo-scheduled
ROTL $A,19,$t0a
|| AND $C,$Maj,$Maj
|| ROTL $E,21,$t0e
|| XOR $t2e,$Ch,$Ch ; Ch(e,f,g) = (e&f)^(~e&g)
|| LDW *${Xib}[2],$X1 ; modulo-scheduled
ROTL $A,10,$t1a
|| OR $t2a,$Maj,$Maj ; Maj(a,b,c) = ((a|b)&c)|(a&b)
|| ROTL $E,7,$t1e
|| ADD $K,$H,$T1 ; T1 = h + K256[i]
ADD $X0,$T1,$T1 ; T1 += X[i];
|| STW $X0,*$Xib++
|| XOR $t0a,$S0,$S0
|| XOR $t0e,$S1,$S1
XOR $t1a,$S0,$S0 ; Sigma0(a)
|| XOR $t1e,$S1,$S1 ; Sigma1(e)
|| LDW *$K256++,$K ; pre-fetch K256[i+1]
|| ADD $Ch,$T1,$T1 ; T1 += Ch(e,f,g)
ADD $S1,$T1,$T1 ; T1 += Sigma1(e)
|| ADD $S0,$Maj,$T2 ; T2 = Sigma0(a) + Maj(a,b,c)
|| ROTL $G,0,$H ; h = g
|| MV $F,$G ; g = f
|| MV $X0,$X15
MV $E,$F ; f = e
|| ADD $D,$T1,$E ; e = d + T1
|| MV $C,$D ; d = c
|| MV $Xn,$X0 ; modulo-scheduled
|| LDW *$Xia,$X9 ; modulo-scheduled
|| ROTL $X1,25,$t0e ; modulo-scheduled
|| ROTL $X14,15,$t0a ; modulo-scheduled
SHRU $X1,3,$s0 ; modulo-scheduled
|| SHRU $X14,10,$s1 ; modulo-scheduled
|| ROTL $B,0,$C ; c = b
|| MV $A,$B ; b = a
|| ADD $T1,$T2,$A ; a = T1 + T2
MVK 47,B1 ; loop counter
|| ROTL $X1,14,$t1e ; modulo-scheduled
|| ROTL $X14,13,$t1a ; modulo-scheduled
loop_16_63?: ; BODY_16_63
XOR $t0e,$s0,$s0
|| XOR $t0a,$s1,$s1
|| MV $X15,$X14
|| MV $X1,$Xn
XOR $t1e,$s0,$s0 ; sigma0(X[i+1])
|| XOR $t1a,$s1,$s1 ; sigma1(X[i+14])
|| LDW *${Xib}[2],$X1 ; modulo-scheduled
ROTL $A,30,$S0
|| OR $A,$B,$Maj
|| AND $A,$B,$t2a
|| ROTL $E,26,$S1
|| AND $F,$E,$Ch
|| ANDN $G,$E,$t2e
|| ADD $X9,$X0,$X0 ; X[i] += X[i+9]
ROTL $A,19,$t0a
|| AND $C,$Maj,$Maj
|| ROTL $E,21,$t0e
|| XOR $t2e,$Ch,$Ch ; Ch(e,f,g) = (e&f)^(~e&g)
|| ADD $s0,$X0,$X0 ; X[i] += sigma1(X[i+1])
ROTL $A,10,$t1a
|| OR $t2a,$Maj,$Maj ; Maj(a,b,c) = ((a|b)&c)|(a&b)
|| ROTL $E,7,$t1e
|| ADD $H,$K,$T1 ; T1 = h + K256[i]
|| ADD $s1,$X0,$X0 ; X[i] += sigma1(X[i+14])
|| [B1] BDEC loop_16_63?,B1
XOR $t0a,$S0,$S0
|| XOR $t0e,$S1,$S1
|| ADD $X0,$T1,$T1 ; T1 += X[i]
|| STW $X0,*$Xib++
XOR $t1a,$S0,$S0 ; Sigma0(a)
|| XOR $t1e,$S1,$S1 ; Sigma1(e)
|| ADD $Ch,$T1,$T1 ; T1 += Ch(e,f,g)
|| MV $X0,$X15
|| ROTL $G,0,$H ; h = g
|| LDW *$K256++,$K ; pre-fetch K256[i+1]
ADD $S1,$T1,$T1 ; T1 += Sigma1(e)
|| ADD $S0,$Maj,$T2 ; T2 = Sigma0(a) + Maj(a,b,c)
|| MV $F,$G ; g = f
|| MV $Xn,$X0 ; modulo-scheduled
|| LDW *++$Xia,$X9 ; modulo-scheduled
|| ROTL $X1,25,$t0e ; modulo-scheduled
|| ROTL $X14,15,$t0a ; modulo-scheduled
ROTL $X1,14,$t1e ; modulo-scheduled
|| ROTL $X14,13,$t1a ; modulo-scheduled
|| MV $E,$F ; f = e
|| ADD $D,$T1,$E ; e = d + T1
|| MV $C,$D ; d = c
|| MV $B,$C ; c = b
MV $A,$B ; b = a
|| ADD $T1,$T2,$A ; a = T1 + T2
|| SHRU $X1,3,$s0 ; modulo-scheduled
|| SHRU $X14,10,$s1 ; modulo-scheduled
;;===== branch to loop_16_63? is taken here
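
The modulo-scheduled Xupdate above is the standard SHA-256 message expansion; the ROTL amounts in the assembly (25,14 for X[i+1] and 15,13 for X[i+14]) are just the ROTR amounts 7,18 and 17,19 written as left rotates. In C (sha256_schedule is an illustrative name):

#include <stdint.h>

#define ROTR32(x,n) (((x) >> (n)) | ((x) << (32 - (n))))

static uint32_t sha256_schedule(uint32_t X[16], unsigned i)
{
    uint32_t x1  = X[(i + 1) & 15], x14 = X[(i + 14) & 15];
    uint32_t s0  = ROTR32(x1,7)   ^ ROTR32(x1,18)  ^ (x1  >> 3);
    uint32_t s1  = ROTR32(x14,17) ^ ROTR32(x14,19) ^ (x14 >> 10);

    X[i & 15] += X[(i + 9) & 15] + s0 + s1;
    return X[i & 15];
}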
[A0] B outerloop?
|| [A0] LDNW *$INP++,$Xn ; pre-fetch input
|| [A0] ADDK -260,$K256 ; rewind K256
|| ADD $Actx,$A,$A ; accumulate ctx
|| ADD $Ectx,$E,$E
|| ADD $Bctx,$B,$B
ADD $Fctx,$F,$F
|| ADD $Cctx,$C,$C
|| ADD $Gctx,$G,$G
|| ADD $Dctx,$D,$D
|| ADD $Hctx,$H,$H
|| [A0] LDW *$K256++,$K ; pre-fetch K256[0]
[!A0] BNOP RA
||[!A0] MV $CTXA,$CTXB
[!A0] MV FP,SP ; restore stack pointer
||[!A0] LDW *FP[0],FP ; restore frame pointer
[!A0] STW $A,*${CTXA}[0] ; save ctx
||[!A0] STW $E,*${CTXB}[4]
||[!A0] MVK 0,B0
[!A0] STW $B,*${CTXA}[1]
||[!A0] STW $F,*${CTXB}[5]
||[!A0] MVC B0,AMR ; clear AMR
STW $C,*${CTXA}[2]
|| STW $G,*${CTXB}[6]
STW $D,*${CTXA}[3]
|| STW $H,*${CTXB}[7]
.endasmfunc
.if __TI_EABI__
.sect ".text:sha_asm.const"
.else
.sect ".const:sha_asm"
.endif
.align 128
K256:
.uword 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
.uword 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
.uword 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
.uword 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
.uword 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
.uword 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
.uword 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
.uword 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
.uword 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
.uword 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
.uword 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
.uword 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
.uword 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
.uword 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
.uword 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
.uword 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
.cstring "SHA256 block transform for C64x, CRYPTOGAMS by <appro\@openssl.org>"
.align 4
___
print $code;

crypto/sha/asm/sha512-c64x.pl (new file, 437 lines)

@@ -0,0 +1,437 @@
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# SHA512 for C64x.
#
# November 2016
#
# Performance is ~19 cycles per processed byte. Compared to the block
# transform function from sha512.c compiled with cl6x with -mv6400+
# -o2 -DOPENSSL_SMALL_FOOTPRINT, it's almost 7x faster and 2x smaller.
# Loop unrolling won't make this implementation any faster, because
# it's effectively dominated by SHRU||SHL pairs and you can't schedule
# more of them.
#
# !!! Note that this module uses AMR, which means that all interrupt
# service routines are expected to preserve it and, for their own
# well-being, zero it upon entry.
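
The SHRU||SHL pairs the note above refers to come from synthesizing 64-bit rotates out of 32-bit halves: each half contributes to both result halves, one via a right shift and one via a left shift. A portable C sketch of the pattern (rotr64_halves is an illustrative name):

#include <stdint.h>

static void rotr64_halves(uint32_t hi, uint32_t lo, unsigned n,
                          uint32_t *rhi, uint32_t *rlo)
{
    uint32_t t;

    if (n >= 32) {            /* rotating by 32+ swaps the halves first */
        t = hi; hi = lo; lo = t;
        n -= 32;
    }
    if (n == 0) {
        *rhi = hi; *rlo = lo;
        return;
    }
    *rhi = (hi >> n) | (lo << (32 - n)); /* one SHRU||SHL pair */
    *rlo = (lo >> n) | (hi << (32 - n)); /* another SHRU||SHL pair */
}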
while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
($CTXA,$INP,$NUM) = ("A4","B4","A6"); # arguments
$K512="A3";
($Ahi,$Actxhi,$Bhi,$Bctxhi,$Chi,$Cctxhi,$Dhi,$Dctxhi,
$Ehi,$Ectxhi,$Fhi,$Fctxhi,$Ghi,$Gctxhi,$Hhi,$Hctxhi)=map("A$_",(16..31));
($Alo,$Actxlo,$Blo,$Bctxlo,$Clo,$Cctxlo,$Dlo,$Dctxlo,
$Elo,$Ectxlo,$Flo,$Fctxlo,$Glo,$Gctxlo,$Hlo,$Hctxlo)=map("B$_",(16..31));
($S1hi,$CHhi,$S0hi,$t0hi)=map("A$_",(10..13));
($S1lo,$CHlo,$S0lo,$t0lo)=map("B$_",(10..13));
($T1hi, $T2hi)= ("A6","A7");
($T1lo,$T1carry,$T2lo,$T2carry)=("B6","B7","B8","B9");
($Khi,$Klo)=("A9","A8");
($MAJhi,$MAJlo)=($T2hi,$T2lo);
($t1hi,$t1lo)=($Khi,"B2");
$CTXB=$t1lo;
($Xihi,$Xilo)=("A5","B5"); # circular/ring buffer
$code.=<<___;
.text
.if .ASSEMBLER_VERSION<7000000
.asg 0,__TI_EABI__
.endif
.if __TI_EABI__
.nocmp
.asg sha512_block_data_order,_sha512_block_data_order
.endif
.asg B3,RA
.asg A15,FP
.asg B15,SP
.if .BIG_ENDIAN
.asg $Khi,KHI
.asg $Klo,KLO
.else
.asg $Khi,KLO
.asg $Klo,KHI
.endif
.global _sha512_block_data_order
_sha512_block_data_order:
__sha512_block:
.asmfunc stack_usage(40+128)
MV $NUM,A0 ; reassign $NUM
|| MVK -128,B0
[!A0] BNOP RA ; if ($NUM==0) return;
|| [A0] STW FP,*SP--(40) ; save frame pointer
|| [A0] MV SP,FP
[A0] STDW B13:B12,*SP[4]
|| [A0] MVK 0x00404,B1
[A0] STDW B11:B10,*SP[3]
|| [A0] STDW A13:A12,*FP[-3]
|| [A0] MVKH 0x60000,B1
[A0] STDW A11:A10,*SP[1]
|| [A0] MVC B1,AMR ; setup circular addressing
|| [A0] ADD B0,SP,SP ; alloca(128)
.if __TI_EABI__
[A0] AND B0,SP,SP ; align stack at 128 bytes
|| [A0] ADDKPC __sha512_block,B1
|| [A0] MVKL \$PCR_OFFSET(K512,__sha512_block),$K512
[A0] MVKH \$PCR_OFFSET(K512,__sha512_block),$K512
|| [A0] SUBAW SP,2,SP ; reserve two words above buffer
.else
[A0] AND B0,SP,SP ; align stack at 128 bytes
|| [A0] ADDKPC __sha512_block,B1
|| [A0] MVKL (K512-__sha512_block),$K512
[A0] MVKH (K512-__sha512_block),$K512
|| [A0] SUBAW SP,2,SP ; reserve two words above buffer
.endif
ADDAW SP,3,$Xilo
ADD SP,4*2,$Xihi ; ADDAW SP,2,$Xihi
|| MV $CTXA,$CTXB
LDW *${CTXA}[0^.LITTLE_ENDIAN],$Ahi ; load ctx
|| LDW *${CTXB}[1^.LITTLE_ENDIAN],$Alo
|| ADD B1,$K512,$K512
LDW *${CTXA}[2^.LITTLE_ENDIAN],$Bhi
|| LDW *${CTXB}[3^.LITTLE_ENDIAN],$Blo
LDW *${CTXA}[4^.LITTLE_ENDIAN],$Chi
|| LDW *${CTXB}[5^.LITTLE_ENDIAN],$Clo
LDW *${CTXA}[6^.LITTLE_ENDIAN],$Dhi
|| LDW *${CTXB}[7^.LITTLE_ENDIAN],$Dlo
LDW *${CTXA}[8^.LITTLE_ENDIAN],$Ehi
|| LDW *${CTXB}[9^.LITTLE_ENDIAN],$Elo
LDW *${CTXA}[10^.LITTLE_ENDIAN],$Fhi
|| LDW *${CTXB}[11^.LITTLE_ENDIAN],$Flo
LDW *${CTXA}[12^.LITTLE_ENDIAN],$Ghi
|| LDW *${CTXB}[13^.LITTLE_ENDIAN],$Glo
LDW *${CTXA}[14^.LITTLE_ENDIAN],$Hhi
|| LDW *${CTXB}[15^.LITTLE_ENDIAN],$Hlo
LDNDW *$INP++,B11:B10 ; pre-fetch input
LDDW *$K512++,$Khi:$Klo ; pre-fetch K512[0]
outerloop?:
MVK 15,B0 ; loop counters
|| MVK 64,B1
|| SUB A0,1,A0
MV $Ahi,$Actxhi
|| MV $Alo,$Actxlo
|| MV $Bhi,$Bctxhi
|| MV $Blo,$Bctxlo
|| MV $Chi,$Cctxhi
|| MV $Clo,$Cctxlo
|| MVD $Dhi,$Dctxhi
|| MVD $Dlo,$Dctxlo
MV $Ehi,$Ectxhi
|| MV $Elo,$Ectxlo
|| MV $Fhi,$Fctxhi
|| MV $Flo,$Fctxlo
|| MV $Ghi,$Gctxhi
|| MV $Glo,$Gctxlo
|| MVD $Hhi,$Hctxhi
|| MVD $Hlo,$Hctxlo
loop0_15?:
.if .BIG_ENDIAN
MV B11,$T1hi
|| MV B10,$T1lo
.else
SWAP4 B10,$T1hi
|| SWAP4 B11,$T1lo
SWAP2 $T1hi,$T1hi
|| SWAP2 $T1lo,$T1lo
.endif
STW $T1hi,*$Xihi++[2] ; original loop16_79?
|| STW $T1lo,*$Xilo++[2] ; X[i] = T1
|| ADD $Hhi,$T1hi,$T1hi
|| ADDU $Hlo,$T1lo,$T1carry:$T1lo ; T1 += h
|| SHRU $Ehi,14,$S1hi
|| SHL $Ehi,32-14,$S1lo
loop16_79?:
XOR $Fhi,$Ghi,$CHhi
|| XOR $Flo,$Glo,$CHlo
|| ADD KHI,$T1hi,$T1hi
|| ADDU KLO,$T1carry:$T1lo,$T1carry:$T1lo ; T1 += K512[i]
|| SHRU $Elo,14,$t0lo
|| SHL $Elo,32-14,$t0hi
XOR $t0hi,$S1hi,$S1hi
|| XOR $t0lo,$S1lo,$S1lo
|| AND $Ehi,$CHhi,$CHhi
|| AND $Elo,$CHlo,$CHlo
|| ROTL $Ghi,0,$Hhi
|| ROTL $Glo,0,$Hlo ; h = g
|| SHRU $Ehi,18,$t0hi
|| SHL $Ehi,32-18,$t0lo
XOR $t0hi,$S1hi,$S1hi
|| XOR $t0lo,$S1lo,$S1lo
|| XOR $Ghi,$CHhi,$CHhi
|| XOR $Glo,$CHlo,$CHlo ; Ch(e,f,g) = ((f^g)&e)^g
|| ROTL $Fhi,0,$Ghi
|| ROTL $Flo,0,$Glo ; g = f
|| SHRU $Elo,18,$t0lo
|| SHL $Elo,32-18,$t0hi
XOR $t0hi,$S1hi,$S1hi
|| XOR $t0lo,$S1lo,$S1lo
|| OR $Ahi,$Bhi,$MAJhi
|| OR $Alo,$Blo,$MAJlo
|| ROTL $Ehi,0,$Fhi
|| ROTL $Elo,0,$Flo ; f = e
|| SHRU $Ehi,41-32,$t0lo
|| SHL $Ehi,64-41,$t0hi
XOR $t0hi,$S1hi,$S1hi
|| XOR $t0lo,$S1lo,$S1lo
|| AND $Chi,$MAJhi,$MAJhi
|| AND $Clo,$MAJlo,$MAJlo
|| ROTL $Dhi,0,$Ehi
|| ROTL $Dlo,0,$Elo ; e = d
|| SHRU $Elo,41-32,$t0hi
|| SHL $Elo,64-41,$t0lo
XOR $t0hi,$S1hi,$S1hi
|| XOR $t0lo,$S1lo,$S1lo ; Sigma1(e)
|| AND $Ahi,$Bhi,$t1hi
|| AND $Alo,$Blo,$t1lo
|| ROTL $Chi,0,$Dhi
|| ROTL $Clo,0,$Dlo ; d = c
|| SHRU $Ahi,28,$S0hi
|| SHL $Ahi,32-28,$S0lo
OR $t1hi,$MAJhi,$MAJhi
|| OR $t1lo,$MAJlo,$MAJlo ; Maj(a,b,c) = ((a|b)&c)|(a&b)
|| ADD $CHhi,$T1hi,$T1hi
|| ADDU $CHlo,$T1carry:$T1lo,$T1carry:$T1lo ; T1 += Ch(e,f,g)
|| ROTL $Bhi,0,$Chi
|| ROTL $Blo,0,$Clo ; c = b
|| SHRU $Alo,28,$t0lo
|| SHL $Alo,32-28,$t0hi
XOR $t0hi,$S0hi,$S0hi
|| XOR $t0lo,$S0lo,$S0lo
|| ADD $S1hi,$T1hi,$T1hi
|| ADDU $S1lo,$T1carry:$T1lo,$T1carry:$T1lo ; T1 += Sigma1(e)
|| ROTL $Ahi,0,$Bhi
|| ROTL $Alo,0,$Blo ; b = a
|| SHRU $Ahi,34-32,$t0lo
|| SHL $Ahi,64-34,$t0hi
XOR $t0hi,$S0hi,$S0hi
|| XOR $t0lo,$S0lo,$S0lo
|| ADD $MAJhi,$T1hi,$T2hi
|| ADDU $MAJlo,$T1carry:$T1lo,$T2carry:$T2lo ; T2 = T1+Maj(a,b,c)
|| SHRU $Alo,34-32,$t0hi
|| SHL $Alo,64-34,$t0lo
XOR $t0hi,$S0hi,$S0hi
|| XOR $t0lo,$S0lo,$S0lo
|| ADD $Ehi,$T1hi,$T1hi
|| ADDU $Elo,$T1carry:$T1lo,$T1carry:$T1lo ; T1 += e
|| SHRU $Ahi,39-32,$t0lo
|| SHL $Ahi,64-39,$t0hi
[B0] BNOP loop0_15?
|| [B0] LDNDW *$INP++,B11:B10 ; pre-fetch input
XOR $t0hi,$S0hi,$S0hi
|| XOR $t0lo,$S0lo,$S0lo
|| SHRU $Alo,39-32,$t0hi
|| SHL $Alo,64-39,$t0lo
||[!B0] LDW *${Xihi}[28],$T1hi
||[!B0] LDW *${Xilo}[28],$T1lo ; X[i+14]
XOR $t0hi,$S0hi,$S0hi
|| XOR $t0lo,$S0lo,$S0lo ; Sigma0(a)
|| ADD $T1carry,$T1hi,$Ehi
|| ROTL $T1lo,0,$Elo ; e = T1, "ghost" value
||[!B1] BNOP break?
ADD $S0hi,$T2hi,$T2hi
|| ADDU $S0lo,$T2carry:$T2lo,$T2carry:$T2lo ; T2 += Sigma0(a)
|| [B1] LDDW *$K512++,$Khi:$Klo ; pre-fetch K512[i]
NOP ; avoid cross-path stall
ADD $T2carry,$T2hi,$Ahi
|| MV $T2lo,$Alo ; a = T2
|| [B0] SUB B0,1,B0
;;===== branch to loop0_15? is taken here
[B1] LDW *${Xihi}[2],$T2hi
|| [B1] LDW *${Xilo}[2],$T2lo ; X[i+1]
|| [B1] SHRU $T1hi,19,$S1hi
|| [B1] SHL $T1hi,32-19,$S1lo
[B1] SHRU $T1lo,19,$t0lo
|| [B1] SHL $T1lo,32-19,$t0hi
;;===== branch to break? is taken here
XOR $t0hi,$S1hi,$S1hi
|| XOR $t0lo,$S1lo,$S1lo
|| SHRU $T1hi,61-32,$t0lo
|| SHL $T1hi,64-61,$t0hi
XOR $t0hi,$S1hi,$S1hi
|| XOR $t0lo,$S1lo,$S1lo
|| SHRU $T1lo,61-32,$t0hi
|| SHL $T1lo,64-61,$t0lo
XOR $t0hi,$S1hi,$S1hi
|| XOR $t0lo,$S1lo,$S1lo
|| SHRU $T1hi,6,$t0hi
|| SHL $T1hi,32-6,$t0lo
XOR $t0hi,$S1hi,$S1hi
|| XOR $t0lo,$S1lo,$S1lo
|| SHRU $T1lo,6,$t0lo
|| LDW *${Xihi}[18],$T1hi
|| LDW *${Xilo}[18],$T1lo ; X[i+9]
XOR $t0lo,$S1lo,$S1lo ; sigma1(Xi[i+14])
|| LDW *${Xihi}[0],$CHhi
|| LDW *${Xilo}[0],$CHlo ; X[i]
|| SHRU $T2hi,1,$S0hi
|| SHL $T2hi,32-1,$S0lo
SHRU $T2lo,1,$t0lo
|| SHL $T2lo,32-1,$t0hi
XOR $t0hi,$S0hi,$S0hi
|| XOR $t0lo,$S0lo,$S0lo
|| SHRU $T2hi,8,$t0hi
|| SHL $T2hi,32-8,$t0lo
XOR $t0hi,$S0hi,$S0hi
|| XOR $t0lo,$S0lo,$S0lo
|| SHRU $T2lo,8,$t0lo
|| SHL $T2lo,32-8,$t0hi
XOR $t0hi,$S0hi,$S0hi
|| XOR $t0lo,$S0lo,$S0lo
|| ADD $S1hi,$T1hi,$T1hi
|| ADDU $S1lo,$T1lo,$T1carry:$T1lo ; T1 = X[i+9]+sigma1()
|| SHRU $T2hi,7,$t0hi
|| SHL $T2hi,32-7,$t0lo
XOR $t0hi,$S0hi,$S0hi
|| XOR $t0lo,$S0lo,$S0lo
|| ADD $CHhi,$T1hi,$T1hi
|| ADDU $CHlo,$T1carry:$T1lo,$T1carry:$T1lo ; T1 += X[i]
|| SHRU $T2lo,7,$t0lo
|| [B1] BNOP loop16_79?
XOR $t0lo,$S0lo,$S0lo ; sigma0(Xi[i+1])
ADD $S0hi,$T1hi,$T1hi
|| ADDU $S0lo,$T1carry:$T1lo,$T1carry:$T1lo ; T1 += sigma0()
|| [B1] SUB B1,1,B1
NOP ; avoid cross-path stall
ADD $T1carry,$T1hi,$T1hi
STW $T1hi,*$Xihi++[2] ; copied "top" bundle
|| STW $T1lo,*$Xilo++[2] ; X[i] = T1
|| ADD $Hhi,$T1hi,$T1hi
|| ADDU $Hlo,$T1lo,$T1carry:$T1lo ; T1 += h
|| SHRU $Ehi,14,$S1hi
|| SHL $Ehi,32-14,$S1lo
;;===== branch to loop16_79? is taken here
break?:
ADD $Ahi,$Actxhi,$Ahi ; accumulate ctx
|| ADDU $Alo,$Actxlo,$Actxlo:$Alo
|| [A0] LDNDW *$INP++,B11:B10 ; pre-fetch input
|| [A0] ADDK -640,$K512 ; rewind pointer to K512
ADD $Bhi,$Bctxhi,$Bhi
|| ADDU $Blo,$Bctxlo,$Bctxlo:$Blo
|| [A0] LDDW *$K512++,$Khi:$Klo ; pre-fetch K512[0]
ADD $Chi,$Cctxhi,$Chi
|| ADDU $Clo,$Cctxlo,$Cctxlo:$Clo
|| ADD $Actxlo,$Ahi,$Ahi
||[!A0] MV $CTXA,$CTXB
ADD $Dhi,$Dctxhi,$Dhi
|| ADDU $Dlo,$Dctxlo,$Dctxlo:$Dlo
|| ADD $Bctxlo,$Bhi,$Bhi
||[!A0] STW $Ahi,*${CTXA}[0^.LITTLE_ENDIAN] ; save ctx
||[!A0] STW $Alo,*${CTXB}[1^.LITTLE_ENDIAN]
ADD $Ehi,$Ectxhi,$Ehi
|| ADDU $Elo,$Ectxlo,$Ectxlo:$Elo
|| ADD $Cctxlo,$Chi,$Chi
|| [A0] BNOP outerloop?
||[!A0] STW $Bhi,*${CTXA}[2^.LITTLE_ENDIAN]
||[!A0] STW $Blo,*${CTXB}[3^.LITTLE_ENDIAN]
ADD $Fhi,$Fctxhi,$Fhi
|| ADDU $Flo,$Fctxlo,$Fctxlo:$Flo
|| ADD $Dctxlo,$Dhi,$Dhi
||[!A0] STW $Chi,*${CTXA}[4^.LITTLE_ENDIAN]
||[!A0] STW $Clo,*${CTXB}[5^.LITTLE_ENDIAN]
ADD $Ghi,$Gctxhi,$Ghi
|| ADDU $Glo,$Gctxlo,$Gctxlo:$Glo
|| ADD $Ectxlo,$Ehi,$Ehi
||[!A0] STW $Dhi,*${CTXA}[6^.LITTLE_ENDIAN]
||[!A0] STW $Dlo,*${CTXB}[7^.LITTLE_ENDIAN]
ADD $Hhi,$Hctxhi,$Hhi
|| ADDU $Hlo,$Hctxlo,$Hctxlo:$Hlo
|| ADD $Fctxlo,$Fhi,$Fhi
||[!A0] STW $Ehi,*${CTXA}[8^.LITTLE_ENDIAN]
||[!A0] STW $Elo,*${CTXB}[9^.LITTLE_ENDIAN]
ADD $Gctxlo,$Ghi,$Ghi
||[!A0] STW $Fhi,*${CTXA}[10^.LITTLE_ENDIAN]
||[!A0] STW $Flo,*${CTXB}[11^.LITTLE_ENDIAN]
ADD $Hctxlo,$Hhi,$Hhi
||[!A0] STW $Ghi,*${CTXA}[12^.LITTLE_ENDIAN]
||[!A0] STW $Glo,*${CTXB}[13^.LITTLE_ENDIAN]
;;===== branch to outerloop? is taken here
STW $Hhi,*${CTXA}[14^.LITTLE_ENDIAN]
|| STW $Hlo,*${CTXB}[15^.LITTLE_ENDIAN]
|| MVK -40,B0
ADD FP,B0,SP ; destroy circular buffer
|| LDDW *FP[-4],A11:A10
LDDW *SP[2],A13:A12
|| LDDW *FP[-2],B11:B10
LDDW *SP[4],B13:B12
|| BNOP RA
LDW *++SP(40),FP ; restore frame pointer
MVK 0,B0
MVC B0,AMR ; clear AMR
NOP 2 ; wait till FP is committed
.endasmfunc
.if __TI_EABI__
.sect ".text:sha_asm.const"
.else
.sect ".const:sha_asm"
.endif
.align 128
K512:
.uword 0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd
.uword 0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc
.uword 0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019
.uword 0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118
.uword 0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe
.uword 0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2
.uword 0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1
.uword 0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694
.uword 0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3
.uword 0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65
.uword 0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483
.uword 0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5
.uword 0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210
.uword 0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4
.uword 0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725
.uword 0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70
.uword 0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926
.uword 0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df
.uword 0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8
.uword 0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b
.uword 0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001
.uword 0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30
.uword 0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910
.uword 0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8
.uword 0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53
.uword 0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8
.uword 0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb
.uword 0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3
.uword 0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60
.uword 0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec
.uword 0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9
.uword 0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b
.uword 0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207
.uword 0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178
.uword 0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6
.uword 0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b
.uword 0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493
.uword 0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c
.uword 0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a
.uword 0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817
.cstring "SHA512 block transform for C64x, CRYPTOGAMS by <appro\@openssl.org>"
.align 4
___
print $code;
close STDOUT;