x86_64 performance patch.

Andy Polyakov 2002-12-14 20:45:39 +00:00
parent 7e201e9f73
commit 1cbdbcd587
12 changed files with 111 additions and 85 deletions

View file

@@ -391,7 +391,7 @@ my %table=(
"linux-s390", "gcc:-DB_ENDIAN -DTERMIO -DNO_ASM -O3 -fomit-frame-pointer -Wall::-D_REENTRANT::-ldl:BN_LLONG::::::::::dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
"linux-s390x", "gcc:-DB_ENDIAN -DTERMIO -DNO_ASM -O3 -fomit-frame-pointer -Wall::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG::::::::::dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
"linux-ia64", "gcc:-DL_ENDIAN -DTERMIO -O3 -fomit-frame-pointer -Wall::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK RC4_CHAR:asm/ia64.o:::::::::dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
"linux-x86_64", "gcc:-DL_ENDIAN -DNO_ASM ::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG::::::::::dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
"linux-x86_64", "gcc:-m64 -DL_ENDIAN -DTERMIO -O3 -Wall -DMD32_REG_T=int::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK RC4_CHAR BF_PTR2 DES_INT DES_UNROLL:asm/x86_64-gcc.o:::::::::dlfcn:linux-shared:-fPIC:-m64:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
"NetBSD-sparc", "gcc:-DTERMIOS -O3 -fomit-frame-pointer -mv8 -Wall -DB_ENDIAN::(unknown):::BN_LLONG MD2_CHAR RC4_INDEX DES_UNROLL::::::::::dlfcn:bsd-gcc-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
"NetBSD-m68", "gcc:-DTERMIOS -O3 -fomit-frame-pointer -Wall -DB_ENDIAN::(unknown):::BN_LLONG MD2_CHAR RC4_INDEX DES_UNROLL::::::::::dlfcn:bsd-gcc-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
"NetBSD-x86", "gcc:-DTERMIOS -O3 -fomit-frame-pointer -m486 -Wall::(unknown):::BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}::::::::::dlfcn:bsd-gcc-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",

View file

@@ -274,10 +274,7 @@ do_gnu-shared:
libs="$$libs -l$$i"; \
done
DETECT_GNU_LD=${CC} -v 2>&1 | grep '^gcc' >/dev/null 2>&1 && \
my_ld=`${CC} -print-prog-name=ld 2>&1` && \
[ -n "$$my_ld" ] && \
$$my_ld -v 2>&1 | grep 'GNU ld' >/dev/null 2>&1
DETECT_GNU_LD=(${CC} -Wl,-V /dev/null 2>&1 | grep '^GNU ld' )>/dev/null
# For Darwin AKA Mac OS/X (dyld)
do_darwin-shared:

View file

@@ -136,6 +136,8 @@ asm/ia64-cpp.o: asm/ia64.S
$(CC) $(ASFLAGS) -c -o asm/ia64-cpp.o /tmp/ia64.$$$$.s; \
rm -f /tmp/ia64.$$$$.s
asm/x86_64-gcc.o: asm/x86_64-gcc.c
files:
$(PERL) $(TOP)/util/files.pl Makefile.ssl >> $(TOP)/MINFO

View file

@@ -150,6 +150,20 @@ int BN_div(BIGNUM *dv, BIGNUM *rem, const BIGNUM *m, const BIGNUM *d,
q; \
})
# define REMAINDER_IS_ALREADY_CALCULATED
# elif defined(__x86_64) && defined(SIXTY_FOUR_BIT_LONG)
/*
* Same story here, but it's 128-bit by 64-bit division. Wow!
* <appro@fy.chalmers.se>
*/
# define bn_div_words(n0,n1,d0) \
({ asm volatile ( \
"divq %4" \
: "=a"(q), "=d"(rem) \
: "a"(n1), "d"(n0), "g"(d0) \
: "cc"); \
q; \
})
# define REMAINDER_IS_ALREADY_CALCULATED
# endif /* __<cpu> */
# endif /* __GNUC__ */
#endif /* OPENSSL_NO_ASM */
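For readers skimming the hunk above, here is a minimal standalone sketch, not part of the commit, of what the divq idiom computes: the 128-bit value n0:n1 (n0 high, n1 low) divided by d0, with the quotient left in %rax and the remainder in %rdx. The helper name and test values are made up, gcc on an x86_64 host is assumed, and the divisor constraint is narrowed from "g" to "r" so the compiler cannot fold a constant divisor into an immediate, which divq would reject.

/* sketch only, not part of the commit */
#include <stdio.h>

typedef unsigned long BN_ULONG;            /* 64 bits under SIXTY_FOUR_BIT_LONG */

static BN_ULONG div_words(BN_ULONG n0, BN_ULONG n1, BN_ULONG d0, BN_ULONG *rem)
{
    BN_ULONG q, r;
    /* divide the 128-bit value n0:n1 by d0; n0 must be < d0 or divq faults */
    asm volatile ("divq %4"
                  : "=a"(q), "=d"(r)
                  : "a"(n1), "d"(n0), "r"(d0)
                  : "cc");
    *rem = r;
    return q;
}

int main(void)
{
    BN_ULONG rem;
    BN_ULONG q = div_words(3, 7, 10, &rem);   /* (3*2^64 + 7) / 10 */
    printf("q=%lu rem=%lu\n", q, rem);        /* q=5534023222112865485 rem=5 */
    return 0;
}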
@@ -296,7 +310,9 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,
rem=(n1-q*d0)&BN_MASK2;
#endif
#ifdef BN_UMULT_HIGH
#if defined(BN_UMULT_LOHI)
BN_UMULT_LOHI(t2l,t2h,d1,q);
#elif defined(BN_UMULT_HIGH)
t2l = d1 * q;
t2h = BN_UMULT_HIGH(d1,q);
#else

View file

@@ -230,6 +230,21 @@ struct bignum_ctx
: "r"(a), "r"(b)); \
ret; })
# endif /* compiler */
# elif defined(__x86_64) && defined(SIXTY_FOUR_BIT_LONG)
# if defined(__GNUC__)
# define BN_UMULT_HIGH(a,b) ({ \
register BN_ULONG ret,discard; \
asm ("mulq %3" \
: "=a"(discard),"=d"(ret) \
: "a"(a), "g"(b) \
: "cc"); \
ret; })
# define BN_UMULT_LOHI(low,high,a,b) \
asm ("mulq %3" \
: "=a"(low),"=d"(high) \
: "a"(a),"g"(b) \
: "cc");
# endif
# endif /* cpu */
#endif /* OPENSSL_NO_ASM */
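A similarly minimal sketch, not part of the commit and assuming gcc on an x86_64 host, of what the new BN_UMULT_LOHI idiom produces: mulq leaves the full 128-bit product of two 64-bit words in %rdx:%rax. The "g" constraint from the patch is narrowed to "r" here so a constant operand cannot become an immediate, which mulq does not accept.

/* sketch only, not part of the commit */
#include <stdio.h>

typedef unsigned long BN_ULONG;

int main(void)
{
    BN_ULONG a = 0xffffffffffffffffUL, b = 3, lo, hi;

    asm ("mulq %3"                  /* unsigned 64x64 -> 128-bit multiply */
         : "=a"(lo), "=d"(hi)
         : "a"(a), "r"(b)
         : "cc");
    printf("hi=%lu lo=0x%lx\n", hi, lo);   /* 3*(2^64-1): hi=2, lo=0xfffffffffffffffd */
    return 0;
}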
@@ -337,7 +352,7 @@ struct bignum_ctx
#define LBITS(a) ((a)&BN_MASK2l)
#define HBITS(a) (((a)>>BN_BITS4)&BN_MASK2l)
#define L2HBITS(a) ((BN_ULONG)((a)&BN_MASK2l)<<BN_BITS4)
#define L2HBITS(a) (((a)<<BN_BITS4)&BN_MASK2)
#define LLBITS(a) ((a)&BN_MASKl)
#define LHBITS(a) (((a)>>BN_BITS2)&BN_MASKl)

View file

@@ -162,6 +162,16 @@
#if defined(OPENSSL_SYS_WIN32) && defined(_MSC_VER)
#define ROTATE(a,n) (_lrotr(a,n))
#elif defined(__GNUC__) && __GNUC__>=2 && !defined(NO_ASM) && !defined(NO_INLINE_ASM)
# if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
# define ROTATE(a,n) ({ register unsigned int ret; \
asm ("rorl %1,%0" \
: "=r"(ret) \
: "I"(n),"0"(a) \
: "cc"); \
ret; \
})
# endif
#else
#define ROTATE(a,n) (((a)>>(n))+((a)<<(32-(n))))
#endif
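A small self-check, not part of the commit, showing that the rorl form added above and the portable fallback agree; the "I" constraint means the rotate count must be a compile-time constant between 0 and 31, and gcc is assumed for the statement-expression syntax.

/* sketch only, not part of the commit */
#include <stdio.h>

#define ROTATE_ASM(a,n) ({ register unsigned int ret;  \
                           asm ("rorl %1,%0"           \
                                : "=r"(ret)            \
                                : "I"(n), "0"(a)       \
                                : "cc");               \
                           ret; })
#define ROTATE_C(a,n)   (((a)>>(n))+((a)<<(32-(n))))

int main(void)
{
    unsigned int a = 0x80000001;
    /* both print 18000000 */
    printf("%08x %08x\n", ROTATE_ASM(a,4), ROTATE_C(a,4));
    return 0;
}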

View file

@@ -198,7 +198,7 @@
*
* <appro@fy.chalmers.se>
*/
# if defined(__i386) || defined(__i386__)
# if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
# define ROTATE(a,n) ({ register unsigned int ret; \
asm ( \
"roll %1,%0" \
@@ -224,7 +224,7 @@
*/
# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
/* some GNU C inline assembler templates by <appro@fy.chalmers.se> */
# if (defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)
# if (defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)) && !defined(I386_ONLY)
# define BE_FETCH32(a) ({ register unsigned int l=(a);\
asm ( \
"bswapl %0" \
@@ -610,3 +610,28 @@ int HASH_FINAL (unsigned char *md, HASH_CTX *c)
*/
return 1;
}
#ifndef MD32_REG_T
#define MD32_REG_T long
/*
* This comment was originally written for MD5, which is why it
* discusses A-D. But it basically applies to all 32-bit digests,
* which is why it was moved to common header file.
*
* In case you wonder why A-D are declared as long and not
* as MD5_LONG. Doing so results in slight performance
* boost on LP64 architectures. The catch is we don't
* really care if 32 MSBs of a 64-bit register get polluted
* with eventual overflows as we *save* only 32 LSBs in
* *either* case. Now declaring 'em long excuses the compiler
* from keeping 32 MSBs zeroed resulting in 13% performance
* improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
* Well, to be honest it should say that this *prevents*
* performance degradation.
* <appro@fy.chalmers.se>
* Apparently there're LP64 compilers that generate better
* code if A-D are declared int. Most notably GCC-x86_64
* generates better code.
* <appro@fy.chalmers.se>
*/
#endif
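A tiny standalone illustration, not from the patch and with arbitrary values, of the point the comment above makes: as long as the round operations only ever feed low bits into low bits (add, xor, left shift), whatever accumulates above bit 31 of a wide working variable never changes the 32 bits that are finally stored, so the width of MD32_REG_T is purely a code-generation choice.

/* sketch only, not part of the commit */
#include <stdio.h>

int main(void)
{
    unsigned long A = 0;        /* wide working variable, upper bits free to collect garbage */
    unsigned int  a = 0;        /* strict 32-bit reference */
    unsigned int  x = 0x9e3779b9;
    int i;

    for (i = 0; i < 1000; i++) {
        A += x; A ^= A << 7;    /* never masked back to 32 bits inside the loop */
        a += x; a ^= a << 7;
    }
    /* only the 32 LSBs are saved, and they agree */
    printf("%08lx %08x\n", A & 0xffffffffUL, a);
    return 0;
}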

View file

@@ -86,21 +86,7 @@ int MD4_Init(MD4_CTX *c)
void md4_block_host_order (MD4_CTX *c, const void *data, int num)
{
const MD4_LONG *X=data;
register unsigned long A,B,C,D;
/*
* In case you wonder why A-D are declared as long and not
* as MD4_LONG. Doing so results in slight performance
* boost on LP64 architectures. The catch is we don't
* really care if 32 MSBs of a 64-bit register get polluted
* with eventual overflows as we *save* only 32 LSBs in
* *either* case. Now declaring 'em long excuses the compiler
* from keeping 32 MSBs zeroed resulting in 13% performance
* improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
* Well, to be honest it should say that this *prevents*
* performance degradation.
*
* <appro@fy.chalmers.se>
*/
register unsigned MD32_REG_T A,B,C,D;
A=c->A;
B=c->B;
@@ -176,25 +162,11 @@ void md4_block_host_order (MD4_CTX *c, const void *data, int num)
void md4_block_data_order (MD4_CTX *c, const void *data_, int num)
{
const unsigned char *data=data_;
register unsigned long A,B,C,D,l;
/*
* In case you wonder why A-D are declared as long and not
* as MD4_LONG. Doing so results in slight performance
* boost on LP64 architectures. The catch is we don't
* really care if 32 MSBs of a 64-bit register get polluted
* with eventual overflows as we *save* only 32 LSBs in
* *either* case. Now declaring 'em long excuses the compiler
* from keeping 32 MSBs zeroed resulting in 13% performance
* improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
* Well, to be honest it should say that this *prevents*
* performance degradation.
*
* <appro@fy.chalmers.se>
*/
register unsigned MD32_REG_T A,B,C,D,l;
#ifndef MD32_XARRAY
/* See comment in crypto/sha/sha_locl.h for details. */
unsigned long XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15;
unsigned MD32_REG_T XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15;
# define X(i) XX##i
#else
MD4_LONG XX[MD4_LBLOCK];

View file

@@ -86,21 +86,7 @@ int MD5_Init(MD5_CTX *c)
void md5_block_host_order (MD5_CTX *c, const void *data, int num)
{
const MD5_LONG *X=data;
register unsigned long A,B,C,D;
/*
* In case you wonder why A-D are declared as long and not
* as MD5_LONG. Doing so results in slight performance
* boost on LP64 architectures. The catch is we don't
* really care if 32 MSBs of a 64-bit register get polluted
* with eventual overflows as we *save* only 32 LSBs in
* *either* case. Now declaring 'em long excuses the compiler
* from keeping 32 MSBs zeroed resulting in 13% performance
* improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
* Well, to be honest it should say that this *prevents*
* performance degradation.
*
* <appro@fy.chalmers.se>
*/
register unsigned MD32_REG_T A,B,C,D;
A=c->A;
B=c->B;
@@ -193,25 +179,11 @@ void md5_block_host_order (MD5_CTX *c, const void *data, int num)
void md5_block_data_order (MD5_CTX *c, const void *data_, int num)
{
const unsigned char *data=data_;
register unsigned long A,B,C,D,l;
/*
* In case you wonder why A-D are declared as long and not
* as MD5_LONG. Doing so results in slight performance
* boost on LP64 architectures. The catch is we don't
* really care if 32 MSBs of a 64-bit register get polluted
* with eventual overflows as we *save* only 32 LSBs in
* *either* case. Now declaring 'em long excuses the compiler
* from keeping 32 MSBs zeroed resulting in 13% performance
* improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
* Well, to be honest it should say that this *prevents*
* performance degradation.
*
* <appro@fy.chalmers.se>
*/
register unsigned MD32_REG_T A,B,C,D,l;
#ifndef MD32_XARRAY
/* See comment in crypto/sha/sha_locl.h for details. */
unsigned long XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15;
unsigned MD32_REG_T XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15;
# define X(i) XX##i
#else
MD5_LONG XX[MD5_LBLOCK];

View file

@@ -149,6 +149,23 @@
#if defined(OPENSSL_SYS_WIN32) && defined(_MSC_VER)
#define ROTATE_l32(a,n) _lrotl(a,n)
#define ROTATE_r32(a,n) _lrotr(a,n)
#elif defined(__GNUC__) && __GNUC__>=2 && !defined(NO_ASM) && !defined(NO_INLINE_ASM)
# if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
# define ROTATE_l32(a,n) ({ register unsigned int ret; \
asm ("roll %%cl,%0" \
: "=r"(ret) \
: "c"(n),"0"(a) \
: "cc"); \
ret; \
})
# define ROTATE_r32(a,n) ({ register unsigned int ret; \
asm ("rorl %%cl,%0" \
: "=r"(ret) \
: "c"(n),"0"(a) \
: "cc"); \
ret; \
})
# endif
#else
#define ROTATE_l32(a,n) (((a)<<(n&0x1f))|(((a)&0xffffffff)>>(32-(n&0x1f))))
#define ROTATE_r32(a,n) (((a)<<(32-(n&0x1f)))|(((a)&0xffffffff)>>(n&0x1f)))
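A short equivalence check, not part of the commit and assuming gcc on x86 or x86_64, for the variable-count rotates above: the "c" constraint pins the count in %ecx so "roll %cl, reg" works for any run-time n, and the result matches the portable fallback for counts 1 through 31 (the fallback's 32-n shift makes a count of 0 a case to avoid).

/* sketch only, not part of the commit */
#include <stdio.h>

static unsigned int rotl32_asm(unsigned int a, unsigned int n)
{
    register unsigned int ret;
    asm ("roll %%cl,%0" : "=r"(ret) : "c"(n), "0"(a) : "cc");
    return ret;
}

static unsigned int rotl32_c(unsigned int a, unsigned int n)
{
    return ((a << (n & 0x1f)) | ((a & 0xffffffff) >> (32 - (n & 0x1f))));
}

int main(void)
{
    unsigned int a = 0xdeadbeef, n;

    for (n = 1; n < 32; n++)
        if (rotl32_asm(a, n) != rotl32_c(a, n))
            printf("mismatch at n=%u\n", n);
    printf("checked\n");
    return 0;
}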

View file

@@ -90,8 +90,8 @@ int RIPEMD160_Init(RIPEMD160_CTX *c)
void ripemd160_block_host_order (RIPEMD160_CTX *ctx, const void *p, int num)
{
const RIPEMD160_LONG *XX=p;
register unsigned long A,B,C,D,E;
register unsigned long a,b,c,d,e;
register unsigned MD32_REG_T A,B,C,D,E;
register unsigned MD32_REG_T a,b,c,d,e;
for (;num--;XX+=HASH_LBLOCK)
{
@@ -290,12 +290,12 @@ void ripemd160_block_host_order (RIPEMD160_CTX *ctx, const void *p, int num)
void ripemd160_block_data_order (RIPEMD160_CTX *ctx, const void *p, int num)
{
const unsigned char *data=p;
register unsigned long A,B,C,D,E;
unsigned long a,b,c,d,e,l;
register unsigned MD32_REG_T A,B,C,D,E;
unsigned MD32_REG_T a,b,c,d,e,l;
#ifndef MD32_XARRAY
/* See comment in crypto/sha/sha_locl.h for details. */
unsigned long XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15;
unsigned MD32_REG_T XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15;
# define X(i) XX##i
#else
RIPEMD160_LONG XX[16];

View file

@@ -224,10 +224,10 @@ int HASH_INIT (SHA_CTX *c)
void HASH_BLOCK_HOST_ORDER (SHA_CTX *c, const void *d, int num)
{
const SHA_LONG *W=d;
register unsigned long A,B,C,D,E,T;
register unsigned MD32_REG_T A,B,C,D,E,T;
#ifndef MD32_XARRAY
unsigned long XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15;
unsigned MD32_REG_T XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15;
#else
SHA_LONG XX[16];
#endif
@@ -349,10 +349,10 @@ void HASH_BLOCK_HOST_ORDER (SHA_CTX *c, const void *d, int num)
void HASH_BLOCK_DATA_ORDER (SHA_CTX *c, const void *p, int num)
{
const unsigned char *data=p;
register unsigned long A,B,C,D,E,T,l;
register unsigned MD32_REG_T A,B,C,D,E,T,l;
#ifndef MD32_XARRAY
unsigned long XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15;
unsigned MD32_REG_T XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15;
#else
SHA_LONG XX[16];
#endif