diff --git a/crypto/ec/ec_cvt.c b/crypto/ec/ec_cvt.c
index 0c3b93a43b..a99d762d3b 100644
--- a/crypto/ec/ec_cvt.c
+++ b/crypto/ec/ec_cvt.c
@@ -80,10 +80,29 @@ EC_GROUP *EC_GROUP_new_curve_GFp(const BIGNUM *p, const BIGNUM *a, const BIGNUM
     const EC_METHOD *meth;
     EC_GROUP *ret;
 
+#if defined(OPENSSL_BN_ASM_MONT) && !defined(__sparc)
+    /*
+     * This might appear controversial, but the fact is that the
+     * generic prime method was observed to deliver better performance
+     * even for NIST primes on a range of platforms, e.g.: 60%-15%
+     * improvement on IA-64, 50%-20% on ARM, 30%-90% on P4, 20%-25%
+     * in the 32-bit build and 35%--12% in the 64-bit build on Core2.
+     * Coefficients are relative to optimized bn_nist.c for the most
+     * intensive ECDSA verify and ECDH operations for 192- and 521-bit
+     * keys respectively. What effectively happens is that a loop with
+     * bn_mul_add_words is pitted against bn_mul_mont, and the latter
+     * wins on short vectors. The correct solution would be dedicated
+     * NxN multiplication subroutines for small N, but until those
+     * materialize, let's stick to the generic prime method.
+     *
+     */
+    meth = EC_GFp_mont_method();
+#else
     if (BN_nist_mod_func(p))
         meth = EC_GFp_nist_method();
     else
         meth = EC_GFp_mont_method();
+#endif
 
     ret = EC_GROUP_new(meth);
     if (ret == NULL)
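
For context, a minimal caller-side sketch (not part of the patch; make_gfp_group and its
hex-string parameters are illustrative assumptions): the EC_METHOD selection above happens
inside EC_GROUP_new_curve_GFp(), so code that builds a prime-field group through the public
API behaves the same whichever preprocessor branch is compiled in.

    #include <openssl/bn.h>
    #include <openssl/ec.h>

    /* Build a prime-field EC_GROUP from hex-encoded curve parameters.
     * Whether the group internally uses EC_GFp_mont_method() or
     * EC_GFp_nist_method() is decided by the library, as in the hunk above. */
    static EC_GROUP *make_gfp_group(const char *p_hex, const char *a_hex,
                                    const char *b_hex)
    {
        BIGNUM *p = NULL, *a = NULL, *b = NULL;
        EC_GROUP *group = NULL;

        if (BN_hex2bn(&p, p_hex) != 0
                && BN_hex2bn(&a, a_hex) != 0
                && BN_hex2bn(&b, b_hex) != 0)
            group = EC_GROUP_new_curve_GFp(p, a, b, NULL); /* ctx may be NULL */

        BN_free(p);
        BN_free(a);
        BN_free(b);
        return group;
    }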