[PATCH] Clean-up implementation selection for SHA1 and SHA2
Jussi Kivilinna
jussi.kivilinna at iki.fi
Tue Jun 19 21:31:16 CEST 2018
* cipher/sha1.c (ASM_EXTRA_STACK): Increase by sizeof(void*)*4.
(do_sha1_transform_amd64_ssse3, do_sha1_transform_amd64_avx)
(do_sha1_transform_amd64_avx_bmi2, do_sha1_transform_intel_shaext)
(do_sha1_transform_armv7_neon, do_sha1_transform_armv8_ce): New.
(transform_blk, transform): Merge to ...
(do_transform_generic): ... this and remove calls to assembly
implementations.
(sha1_init): Select hd->bctx.bwrite based on HW features.
(_gcry_sha1_mixblock, sha1_final): Call hd->bctx.bwrite instead of
transform.
* cipher/sha1.h (SHA1_CONTEXT): Remove implementation selection bits.
* cipher/sha256.c (SHA256_CONTEXT): Remove implementation selection
bits.
(ASM_EXTRA_STACK): Increase by sizeof(void*)*4.
(do_sha256_transform_amd64_ssse3, do_sha256_transform_amd64_avx)
(do_sha256_transform_amd64_avx2, do_sha256_transform_intel_shaext)
(do_sha256_transform_armv8_ce): New.
(transform_blk, transform): Merge to ...
(do_transform_generic): ... this and remove calls to assembly
implementations.
(sha256_init, sha224_init): Select hd->bctx.bwrite based on HW
features.
(sha256_final): Call hd->bctx.bwrite instead of transform.
* cipher/sha512-armv7-neon.S
(_gcry_sha512_transform_armv7_neon): Return zero.
* cipher/sha512.c (SHA512_CONTEXT): Remove implementation selection
bits.
(ASM_EXTRA_STACK): Increase by sizeof(void*)*4.
(do_sha512_transform_armv7_neon, do_sha512_transform_amd64_ssse3)
(do_sha512_transform_amd64_avx, do_sha512_transform_amd64_avx2): New.
[USE_ARM_ASM] (do_transform_generic): New.
(transform_blk, transform): Merge to ...
[!USE_ARM_ASM] (do_transform_generic): ... this and remove calls to
assembly implementations.
(sha512_init, sha384_init): Select hd->bctx.bwrite based on HW
features.
(sha512_final): Call hd->bctx.bwrite instead of transform.
--
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@iki.fi>
---
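Note for reviewers: the diff below is large but mechanical. To make the shape of
the change easier to see, here is a minimal, self-contained C sketch of the
pattern it moves to; all names below are illustrative stand-ins, not the real
libgcrypt API. The context carries a single block-write function pointer,
init installs the generic C transform first and then lets each hardware-feature
check overwrite it (slowest checks first, so the last match wins), and callers
simply go through the pointer instead of the old per-call if-chain in
transform().

/* Minimal sketch of the "select at init, call through bwrite" pattern;
 * names are illustrative, not the real libgcrypt internals. */
#include <stddef.h>
#include <stdio.h>

#define HWF_SSSE3 (1u << 0)
#define HWF_AVX   (1u << 1)

typedef struct ctx_s ctx_t;

/* Block-write hook: processes NBLKS blocks of 64 bytes and returns a
 * stack-burn depth estimate, mirroring how bctx.bwrite is used here. */
typedef unsigned int (*bwrite_fn)(void *ctx, const unsigned char *data,
                                  size_t nblks);

struct ctx_s {
  bwrite_fn bwrite;
  unsigned int h0;          /* stand-in for the real chaining state */
};

static unsigned int
do_transform_generic (void *c, const unsigned char *data, size_t nblks)
{
  ctx_t *hd = c;
  do
    {
      hd->h0 += data[0];    /* placeholder for the real compression */
      data += 64;
    }
  while (--nblks);
  return 88 + 4 * sizeof(void*);
}

static unsigned int
do_transform_fast (void *c, const unsigned char *data, size_t nblks)
{
  ctx_t *hd = c;
  (void)data; (void)nblks;
  hd->h0 ^= 0x5a;           /* placeholder for an accelerated kernel */
  return 0;
}

static void
ctx_init (ctx_t *hd, unsigned int features)
{
  /* Order matters: start with the slowest implementation and let
   * later, faster matches overwrite the pointer. */
  hd->bwrite = do_transform_generic;
  if (features & HWF_SSSE3)
    hd->bwrite = do_transform_fast;
  if (features & HWF_AVX)
    hd->bwrite = do_transform_fast;
  hd->h0 = 0;
}

int
main (void)
{
  static const unsigned char block[64] = { 1 };
  ctx_t hd;
  unsigned int burn;

  ctx_init (&hd, HWF_SSSE3);
  /* Callers no longer branch per call; they just go through the hook,
   * the same way sha1_final() now calls (*hd->bctx.bwrite)(...). */
  burn = (*hd.bwrite) (&hd, block, 1);
  printf ("burn depth: %u\n", burn);
  return 0;
}

A related consequence visible in the hunks: the amd64 wrappers now return the
assembly routine's burn value plus ASM_EXTRA_STACK unconditionally, so the
constant itself grows by sizeof(void*)*4 to keep covering the wrapper frame
that the old "burn ? 4 * sizeof(void*) + ASM_EXTRA_STACK : 0" expression added.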
cipher/sha1.c | 277 +++++++------
cipher/sha1.h | 6 -
cipher/sha256.c | 435 ++++++++++----------
cipher/sha512-armv7-neon.S | 1 +
cipher/sha512.c | 796 ++++++++++++++++++-------------------
5 files changed, 722 insertions(+), 793 deletions(-)
diff --git a/cipher/sha1.c b/cipher/sha1.c
index e50262ff4..76c486c7e 100644
--- a/cipher/sha1.c
+++ b/cipher/sha1.c
@@ -111,8 +111,114 @@
/* #endif */
+
+/* Assembly implementations use SystemV ABI, ABI conversion and additional
+ * stack to store XMM6-XMM15 needed on Win64. */
+#undef ASM_FUNC_ABI
+#undef ASM_EXTRA_STACK
+#if defined(USE_SSSE3) || defined(USE_AVX) || defined(USE_BMI2) || \
+ defined(USE_SHAEXT)
+# ifdef HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS
+# define ASM_FUNC_ABI __attribute__((sysv_abi))
+# define ASM_EXTRA_STACK (10 * 16 + sizeof(void *) * 4)
+# else
+# define ASM_FUNC_ABI
+# define ASM_EXTRA_STACK 0
+# endif
+#endif
+
+
+#ifdef USE_SSSE3
+unsigned int
+_gcry_sha1_transform_amd64_ssse3 (void *state, const unsigned char *data,
+ size_t nblks) ASM_FUNC_ABI;
+
+static unsigned int
+do_sha1_transform_amd64_ssse3 (void *ctx, const unsigned char *data,
+ size_t nblks)
+{
+ SHA1_CONTEXT *hd = ctx;
+ return _gcry_sha1_transform_amd64_ssse3 (&hd->h0, data, nblks)
+ + ASM_EXTRA_STACK;
+}
+#endif
+
+#ifdef USE_AVX
+unsigned int
+_gcry_sha1_transform_amd64_avx (void *state, const unsigned char *data,
+ size_t nblks) ASM_FUNC_ABI;
+
+static unsigned int
+do_sha1_transform_amd64_avx (void *ctx, const unsigned char *data,
+ size_t nblks)
+{
+ SHA1_CONTEXT *hd = ctx;
+ return _gcry_sha1_transform_amd64_avx (&hd->h0, data, nblks)
+ + ASM_EXTRA_STACK;
+}
+#endif
+
+#ifdef USE_BMI2
+unsigned int
+_gcry_sha1_transform_amd64_avx_bmi2 (void *state, const unsigned char *data,
+ size_t nblks) ASM_FUNC_ABI;
+
+static unsigned int
+do_sha1_transform_amd64_avx_bmi2 (void *ctx, const unsigned char *data,
+ size_t nblks)
+{
+ SHA1_CONTEXT *hd = ctx;
+ return _gcry_sha1_transform_amd64_avx_bmi2 (&hd->h0, data, nblks)
+ + ASM_EXTRA_STACK;
+}
+#endif
+
+#ifdef USE_SHAEXT
+/* Does not need ASM_FUNC_ABI */
+unsigned int
+_gcry_sha1_transform_intel_shaext (void *state, const unsigned char *data,
+ size_t nblks);
+
static unsigned int
-transform (void *c, const unsigned char *data, size_t nblks);
+do_sha1_transform_intel_shaext (void *ctx, const unsigned char *data,
+ size_t nblks)
+{
+ SHA1_CONTEXT *hd = ctx;
+ return _gcry_sha1_transform_intel_shaext (&hd->h0, data, nblks);
+}
+#endif
+
+#ifdef USE_NEON
+unsigned int
+_gcry_sha1_transform_armv7_neon (void *state, const unsigned char *data,
+ size_t nblks);
+
+static unsigned int
+do_sha1_transform_armv7_neon (void *ctx, const unsigned char *data,
+ size_t nblks)
+{
+ SHA1_CONTEXT *hd = ctx;
+ return _gcry_sha1_transform_armv7_neon (&hd->h0, data, nblks);
+}
+#endif
+
+#ifdef USE_ARM_CE
+unsigned int
+_gcry_sha1_transform_armv8_ce (void *state, const unsigned char *data,
+ size_t nblks);
+
+static unsigned int
+do_sha1_transform_armv8_ce (void *ctx, const unsigned char *data,
+ size_t nblks)
+{
+ SHA1_CONTEXT *hd = ctx;
+ return _gcry_sha1_transform_armv8_ce (&hd->h0, data, nblks);
+}
+#endif
+
+
+static unsigned int
+do_transform_generic (void *c, const unsigned char *data, size_t nblks);
static void
@@ -133,29 +239,38 @@ sha1_init (void *context, unsigned int flags)
hd->bctx.nblocks_high = 0;
hd->bctx.count = 0;
hd->bctx.blocksize = 64;
- hd->bctx.bwrite = transform;
+ /* Order of feature checks is important here; last match will be
+ * selected. Keep slower implementations at the top and faster at
+ * the bottom. */
+ hd->bctx.bwrite = do_transform_generic;
#ifdef USE_SSSE3
- hd->use_ssse3 = (features & HWF_INTEL_SSSE3) != 0;
+ if ((features & HWF_INTEL_SSSE3) != 0)
+ hd->bctx.bwrite = do_sha1_transform_amd64_ssse3;
#endif
#ifdef USE_AVX
/* AVX implementation uses SHLD which is known to be slow on non-Intel CPUs.
* Therefore use this implementation on Intel CPUs only. */
- hd->use_avx = (features & HWF_INTEL_AVX) && (features & HWF_INTEL_FAST_SHLD);
+ if ((features & HWF_INTEL_AVX) && (features & HWF_INTEL_FAST_SHLD))
+ hd->bctx.bwrite = do_sha1_transform_amd64_avx;
#endif
#ifdef USE_BMI2
- hd->use_bmi2 = (features & HWF_INTEL_AVX) && (features & HWF_INTEL_BMI2);
+ if ((features & HWF_INTEL_AVX) && (features & HWF_INTEL_BMI2))
+ hd->bctx.bwrite = do_sha1_transform_amd64_avx_bmi2;
#endif
#ifdef USE_SHAEXT
- hd->use_shaext = (features & HWF_INTEL_SHAEXT)
- && (features & HWF_INTEL_SSE4_1);
+ if ((features & HWF_INTEL_SHAEXT) && (features & HWF_INTEL_SSE4_1))
+ hd->bctx.bwrite = do_sha1_transform_intel_shaext;
#endif
#ifdef USE_NEON
- hd->use_neon = (features & HWF_ARM_NEON) != 0;
+ if ((features & HWF_ARM_NEON) != 0)
+ hd->bctx.bwrite = do_sha1_transform_armv7_neon;
#endif
#ifdef USE_ARM_CE
- hd->use_arm_ce = (features & HWF_ARM_SHA1) != 0;
+ if ((features & HWF_ARM_SHA1) != 0)
+ hd->bctx.bwrite = do_sha1_transform_armv8_ce;
#endif
+
(void)features;
}
@@ -192,30 +307,20 @@ _gcry_sha1_mixblock_init (SHA1_CONTEXT *hd)
b = rol( b, 30 ); \
} while(0)
-
-#ifdef USE_NEON
-unsigned int
-_gcry_sha1_transform_armv7_neon (void *state, const unsigned char *data,
- size_t nblks);
-#endif
-
-#ifdef USE_ARM_CE
-unsigned int
-_gcry_sha1_transform_armv8_ce (void *state, const unsigned char *data,
- size_t nblks);
-#endif
-
/*
* Transform NBLOCKS of each 64 bytes (16 32-bit words) at DATA.
*/
static unsigned int
-transform_blk (void *ctx, const unsigned char *data)
+do_transform_generic (void *ctx, const unsigned char *data, size_t nblks)
{
SHA1_CONTEXT *hd = ctx;
- const u32 *idata = (const void *)data;
- register u32 a, b, c, d, e; /* Local copies of the chaining variables. */
- register u32 tm; /* Helper. */
- u32 x[16]; /* The array we work on. */
+
+ do
+ {
+ const u32 *idata = (const void *)data;
+ u32 a, b, c, d, e; /* Local copies of the chaining variables. */
+ u32 tm; /* Helper. */
+ u32 x[16]; /* The array we work on. */
#define I(i) (x[i] = buf_get_be32(idata + i))
@@ -315,123 +420,11 @@ transform_blk (void *ctx, const unsigned char *data)
hd->h3 += d;
hd->h4 += e;
- return /* burn_stack */ 88+4*sizeof(void*);
-}
-
-
-/* Assembly implementations use SystemV ABI, ABI conversion and additional
- * stack to store XMM6-XMM15 needed on Win64. */
-#undef ASM_FUNC_ABI
-#undef ASM_EXTRA_STACK
-#if defined(USE_SSSE3) || defined(USE_AVX) || defined(USE_BMI2) || \
- defined(USE_SHAEXT)
-# ifdef HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS
-# define ASM_FUNC_ABI __attribute__((sysv_abi))
-# define ASM_EXTRA_STACK (10 * 16)
-# else
-# define ASM_FUNC_ABI
-# define ASM_EXTRA_STACK 0
-# endif
-#endif
-
-
-#ifdef USE_SSSE3
-unsigned int
-_gcry_sha1_transform_amd64_ssse3 (void *state, const unsigned char *data,
- size_t nblks) ASM_FUNC_ABI;
-#endif
-
-#ifdef USE_AVX
-unsigned int
-_gcry_sha1_transform_amd64_avx (void *state, const unsigned char *data,
- size_t nblks) ASM_FUNC_ABI;
-#endif
-
-#ifdef USE_BMI2
-unsigned int
-_gcry_sha1_transform_amd64_avx_bmi2 (void *state, const unsigned char *data,
- size_t nblks) ASM_FUNC_ABI;
-#endif
-
-#ifdef USE_SHAEXT
-/* Does not need ASM_FUNC_ABI */
-unsigned int
-_gcry_sha1_transform_intel_shaext (void *state, const unsigned char *data,
- size_t nblks);
-#endif
-
-
-static unsigned int
-transform (void *ctx, const unsigned char *data, size_t nblks)
-{
- SHA1_CONTEXT *hd = ctx;
- unsigned int burn;
-
-#ifdef USE_SHAEXT
- if (hd->use_shaext)
- {
- burn = _gcry_sha1_transform_intel_shaext (&hd->h0, data, nblks);
- burn += burn ? 4 * sizeof(void*) + ASM_EXTRA_STACK : 0;
- return burn;
- }
-#endif
-#ifdef USE_BMI2
- if (hd->use_bmi2)
- {
- burn = _gcry_sha1_transform_amd64_avx_bmi2 (&hd->h0, data, nblks);
- burn += burn ? 4 * sizeof(void*) + ASM_EXTRA_STACK : 0;
- return burn;
- }
-#endif
-#ifdef USE_AVX
- if (hd->use_avx)
- {
- burn = _gcry_sha1_transform_amd64_avx (&hd->h0, data, nblks);
- burn += burn ? 4 * sizeof(void*) + ASM_EXTRA_STACK : 0;
- return burn;
- }
-#endif
-#ifdef USE_SSSE3
- if (hd->use_ssse3)
- {
- burn = _gcry_sha1_transform_amd64_ssse3 (&hd->h0, data, nblks);
- burn += burn ? 4 * sizeof(void*) + ASM_EXTRA_STACK : 0;
- return burn;
- }
-#endif
-#ifdef USE_ARM_CE
- if (hd->use_arm_ce)
- {
- burn = _gcry_sha1_transform_armv8_ce (&hd->h0, data, nblks);
- burn += burn ? 4 * sizeof(void*) : 0;
- return burn;
- }
-#endif
-#ifdef USE_NEON
- if (hd->use_neon)
- {
- burn = _gcry_sha1_transform_armv7_neon (&hd->h0, data, nblks);
- burn += burn ? 4 * sizeof(void*) : 0;
- return burn;
- }
-#endif
-
- do
- {
- burn = transform_blk (hd, data);
data += 64;
}
while (--nblks);
-#ifdef ASM_EXTRA_STACK
- /* 'transform_blk' is typically inlined and XMM6-XMM15 are stored at
- * the prologue of this function. Therefore need to add ASM_EXTRA_STACK to
- * here too.
- */
- burn += ASM_EXTRA_STACK;
-#endif
-
- return burn;
+ return 88+4*sizeof(void*);
}
@@ -451,7 +444,7 @@ _gcry_sha1_mixblock (SHA1_CONTEXT *hd, void *blockof64byte)
u32 *p = blockof64byte;
unsigned int nburn;
- nburn = transform (hd, blockof64byte, 1);
+ nburn = (*hd->bctx.bwrite) (hd, blockof64byte, 1);
p[0] = hd->h0;
p[1] = hd->h1;
p[2] = hd->h2;
@@ -515,7 +508,7 @@ sha1_final(void *context)
/* append the 64 bit count */
buf_put_be32(hd->bctx.buf + 56, msb);
buf_put_be32(hd->bctx.buf + 60, lsb);
- burn = transform( hd, hd->bctx.buf, 1 );
+ burn = (*hd->bctx.bwrite) ( hd, hd->bctx.buf, 1 );
_gcry_burn_stack (burn);
p = hd->bctx.buf;
diff --git a/cipher/sha1.h b/cipher/sha1.h
index 93ce79b5c..acf764baa 100644
--- a/cipher/sha1.h
+++ b/cipher/sha1.h
@@ -26,12 +26,6 @@ typedef struct
{
gcry_md_block_ctx_t bctx;
u32 h0,h1,h2,h3,h4;
- unsigned int use_ssse3:1;
- unsigned int use_avx:1;
- unsigned int use_bmi2:1;
- unsigned int use_shaext:1;
- unsigned int use_neon:1;
- unsigned int use_arm_ce:1;
} SHA1_CONTEXT;
diff --git a/cipher/sha256.c b/cipher/sha256.c
index 069597074..e82a9d902 100644
--- a/cipher/sha256.c
+++ b/cipher/sha256.c
@@ -102,26 +102,103 @@
typedef struct {
gcry_md_block_ctx_t bctx;
u32 h0,h1,h2,h3,h4,h5,h6,h7;
+} SHA256_CONTEXT;
+
+
+/* Assembly implementations use SystemV ABI, ABI conversion and additional
+ * stack to store XMM6-XMM15 needed on Win64. */
+#undef ASM_FUNC_ABI
+#undef ASM_EXTRA_STACK
+#if defined(USE_SSSE3) || defined(USE_AVX) || defined(USE_AVX2) || \
+ defined(USE_SHAEXT)
+# ifdef HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS
+# define ASM_FUNC_ABI __attribute__((sysv_abi))
+# define ASM_EXTRA_STACK (10 * 16 + sizeof(void *) * 4)
+# else
+# define ASM_FUNC_ABI
+# define ASM_EXTRA_STACK 0
+# endif
+#endif
+
+
#ifdef USE_SSSE3
- unsigned int use_ssse3:1;
+unsigned int _gcry_sha256_transform_amd64_ssse3(const void *input_data,
+ u32 state[8],
+ size_t num_blks) ASM_FUNC_ABI;
+
+static unsigned int
+do_sha256_transform_amd64_ssse3(void *ctx, const unsigned char *data,
+ size_t nblks)
+{
+ SHA256_CONTEXT *hd = ctx;
+ return _gcry_sha256_transform_amd64_ssse3 (data, &hd->h0, nblks)
+ + ASM_EXTRA_STACK;
+}
#endif
+
#ifdef USE_AVX
- unsigned int use_avx:1;
+unsigned int _gcry_sha256_transform_amd64_avx(const void *input_data,
+ u32 state[8],
+ size_t num_blks) ASM_FUNC_ABI;
+
+static unsigned int
+do_sha256_transform_amd64_avx(void *ctx, const unsigned char *data,
+ size_t nblks)
+{
+ SHA256_CONTEXT *hd = ctx;
+ return _gcry_sha256_transform_amd64_avx (data, &hd->h0, nblks)
+ + ASM_EXTRA_STACK;
+}
#endif
+
#ifdef USE_AVX2
- unsigned int use_avx2:1;
+unsigned int _gcry_sha256_transform_amd64_avx2(const void *input_data,
+ u32 state[8],
+ size_t num_blks) ASM_FUNC_ABI;
+
+static unsigned int
+do_sha256_transform_amd64_avx2(void *ctx, const unsigned char *data,
+ size_t nblks)
+{
+ SHA256_CONTEXT *hd = ctx;
+ return _gcry_sha256_transform_amd64_avx2 (data, &hd->h0, nblks)
+ + ASM_EXTRA_STACK;
+}
#endif
+
#ifdef USE_SHAEXT
- unsigned int use_shaext:1;
+/* Does not need ASM_FUNC_ABI */
+unsigned int
+_gcry_sha256_transform_intel_shaext(u32 state[8],
+ const unsigned char *input_data,
+ size_t num_blks);
+
+static unsigned int
+do_sha256_transform_intel_shaext(void *ctx, const unsigned char *data,
+ size_t nblks)
+{
+ SHA256_CONTEXT *hd = ctx;
+ return _gcry_sha256_transform_intel_shaext (&hd->h0, data, nblks);
+}
#endif
+
#ifdef USE_ARM_CE
- unsigned int use_arm_ce:1;
+unsigned int _gcry_sha256_transform_armv8_ce(u32 state[8],
+ const void *input_data,
+ size_t num_blks);
+
+static unsigned int
+do_sha256_transform_armv8_ce(void *ctx, const unsigned char *data,
+ size_t nblks)
+{
+ SHA256_CONTEXT *hd = ctx;
+ return _gcry_sha256_transform_armv8_ce (&hd->h0, data, nblks);
+}
#endif
-} SHA256_CONTEXT;
static unsigned int
-transform (void *c, const unsigned char *data, size_t nblks);
+do_transform_generic (void *ctx, const unsigned char *data, size_t nblks);
static void
@@ -145,25 +222,32 @@ sha256_init (void *context, unsigned int flags)
hd->bctx.nblocks_high = 0;
hd->bctx.count = 0;
hd->bctx.blocksize = 64;
- hd->bctx.bwrite = transform;
+ /* Order of feature checks is important here; last match will be
+ * selected. Keep slower implementations at the top and faster at
+ * the bottom. */
+ hd->bctx.bwrite = do_transform_generic;
#ifdef USE_SSSE3
- hd->use_ssse3 = (features & HWF_INTEL_SSSE3) != 0;
+ if ((features & HWF_INTEL_SSSE3) != 0)
+ hd->bctx.bwrite = do_sha256_transform_amd64_ssse3;
#endif
#ifdef USE_AVX
/* AVX implementation uses SHLD which is known to be slow on non-Intel CPUs.
* Therefore use this implementation on Intel CPUs only. */
- hd->use_avx = (features & HWF_INTEL_AVX) && (features & HWF_INTEL_FAST_SHLD);
+ if ((features & HWF_INTEL_AVX) && (features & HWF_INTEL_FAST_SHLD))
+ hd->bctx.bwrite = do_sha256_transform_amd64_avx;
#endif
#ifdef USE_AVX2
- hd->use_avx2 = (features & HWF_INTEL_AVX2) && (features & HWF_INTEL_BMI2);
+ if ((features & HWF_INTEL_AVX2) && (features & HWF_INTEL_BMI2))
+ hd->bctx.bwrite = do_sha256_transform_amd64_avx2;
#endif
#ifdef USE_SHAEXT
- hd->use_shaext = (features & HWF_INTEL_SHAEXT)
- && (features & HWF_INTEL_SSE4_1);
+ if ((features & HWF_INTEL_SHAEXT) && (features & HWF_INTEL_SSE4_1))
+ hd->bctx.bwrite = do_sha256_transform_intel_shaext;
#endif
#ifdef USE_ARM_CE
- hd->use_arm_ce = (features & HWF_ARM_SHA2) != 0;
+ if ((features & HWF_ARM_SHA2) != 0)
+ hd->bctx.bwrite = do_sha256_transform_armv8_ce;
#endif
(void)features;
}
@@ -190,25 +274,32 @@ sha224_init (void *context, unsigned int flags)
hd->bctx.nblocks_high = 0;
hd->bctx.count = 0;
hd->bctx.blocksize = 64;
- hd->bctx.bwrite = transform;
+ /* Order of feature checks is important here; last match will be
+ * selected. Keep slower implementations at the top and faster at
+ * the bottom. */
+ hd->bctx.bwrite = do_transform_generic;
#ifdef USE_SSSE3
- hd->use_ssse3 = (features & HWF_INTEL_SSSE3) != 0;
+ if ((features & HWF_INTEL_SSSE3) != 0)
+ hd->bctx.bwrite = do_sha256_transform_amd64_ssse3;
#endif
#ifdef USE_AVX
/* AVX implementation uses SHLD which is known to be slow on non-Intel CPUs.
* Therefore use this implementation on Intel CPUs only. */
- hd->use_avx = (features & HWF_INTEL_AVX) && (features & HWF_INTEL_FAST_SHLD);
+ if ((features & HWF_INTEL_AVX) && (features & HWF_INTEL_FAST_SHLD))
+ hd->bctx.bwrite = do_sha256_transform_amd64_avx;
#endif
#ifdef USE_AVX2
- hd->use_avx2 = (features & HWF_INTEL_AVX2) && (features & HWF_INTEL_BMI2);
+ if ((features & HWF_INTEL_AVX2) && (features & HWF_INTEL_BMI2))
+ hd->bctx.bwrite = do_sha256_transform_amd64_avx2;
#endif
#ifdef USE_SHAEXT
- hd->use_shaext = (features & HWF_INTEL_SHAEXT)
- && (features & HWF_INTEL_SSE4_1);
+ if ((features & HWF_INTEL_SHAEXT) && (features & HWF_INTEL_SSE4_1))
+ hd->bctx.bwrite = do_sha256_transform_intel_shaext;
#endif
#ifdef USE_ARM_CE
- hd->use_arm_ce = (features & HWF_ARM_SHA2) != 0;
+ if ((features & HWF_ARM_SHA2) != 0)
+ hd->bctx.bwrite = do_sha256_transform_armv8_ce;
#endif
(void)features;
}
@@ -247,7 +338,7 @@ sha224_init (void *context, unsigned int flags)
+ w[(i-16)&0x0f] )
static unsigned int
-transform_blk (void *ctx, const unsigned char *data)
+do_transform_generic (void *ctx, const unsigned char *data, size_t nblks)
{
SHA256_CONTEXT *hd = ctx;
static const u32 K[64] = {
@@ -269,219 +360,109 @@ transform_blk (void *ctx, const unsigned char *data)
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};
- u32 a,b,c,d,e,f,g,h,t1,t2;
- u32 w[16];
-
- a = hd->h0;
- b = hd->h1;
- c = hd->h2;
- d = hd->h3;
- e = hd->h4;
- f = hd->h5;
- g = hd->h6;
- h = hd->h7;
-
- R(a, b, c, d, e, f, g, h, K[0], I(0));
- R(h, a, b, c, d, e, f, g, K[1], I(1));
- R(g, h, a, b, c, d, e, f, K[2], I(2));
- R(f, g, h, a, b, c, d, e, K[3], I(3));
- R(e, f, g, h, a, b, c, d, K[4], I(4));
- R(d, e, f, g, h, a, b, c, K[5], I(5));
- R(c, d, e, f, g, h, a, b, K[6], I(6));
- R(b, c, d, e, f, g, h, a, K[7], I(7));
- R(a, b, c, d, e, f, g, h, K[8], I(8));
- R(h, a, b, c, d, e, f, g, K[9], I(9));
- R(g, h, a, b, c, d, e, f, K[10], I(10));
- R(f, g, h, a, b, c, d, e, K[11], I(11));
- R(e, f, g, h, a, b, c, d, K[12], I(12));
- R(d, e, f, g, h, a, b, c, K[13], I(13));
- R(c, d, e, f, g, h, a, b, K[14], I(14));
- R(b, c, d, e, f, g, h, a, K[15], I(15));
-
- R(a, b, c, d, e, f, g, h, K[16], W(16));
- R(h, a, b, c, d, e, f, g, K[17], W(17));
- R(g, h, a, b, c, d, e, f, K[18], W(18));
- R(f, g, h, a, b, c, d, e, K[19], W(19));
- R(e, f, g, h, a, b, c, d, K[20], W(20));
- R(d, e, f, g, h, a, b, c, K[21], W(21));
- R(c, d, e, f, g, h, a, b, K[22], W(22));
- R(b, c, d, e, f, g, h, a, K[23], W(23));
- R(a, b, c, d, e, f, g, h, K[24], W(24));
- R(h, a, b, c, d, e, f, g, K[25], W(25));
- R(g, h, a, b, c, d, e, f, K[26], W(26));
- R(f, g, h, a, b, c, d, e, K[27], W(27));
- R(e, f, g, h, a, b, c, d, K[28], W(28));
- R(d, e, f, g, h, a, b, c, K[29], W(29));
- R(c, d, e, f, g, h, a, b, K[30], W(30));
- R(b, c, d, e, f, g, h, a, K[31], W(31));
-
- R(a, b, c, d, e, f, g, h, K[32], W(32));
- R(h, a, b, c, d, e, f, g, K[33], W(33));
- R(g, h, a, b, c, d, e, f, K[34], W(34));
- R(f, g, h, a, b, c, d, e, K[35], W(35));
- R(e, f, g, h, a, b, c, d, K[36], W(36));
- R(d, e, f, g, h, a, b, c, K[37], W(37));
- R(c, d, e, f, g, h, a, b, K[38], W(38));
- R(b, c, d, e, f, g, h, a, K[39], W(39));
- R(a, b, c, d, e, f, g, h, K[40], W(40));
- R(h, a, b, c, d, e, f, g, K[41], W(41));
- R(g, h, a, b, c, d, e, f, K[42], W(42));
- R(f, g, h, a, b, c, d, e, K[43], W(43));
- R(e, f, g, h, a, b, c, d, K[44], W(44));
- R(d, e, f, g, h, a, b, c, K[45], W(45));
- R(c, d, e, f, g, h, a, b, K[46], W(46));
- R(b, c, d, e, f, g, h, a, K[47], W(47));
-
- R(a, b, c, d, e, f, g, h, K[48], W(48));
- R(h, a, b, c, d, e, f, g, K[49], W(49));
- R(g, h, a, b, c, d, e, f, K[50], W(50));
- R(f, g, h, a, b, c, d, e, K[51], W(51));
- R(e, f, g, h, a, b, c, d, K[52], W(52));
- R(d, e, f, g, h, a, b, c, K[53], W(53));
- R(c, d, e, f, g, h, a, b, K[54], W(54));
- R(b, c, d, e, f, g, h, a, K[55], W(55));
- R(a, b, c, d, e, f, g, h, K[56], W(56));
- R(h, a, b, c, d, e, f, g, K[57], W(57));
- R(g, h, a, b, c, d, e, f, K[58], W(58));
- R(f, g, h, a, b, c, d, e, K[59], W(59));
- R(e, f, g, h, a, b, c, d, K[60], W(60));
- R(d, e, f, g, h, a, b, c, K[61], W(61));
- R(c, d, e, f, g, h, a, b, K[62], W(62));
- R(b, c, d, e, f, g, h, a, K[63], W(63));
-
- hd->h0 += a;
- hd->h1 += b;
- hd->h2 += c;
- hd->h3 += d;
- hd->h4 += e;
- hd->h5 += f;
- hd->h6 += g;
- hd->h7 += h;
-
- return /*burn_stack*/ 26*4+32;
-}
-#undef S0
-#undef S1
-#undef R
-
-
-/* Assembly implementations use SystemV ABI, ABI conversion and additional
- * stack to store XMM6-XMM15 needed on Win64. */
-#undef ASM_FUNC_ABI
-#undef ASM_EXTRA_STACK
-#if defined(USE_SSSE3) || defined(USE_AVX) || defined(USE_AVX2) || \
- defined(USE_SHAEXT)
-# ifdef HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS
-# define ASM_FUNC_ABI __attribute__((sysv_abi))
-# define ASM_EXTRA_STACK (10 * 16)
-# else
-# define ASM_FUNC_ABI
-# define ASM_EXTRA_STACK 0
-# endif
-#endif
-
-
-#ifdef USE_SSSE3
-unsigned int _gcry_sha256_transform_amd64_ssse3(const void *input_data,
- u32 state[8],
- size_t num_blks) ASM_FUNC_ABI;
-#endif
-
-#ifdef USE_AVX
-unsigned int _gcry_sha256_transform_amd64_avx(const void *input_data,
- u32 state[8],
- size_t num_blks) ASM_FUNC_ABI;
-#endif
-
-#ifdef USE_AVX2
-unsigned int _gcry_sha256_transform_amd64_avx2(const void *input_data,
- u32 state[8],
- size_t num_blks) ASM_FUNC_ABI;
-#endif
-
-#ifdef USE_SHAEXT
-/* Does not need ASM_FUNC_ABI */
-unsigned int
-_gcry_sha256_transform_intel_shaext(u32 state[8],
- const unsigned char *input_data,
- size_t num_blks);
-#endif
-
-#ifdef USE_ARM_CE
-unsigned int _gcry_sha256_transform_armv8_ce(u32 state[8],
- const void *input_data,
- size_t num_blks);
-#endif
-
-static unsigned int
-transform (void *ctx, const unsigned char *data, size_t nblks)
-{
- SHA256_CONTEXT *hd = ctx;
- unsigned int burn;
-
-#ifdef USE_SHAEXT
- if (hd->use_shaext)
- {
- burn = _gcry_sha256_transform_intel_shaext (&hd->h0, data, nblks);
- burn += burn ? 4 * sizeof(void*) + ASM_EXTRA_STACK : 0;
- return burn;
- }
-#endif
-
-#ifdef USE_AVX2
- if (hd->use_avx2)
- {
- burn = _gcry_sha256_transform_amd64_avx2 (data, &hd->h0, nblks);
- burn += burn ? 4 * sizeof(void*) + ASM_EXTRA_STACK : 0;
- return burn;
- }
-#endif
-
-#ifdef USE_AVX
- if (hd->use_avx)
- {
- burn = _gcry_sha256_transform_amd64_avx (data, &hd->h0, nblks);
- burn += burn ? 4 * sizeof(void*) + ASM_EXTRA_STACK : 0;
- return burn;
- }
-#endif
-
-#ifdef USE_SSSE3
- if (hd->use_ssse3)
+ do
{
- burn = _gcry_sha256_transform_amd64_ssse3 (data, &hd->h0, nblks);
- burn += burn ? 4 * sizeof(void*) + ASM_EXTRA_STACK : 0;
- return burn;
- }
-#endif
-#ifdef USE_ARM_CE
- if (hd->use_arm_ce)
- {
- burn = _gcry_sha256_transform_armv8_ce (&hd->h0, data, nblks);
- burn += burn ? 4 * sizeof(void*) : 0;
- return burn;
- }
-#endif
+ u32 a,b,c,d,e,f,g,h,t1,t2;
+ u32 w[16];
+
+ a = hd->h0;
+ b = hd->h1;
+ c = hd->h2;
+ d = hd->h3;
+ e = hd->h4;
+ f = hd->h5;
+ g = hd->h6;
+ h = hd->h7;
+
+ R(a, b, c, d, e, f, g, h, K[0], I(0));
+ R(h, a, b, c, d, e, f, g, K[1], I(1));
+ R(g, h, a, b, c, d, e, f, K[2], I(2));
+ R(f, g, h, a, b, c, d, e, K[3], I(3));
+ R(e, f, g, h, a, b, c, d, K[4], I(4));
+ R(d, e, f, g, h, a, b, c, K[5], I(5));
+ R(c, d, e, f, g, h, a, b, K[6], I(6));
+ R(b, c, d, e, f, g, h, a, K[7], I(7));
+ R(a, b, c, d, e, f, g, h, K[8], I(8));
+ R(h, a, b, c, d, e, f, g, K[9], I(9));
+ R(g, h, a, b, c, d, e, f, K[10], I(10));
+ R(f, g, h, a, b, c, d, e, K[11], I(11));
+ R(e, f, g, h, a, b, c, d, K[12], I(12));
+ R(d, e, f, g, h, a, b, c, K[13], I(13));
+ R(c, d, e, f, g, h, a, b, K[14], I(14));
+ R(b, c, d, e, f, g, h, a, K[15], I(15));
+
+ R(a, b, c, d, e, f, g, h, K[16], W(16));
+ R(h, a, b, c, d, e, f, g, K[17], W(17));
+ R(g, h, a, b, c, d, e, f, K[18], W(18));
+ R(f, g, h, a, b, c, d, e, K[19], W(19));
+ R(e, f, g, h, a, b, c, d, K[20], W(20));
+ R(d, e, f, g, h, a, b, c, K[21], W(21));
+ R(c, d, e, f, g, h, a, b, K[22], W(22));
+ R(b, c, d, e, f, g, h, a, K[23], W(23));
+ R(a, b, c, d, e, f, g, h, K[24], W(24));
+ R(h, a, b, c, d, e, f, g, K[25], W(25));
+ R(g, h, a, b, c, d, e, f, K[26], W(26));
+ R(f, g, h, a, b, c, d, e, K[27], W(27));
+ R(e, f, g, h, a, b, c, d, K[28], W(28));
+ R(d, e, f, g, h, a, b, c, K[29], W(29));
+ R(c, d, e, f, g, h, a, b, K[30], W(30));
+ R(b, c, d, e, f, g, h, a, K[31], W(31));
+
+ R(a, b, c, d, e, f, g, h, K[32], W(32));
+ R(h, a, b, c, d, e, f, g, K[33], W(33));
+ R(g, h, a, b, c, d, e, f, K[34], W(34));
+ R(f, g, h, a, b, c, d, e, K[35], W(35));
+ R(e, f, g, h, a, b, c, d, K[36], W(36));
+ R(d, e, f, g, h, a, b, c, K[37], W(37));
+ R(c, d, e, f, g, h, a, b, K[38], W(38));
+ R(b, c, d, e, f, g, h, a, K[39], W(39));
+ R(a, b, c, d, e, f, g, h, K[40], W(40));
+ R(h, a, b, c, d, e, f, g, K[41], W(41));
+ R(g, h, a, b, c, d, e, f, K[42], W(42));
+ R(f, g, h, a, b, c, d, e, K[43], W(43));
+ R(e, f, g, h, a, b, c, d, K[44], W(44));
+ R(d, e, f, g, h, a, b, c, K[45], W(45));
+ R(c, d, e, f, g, h, a, b, K[46], W(46));
+ R(b, c, d, e, f, g, h, a, K[47], W(47));
+
+ R(a, b, c, d, e, f, g, h, K[48], W(48));
+ R(h, a, b, c, d, e, f, g, K[49], W(49));
+ R(g, h, a, b, c, d, e, f, K[50], W(50));
+ R(f, g, h, a, b, c, d, e, K[51], W(51));
+ R(e, f, g, h, a, b, c, d, K[52], W(52));
+ R(d, e, f, g, h, a, b, c, K[53], W(53));
+ R(c, d, e, f, g, h, a, b, K[54], W(54));
+ R(b, c, d, e, f, g, h, a, K[55], W(55));
+ R(a, b, c, d, e, f, g, h, K[56], W(56));
+ R(h, a, b, c, d, e, f, g, K[57], W(57));
+ R(g, h, a, b, c, d, e, f, K[58], W(58));
+ R(f, g, h, a, b, c, d, e, K[59], W(59));
+ R(e, f, g, h, a, b, c, d, K[60], W(60));
+ R(d, e, f, g, h, a, b, c, K[61], W(61));
+ R(c, d, e, f, g, h, a, b, K[62], W(62));
+ R(b, c, d, e, f, g, h, a, K[63], W(63));
+
+ hd->h0 += a;
+ hd->h1 += b;
+ hd->h2 += c;
+ hd->h3 += d;
+ hd->h4 += e;
+ hd->h5 += f;
+ hd->h6 += g;
+ hd->h7 += h;
- do
- {
- burn = transform_blk (hd, data);
data += 64;
}
while (--nblks);
-#ifdef ASM_EXTRA_STACK
- /* 'transform_blk' is typically inlined and XMM6-XMM15 are stored at
- * the prologue of this function. Therefore need to add ASM_EXTRA_STACK to
- * here too.
- */
- burn += ASM_EXTRA_STACK;
-#endif
-
- return burn;
+ return 26*4 + 32 + 3 * sizeof(void*);
}
+#undef S0
+#undef S1
+#undef R
+
/*
The routine finally terminates the computation and returns the
@@ -534,7 +515,7 @@ sha256_final(void *context)
/* append the 64 bit count */
buf_put_be32(hd->bctx.buf + 56, msb);
buf_put_be32(hd->bctx.buf + 60, lsb);
- burn = transform (hd, hd->bctx.buf, 1);
+ burn = (*hd->bctx.bwrite) (hd, hd->bctx.buf, 1);
_gcry_burn_stack (burn);
p = hd->bctx.buf;
diff --git a/cipher/sha512-armv7-neon.S b/cipher/sha512-armv7-neon.S
index a9d127245..6596f2cdb 100644
--- a/cipher/sha512-armv7-neon.S
+++ b/cipher/sha512-armv7-neon.S
@@ -443,6 +443,7 @@ _gcry_sha512_transform_armv7_neon:
veor.u64 %q2, %q2;
veor.u64 %q3, %q3;
+ eor %r0, %r0;
pop {%pc};
.size _gcry_sha512_transform_armv7_neon,.-_gcry_sha512_transform_armv7_neon;
diff --git a/cipher/sha512.c b/cipher/sha512.c
index 9405de80b..721f34054 100644
--- a/cipher/sha512.c
+++ b/cipher/sha512.c
@@ -113,22 +113,145 @@ typedef struct
{
gcry_md_block_ctx_t bctx;
SHA512_STATE state;
+} SHA512_CONTEXT;
+
+
+static const u64 k[] =
+ {
+ U64_C(0x428a2f98d728ae22), U64_C(0x7137449123ef65cd),
+ U64_C(0xb5c0fbcfec4d3b2f), U64_C(0xe9b5dba58189dbbc),
+ U64_C(0x3956c25bf348b538), U64_C(0x59f111f1b605d019),
+ U64_C(0x923f82a4af194f9b), U64_C(0xab1c5ed5da6d8118),
+ U64_C(0xd807aa98a3030242), U64_C(0x12835b0145706fbe),
+ U64_C(0x243185be4ee4b28c), U64_C(0x550c7dc3d5ffb4e2),
+ U64_C(0x72be5d74f27b896f), U64_C(0x80deb1fe3b1696b1),
+ U64_C(0x9bdc06a725c71235), U64_C(0xc19bf174cf692694),
+ U64_C(0xe49b69c19ef14ad2), U64_C(0xefbe4786384f25e3),
+ U64_C(0x0fc19dc68b8cd5b5), U64_C(0x240ca1cc77ac9c65),
+ U64_C(0x2de92c6f592b0275), U64_C(0x4a7484aa6ea6e483),
+ U64_C(0x5cb0a9dcbd41fbd4), U64_C(0x76f988da831153b5),
+ U64_C(0x983e5152ee66dfab), U64_C(0xa831c66d2db43210),
+ U64_C(0xb00327c898fb213f), U64_C(0xbf597fc7beef0ee4),
+ U64_C(0xc6e00bf33da88fc2), U64_C(0xd5a79147930aa725),
+ U64_C(0x06ca6351e003826f), U64_C(0x142929670a0e6e70),
+ U64_C(0x27b70a8546d22ffc), U64_C(0x2e1b21385c26c926),
+ U64_C(0x4d2c6dfc5ac42aed), U64_C(0x53380d139d95b3df),
+ U64_C(0x650a73548baf63de), U64_C(0x766a0abb3c77b2a8),
+ U64_C(0x81c2c92e47edaee6), U64_C(0x92722c851482353b),
+ U64_C(0xa2bfe8a14cf10364), U64_C(0xa81a664bbc423001),
+ U64_C(0xc24b8b70d0f89791), U64_C(0xc76c51a30654be30),
+ U64_C(0xd192e819d6ef5218), U64_C(0xd69906245565a910),
+ U64_C(0xf40e35855771202a), U64_C(0x106aa07032bbd1b8),
+ U64_C(0x19a4c116b8d2d0c8), U64_C(0x1e376c085141ab53),
+ U64_C(0x2748774cdf8eeb99), U64_C(0x34b0bcb5e19b48a8),
+ U64_C(0x391c0cb3c5c95a63), U64_C(0x4ed8aa4ae3418acb),
+ U64_C(0x5b9cca4f7763e373), U64_C(0x682e6ff3d6b2b8a3),
+ U64_C(0x748f82ee5defb2fc), U64_C(0x78a5636f43172f60),
+ U64_C(0x84c87814a1f0ab72), U64_C(0x8cc702081a6439ec),
+ U64_C(0x90befffa23631e28), U64_C(0xa4506cebde82bde9),
+ U64_C(0xbef9a3f7b2c67915), U64_C(0xc67178f2e372532b),
+ U64_C(0xca273eceea26619c), U64_C(0xd186b8c721c0c207),
+ U64_C(0xeada7dd6cde0eb1e), U64_C(0xf57d4f7fee6ed178),
+ U64_C(0x06f067aa72176fba), U64_C(0x0a637dc5a2c898a6),
+ U64_C(0x113f9804bef90dae), U64_C(0x1b710b35131c471b),
+ U64_C(0x28db77f523047d84), U64_C(0x32caab7b40c72493),
+ U64_C(0x3c9ebe0a15c9bebc), U64_C(0x431d67c49c100d4c),
+ U64_C(0x4cc5d4becb3e42b6), U64_C(0x597f299cfc657e2a),
+ U64_C(0x5fcb6fab3ad6faec), U64_C(0x6c44198c4a475817)
+ };
+
+
+/* AMD64 assembly implementations use SystemV ABI, ABI conversion and additional
+ * stack to store XMM6-XMM15 needed on Win64. */
+#undef ASM_FUNC_ABI
+#undef ASM_EXTRA_STACK
+#if defined(USE_SSSE3) || defined(USE_AVX) || defined(USE_AVX2)
+# ifdef HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS
+# define ASM_FUNC_ABI __attribute__((sysv_abi))
+# define ASM_EXTRA_STACK (10 * 16 + 4 * sizeof(void *))
+# else
+# define ASM_FUNC_ABI
+# define ASM_EXTRA_STACK 0
+# endif
+#endif
+
+
#ifdef USE_ARM_NEON_ASM
- unsigned int use_neon:1;
+unsigned int _gcry_sha512_transform_armv7_neon (SHA512_STATE *hd,
+ const unsigned char *data,
+ const u64 k[], size_t num_blks);
+
+static unsigned int
+do_sha512_transform_armv7_neon(void *ctx, const unsigned char *data,
+ size_t nblks)
+{
+ SHA512_CONTEXT *hd = ctx;
+ return _gcry_sha512_transform_armv7_neon (&hd->state, data, k, nblks);
+}
#endif
+
#ifdef USE_SSSE3
- unsigned int use_ssse3:1;
+unsigned int _gcry_sha512_transform_amd64_ssse3(const void *input_data,
+ void *state,
+ size_t num_blks) ASM_FUNC_ABI;
+
+static unsigned int
+do_sha512_transform_amd64_ssse3(void *ctx, const unsigned char *data,
+ size_t nblks)
+{
+ SHA512_CONTEXT *hd = ctx;
+ return _gcry_sha512_transform_amd64_ssse3 (data, &hd->state, nblks)
+ + ASM_EXTRA_STACK;
+}
#endif
+
#ifdef USE_AVX
- unsigned int use_avx:1;
+unsigned int _gcry_sha512_transform_amd64_avx(const void *input_data,
+ void *state,
+ size_t num_blks) ASM_FUNC_ABI;
+
+static unsigned int
+do_sha512_transform_amd64_avx(void *ctx, const unsigned char *data,
+ size_t nblks)
+{
+ SHA512_CONTEXT *hd = ctx;
+ return _gcry_sha512_transform_amd64_avx (data, &hd->state, nblks)
+ + ASM_EXTRA_STACK;
+}
#endif
+
#ifdef USE_AVX2
- unsigned int use_avx2:1;
+unsigned int _gcry_sha512_transform_amd64_avx2(const void *input_data,
+ void *state,
+ size_t num_blks) ASM_FUNC_ABI;
+
+static unsigned int
+do_sha512_transform_amd64_avx2(void *ctx, const unsigned char *data,
+ size_t nblks)
+{
+ SHA512_CONTEXT *hd = ctx;
+ return _gcry_sha512_transform_amd64_avx2 (data, &hd->state, nblks)
+ + ASM_EXTRA_STACK;
+}
#endif
-} SHA512_CONTEXT;
+
+
+#ifdef USE_ARM_ASM
+unsigned int _gcry_sha512_transform_arm (SHA512_STATE *hd,
+ const unsigned char *data,
+ const u64 k[], size_t num_blks);
static unsigned int
-transform (void *context, const unsigned char *data, size_t nblks);
+do_transform_generic (void *context, const unsigned char *data, size_t nblks)
+{
+ SHA512_CONTEXT *hd = context;
+ return _gcry_sha512_transform_arm (&hd->state, data, k, nblks);
+}
+#else
+static unsigned int
+do_transform_generic (void *context, const unsigned char *data, size_t nblks);
+#endif
+
static void
sha512_init (void *context, unsigned int flags)
@@ -138,6 +261,7 @@ sha512_init (void *context, unsigned int flags)
unsigned int features = _gcry_get_hw_features ();
(void)flags;
+ (void)k;
hd->h0 = U64_C(0x6a09e667f3bcc908);
hd->h1 = U64_C(0xbb67ae8584caa73b);
@@ -152,21 +276,27 @@ sha512_init (void *context, unsigned int flags)
ctx->bctx.nblocks_high = 0;
ctx->bctx.count = 0;
ctx->bctx.blocksize = 128;
- ctx->bctx.bwrite = transform;
+ /* Order of feature checks is important here; last match will be
+ * selected. Keep slower implementations at the top and faster at
+ * the bottom. */
+ ctx->bctx.bwrite = do_transform_generic;
#ifdef USE_ARM_NEON_ASM
- ctx->use_neon = (features & HWF_ARM_NEON) != 0;
+ if ((features & HWF_ARM_NEON) != 0)
+ ctx->bctx.bwrite = do_sha512_transform_armv7_neon;
#endif
#ifdef USE_SSSE3
- ctx->use_ssse3 = (features & HWF_INTEL_SSSE3) != 0;
+ if ((features & HWF_INTEL_SSSE3) != 0)
+ ctx->bctx.bwrite = do_sha512_transform_amd64_ssse3;
#endif
#ifdef USE_AVX
- ctx->use_avx = (features & HWF_INTEL_AVX) && (features & HWF_INTEL_FAST_SHLD);
+ if ((features & HWF_INTEL_AVX) && (features & HWF_INTEL_FAST_SHLD))
+ ctx->bctx.bwrite = do_sha512_transform_amd64_avx;
#endif
#ifdef USE_AVX2
- ctx->use_avx2 = (features & HWF_INTEL_AVX2) && (features & HWF_INTEL_BMI2);
+ if ((features & HWF_INTEL_AVX2) && (features & HWF_INTEL_BMI2))
+ ctx->bctx.bwrite = do_sha512_transform_amd64_avx2;
#endif
-
(void)features;
}
@@ -192,69 +322,31 @@ sha384_init (void *context, unsigned int flags)
ctx->bctx.nblocks_high = 0;
ctx->bctx.count = 0;
ctx->bctx.blocksize = 128;
- ctx->bctx.bwrite = transform;
+ /* Order of feature checks is important here; last match will be
+ * selected. Keep slower implementations at the top and faster at
+ * the bottom. */
+ ctx->bctx.bwrite = do_transform_generic;
#ifdef USE_ARM_NEON_ASM
- ctx->use_neon = (features & HWF_ARM_NEON) != 0;
+ if ((features & HWF_ARM_NEON) != 0)
+ ctx->bctx.bwrite = do_sha512_transform_armv7_neon;
#endif
#ifdef USE_SSSE3
- ctx->use_ssse3 = (features & HWF_INTEL_SSSE3) != 0;
+ if ((features & HWF_INTEL_SSSE3) != 0)
+ ctx->bctx.bwrite = do_sha512_transform_amd64_ssse3;
#endif
#ifdef USE_AVX
- ctx->use_avx = (features & HWF_INTEL_AVX) && (features & HWF_INTEL_FAST_SHLD);
+ if ((features & HWF_INTEL_AVX) && (features & HWF_INTEL_FAST_SHLD))
+ ctx->bctx.bwrite = do_sha512_transform_amd64_avx;
#endif
#ifdef USE_AVX2
- ctx->use_avx2 = (features & HWF_INTEL_AVX2) && (features & HWF_INTEL_BMI2);
+ if ((features & HWF_INTEL_AVX2) && (features & HWF_INTEL_BMI2))
+ ctx->bctx.bwrite = do_sha512_transform_amd64_avx2;
#endif
-
(void)features;
}
-static const u64 k[] =
- {
- U64_C(0x428a2f98d728ae22), U64_C(0x7137449123ef65cd),
- U64_C(0xb5c0fbcfec4d3b2f), U64_C(0xe9b5dba58189dbbc),
- U64_C(0x3956c25bf348b538), U64_C(0x59f111f1b605d019),
- U64_C(0x923f82a4af194f9b), U64_C(0xab1c5ed5da6d8118),
- U64_C(0xd807aa98a3030242), U64_C(0x12835b0145706fbe),
- U64_C(0x243185be4ee4b28c), U64_C(0x550c7dc3d5ffb4e2),
- U64_C(0x72be5d74f27b896f), U64_C(0x80deb1fe3b1696b1),
- U64_C(0x9bdc06a725c71235), U64_C(0xc19bf174cf692694),
- U64_C(0xe49b69c19ef14ad2), U64_C(0xefbe4786384f25e3),
- U64_C(0x0fc19dc68b8cd5b5), U64_C(0x240ca1cc77ac9c65),
- U64_C(0x2de92c6f592b0275), U64_C(0x4a7484aa6ea6e483),
- U64_C(0x5cb0a9dcbd41fbd4), U64_C(0x76f988da831153b5),
- U64_C(0x983e5152ee66dfab), U64_C(0xa831c66d2db43210),
- U64_C(0xb00327c898fb213f), U64_C(0xbf597fc7beef0ee4),
- U64_C(0xc6e00bf33da88fc2), U64_C(0xd5a79147930aa725),
- U64_C(0x06ca6351e003826f), U64_C(0x142929670a0e6e70),
- U64_C(0x27b70a8546d22ffc), U64_C(0x2e1b21385c26c926),
- U64_C(0x4d2c6dfc5ac42aed), U64_C(0x53380d139d95b3df),
- U64_C(0x650a73548baf63de), U64_C(0x766a0abb3c77b2a8),
- U64_C(0x81c2c92e47edaee6), U64_C(0x92722c851482353b),
- U64_C(0xa2bfe8a14cf10364), U64_C(0xa81a664bbc423001),
- U64_C(0xc24b8b70d0f89791), U64_C(0xc76c51a30654be30),
- U64_C(0xd192e819d6ef5218), U64_C(0xd69906245565a910),
- U64_C(0xf40e35855771202a), U64_C(0x106aa07032bbd1b8),
- U64_C(0x19a4c116b8d2d0c8), U64_C(0x1e376c085141ab53),
- U64_C(0x2748774cdf8eeb99), U64_C(0x34b0bcb5e19b48a8),
- U64_C(0x391c0cb3c5c95a63), U64_C(0x4ed8aa4ae3418acb),
- U64_C(0x5b9cca4f7763e373), U64_C(0x682e6ff3d6b2b8a3),
- U64_C(0x748f82ee5defb2fc), U64_C(0x78a5636f43172f60),
- U64_C(0x84c87814a1f0ab72), U64_C(0x8cc702081a6439ec),
- U64_C(0x90befffa23631e28), U64_C(0xa4506cebde82bde9),
- U64_C(0xbef9a3f7b2c67915), U64_C(0xc67178f2e372532b),
- U64_C(0xca273eceea26619c), U64_C(0xd186b8c721c0c207),
- U64_C(0xeada7dd6cde0eb1e), U64_C(0xf57d4f7fee6ed178),
- U64_C(0x06f067aa72176fba), U64_C(0x0a637dc5a2c898a6),
- U64_C(0x113f9804bef90dae), U64_C(0x1b710b35131c471b),
- U64_C(0x28db77f523047d84), U64_C(0x32caab7b40c72493),
- U64_C(0x3c9ebe0a15c9bebc), U64_C(0x431d67c49c100d4c),
- U64_C(0x4cc5d4becb3e42b6), U64_C(0x597f299cfc657e2a),
- U64_C(0x5fcb6fab3ad6faec), U64_C(0x6c44198c4a475817)
- };
-
#ifndef USE_ARM_ASM
static inline u64
@@ -291,372 +383,240 @@ Sum1 (u64 x)
* Transform the message W which consists of 16 64-bit-words
*/
static unsigned int
-transform_blk (SHA512_STATE *hd, const unsigned char *data)
-{
- u64 a, b, c, d, e, f, g, h;
- u64 w[16];
- int t;
-
- /* get values from the chaining vars */
- a = hd->h0;
- b = hd->h1;
- c = hd->h2;
- d = hd->h3;
- e = hd->h4;
- f = hd->h5;
- g = hd->h6;
- h = hd->h7;
-
- for ( t = 0; t < 16; t++ )
- w[t] = buf_get_be64(data + t * 8);
-
-#define S0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
-#define S1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
-
- for (t = 0; t < 80 - 16; )
- {
- u64 t1, t2;
-
- /* Performance on a AMD Athlon(tm) Dual Core Processor 4050e
- with gcc 4.3.3 using gcry_md_hash_buffer of each 10000 bytes
- initialized to 0,1,2,3...255,0,... and 1000 iterations:
-
- Not unrolled with macros: 440ms
- Unrolled with macros: 350ms
- Unrolled with inline: 330ms
- */
-#if 0 /* Not unrolled. */
- t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[t%16];
- w[t%16] += S1 (w[(t - 2)%16]) + w[(t - 7)%16] + S0 (w[(t - 15)%16]);
- t2 = Sum0 (a) + Maj (a, b, c);
- h = g;
- g = f;
- f = e;
- e = d + t1;
- d = c;
- c = b;
- b = a;
- a = t1 + t2;
- t++;
-#else /* Unrolled to interweave the chain variables. */
- t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[0];
- w[0] += S1 (w[14]) + w[9] + S0 (w[1]);
- t2 = Sum0 (a) + Maj (a, b, c);
- d += t1;
- h = t1 + t2;
-
- t1 = g + Sum1 (d) + Ch (d, e, f) + k[t+1] + w[1];
- w[1] += S1 (w[15]) + w[10] + S0 (w[2]);
- t2 = Sum0 (h) + Maj (h, a, b);
- c += t1;
- g = t1 + t2;
-
- t1 = f + Sum1 (c) + Ch (c, d, e) + k[t+2] + w[2];
- w[2] += S1 (w[0]) + w[11] + S0 (w[3]);
- t2 = Sum0 (g) + Maj (g, h, a);
- b += t1;
- f = t1 + t2;
-
- t1 = e + Sum1 (b) + Ch (b, c, d) + k[t+3] + w[3];
- w[3] += S1 (w[1]) + w[12] + S0 (w[4]);
- t2 = Sum0 (f) + Maj (f, g, h);
- a += t1;
- e = t1 + t2;
-
- t1 = d + Sum1 (a) + Ch (a, b, c) + k[t+4] + w[4];
- w[4] += S1 (w[2]) + w[13] + S0 (w[5]);
- t2 = Sum0 (e) + Maj (e, f, g);
- h += t1;
- d = t1 + t2;
-
- t1 = c + Sum1 (h) + Ch (h, a, b) + k[t+5] + w[5];
- w[5] += S1 (w[3]) + w[14] + S0 (w[6]);
- t2 = Sum0 (d) + Maj (d, e, f);
- g += t1;
- c = t1 + t2;
-
- t1 = b + Sum1 (g) + Ch (g, h, a) + k[t+6] + w[6];
- w[6] += S1 (w[4]) + w[15] + S0 (w[7]);
- t2 = Sum0 (c) + Maj (c, d, e);
- f += t1;
- b = t1 + t2;
-
- t1 = a + Sum1 (f) + Ch (f, g, h) + k[t+7] + w[7];
- w[7] += S1 (w[5]) + w[0] + S0 (w[8]);
- t2 = Sum0 (b) + Maj (b, c, d);
- e += t1;
- a = t1 + t2;
-
- t1 = h + Sum1 (e) + Ch (e, f, g) + k[t+8] + w[8];
- w[8] += S1 (w[6]) + w[1] + S0 (w[9]);
- t2 = Sum0 (a) + Maj (a, b, c);
- d += t1;
- h = t1 + t2;
-
- t1 = g + Sum1 (d) + Ch (d, e, f) + k[t+9] + w[9];
- w[9] += S1 (w[7]) + w[2] + S0 (w[10]);
- t2 = Sum0 (h) + Maj (h, a, b);
- c += t1;
- g = t1 + t2;
-
- t1 = f + Sum1 (c) + Ch (c, d, e) + k[t+10] + w[10];
- w[10] += S1 (w[8]) + w[3] + S0 (w[11]);
- t2 = Sum0 (g) + Maj (g, h, a);
- b += t1;
- f = t1 + t2;
-
- t1 = e + Sum1 (b) + Ch (b, c, d) + k[t+11] + w[11];
- w[11] += S1 (w[9]) + w[4] + S0 (w[12]);
- t2 = Sum0 (f) + Maj (f, g, h);
- a += t1;
- e = t1 + t2;
-
- t1 = d + Sum1 (a) + Ch (a, b, c) + k[t+12] + w[12];
- w[12] += S1 (w[10]) + w[5] + S0 (w[13]);
- t2 = Sum0 (e) + Maj (e, f, g);
- h += t1;
- d = t1 + t2;
-
- t1 = c + Sum1 (h) + Ch (h, a, b) + k[t+13] + w[13];
- w[13] += S1 (w[11]) + w[6] + S0 (w[14]);
- t2 = Sum0 (d) + Maj (d, e, f);
- g += t1;
- c = t1 + t2;
-
- t1 = b + Sum1 (g) + Ch (g, h, a) + k[t+14] + w[14];
- w[14] += S1 (w[12]) + w[7] + S0 (w[15]);
- t2 = Sum0 (c) + Maj (c, d, e);
- f += t1;
- b = t1 + t2;
-
- t1 = a + Sum1 (f) + Ch (f, g, h) + k[t+15] + w[15];
- w[15] += S1 (w[13]) + w[8] + S0 (w[0]);
- t2 = Sum0 (b) + Maj (b, c, d);
- e += t1;
- a = t1 + t2;
-
- t += 16;
-#endif
- }
-
- for (; t < 80; )
- {
- u64 t1, t2;
-
-#if 0 /* Not unrolled. */
- t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[t%16];
- t2 = Sum0 (a) + Maj (a, b, c);
- h = g;
- g = f;
- f = e;
- e = d + t1;
- d = c;
- c = b;
- b = a;
- a = t1 + t2;
- t++;
-#else /* Unrolled to interweave the chain variables. */
- t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[0];
- t2 = Sum0 (a) + Maj (a, b, c);
- d += t1;
- h = t1 + t2;
-
- t1 = g + Sum1 (d) + Ch (d, e, f) + k[t+1] + w[1];
- t2 = Sum0 (h) + Maj (h, a, b);
- c += t1;
- g = t1 + t2;
-
- t1 = f + Sum1 (c) + Ch (c, d, e) + k[t+2] + w[2];
- t2 = Sum0 (g) + Maj (g, h, a);
- b += t1;
- f = t1 + t2;
-
- t1 = e + Sum1 (b) + Ch (b, c, d) + k[t+3] + w[3];
- t2 = Sum0 (f) + Maj (f, g, h);
- a += t1;
- e = t1 + t2;
-
- t1 = d + Sum1 (a) + Ch (a, b, c) + k[t+4] + w[4];
- t2 = Sum0 (e) + Maj (e, f, g);
- h += t1;
- d = t1 + t2;
-
- t1 = c + Sum1 (h) + Ch (h, a, b) + k[t+5] + w[5];
- t2 = Sum0 (d) + Maj (d, e, f);
- g += t1;
- c = t1 + t2;
-
- t1 = b + Sum1 (g) + Ch (g, h, a) + k[t+6] + w[6];
- t2 = Sum0 (c) + Maj (c, d, e);
- f += t1;
- b = t1 + t2;
-
- t1 = a + Sum1 (f) + Ch (f, g, h) + k[t+7] + w[7];
- t2 = Sum0 (b) + Maj (b, c, d);
- e += t1;
- a = t1 + t2;
-
- t1 = h + Sum1 (e) + Ch (e, f, g) + k[t+8] + w[8];
- t2 = Sum0 (a) + Maj (a, b, c);
- d += t1;
- h = t1 + t2;
-
- t1 = g + Sum1 (d) + Ch (d, e, f) + k[t+9] + w[9];
- t2 = Sum0 (h) + Maj (h, a, b);
- c += t1;
- g = t1 + t2;
-
- t1 = f + Sum1 (c) + Ch (c, d, e) + k[t+10] + w[10];
- t2 = Sum0 (g) + Maj (g, h, a);
- b += t1;
- f = t1 + t2;
-
- t1 = e + Sum1 (b) + Ch (b, c, d) + k[t+11] + w[11];
- t2 = Sum0 (f) + Maj (f, g, h);
- a += t1;
- e = t1 + t2;
-
- t1 = d + Sum1 (a) + Ch (a, b, c) + k[t+12] + w[12];
- t2 = Sum0 (e) + Maj (e, f, g);
- h += t1;
- d = t1 + t2;
-
- t1 = c + Sum1 (h) + Ch (h, a, b) + k[t+13] + w[13];
- t2 = Sum0 (d) + Maj (d, e, f);
- g += t1;
- c = t1 + t2;
-
- t1 = b + Sum1 (g) + Ch (g, h, a) + k[t+14] + w[14];
- t2 = Sum0 (c) + Maj (c, d, e);
- f += t1;
- b = t1 + t2;
-
- t1 = a + Sum1 (f) + Ch (f, g, h) + k[t+15] + w[15];
- t2 = Sum0 (b) + Maj (b, c, d);
- e += t1;
- a = t1 + t2;
-
- t += 16;
-#endif
- }
-
- /* Update chaining vars. */
- hd->h0 += a;
- hd->h1 += b;
- hd->h2 += c;
- hd->h3 += d;
- hd->h4 += e;
- hd->h5 += f;
- hd->h6 += g;
- hd->h7 += h;
-
- return /* burn_stack */ (8 + 16) * sizeof(u64) + sizeof(u32) +
- 3 * sizeof(void*);
-}
-#endif /*!USE_ARM_ASM*/
-
-/* AMD64 assembly implementations use SystemV ABI, ABI conversion and additional
- * stack to store XMM6-XMM15 needed on Win64. */
-#undef ASM_FUNC_ABI
-#undef ASM_EXTRA_STACK
-#if defined(USE_SSSE3) || defined(USE_AVX) || defined(USE_AVX2)
-# ifdef HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS
-# define ASM_FUNC_ABI __attribute__((sysv_abi))
-# define ASM_EXTRA_STACK (10 * 16)
-# else
-# define ASM_FUNC_ABI
-# define ASM_EXTRA_STACK 0
-# endif
-#endif
-
-
-#ifdef USE_ARM_NEON_ASM
-void _gcry_sha512_transform_armv7_neon (SHA512_STATE *hd,
- const unsigned char *data,
- const u64 k[], size_t num_blks);
-#endif
-
-#ifdef USE_ARM_ASM
-unsigned int _gcry_sha512_transform_arm (SHA512_STATE *hd,
- const unsigned char *data,
- const u64 k[], size_t num_blks);
-#endif
-
-#ifdef USE_SSSE3
-unsigned int _gcry_sha512_transform_amd64_ssse3(const void *input_data,
- void *state,
- size_t num_blks) ASM_FUNC_ABI;
-#endif
-
-#ifdef USE_AVX
-unsigned int _gcry_sha512_transform_amd64_avx(const void *input_data,
- void *state,
- size_t num_blks) ASM_FUNC_ABI;
-#endif
-
-#ifdef USE_AVX2
-unsigned int _gcry_sha512_transform_amd64_avx2(const void *input_data,
- void *state,
- size_t num_blks) ASM_FUNC_ABI;
-#endif
-
-
-static unsigned int
-transform (void *context, const unsigned char *data, size_t nblks)
+do_transform_generic (void *context, const unsigned char *data, size_t nblks)
{
SHA512_CONTEXT *ctx = context;
- unsigned int burn;
-
-#ifdef USE_AVX2
- if (ctx->use_avx2)
- return _gcry_sha512_transform_amd64_avx2 (data, &ctx->state, nblks)
- + 4 * sizeof(void*) + ASM_EXTRA_STACK;
-#endif
-
-#ifdef USE_AVX
- if (ctx->use_avx)
- return _gcry_sha512_transform_amd64_avx (data, &ctx->state, nblks)
- + 4 * sizeof(void*) + ASM_EXTRA_STACK;
-#endif
-
-#ifdef USE_SSSE3
- if (ctx->use_ssse3)
- return _gcry_sha512_transform_amd64_ssse3 (data, &ctx->state, nblks)
- + 4 * sizeof(void*) + ASM_EXTRA_STACK;
-#endif
+ SHA512_STATE *hd = &ctx->state;
-#ifdef USE_ARM_NEON_ASM
- if (ctx->use_neon)
+ do
{
- _gcry_sha512_transform_armv7_neon (&ctx->state, data, k, nblks);
+ u64 a, b, c, d, e, f, g, h;
+ u64 w[16];
+ int t;
+
+ /* get values from the chaining vars */
+ a = hd->h0;
+ b = hd->h1;
+ c = hd->h2;
+ d = hd->h3;
+ e = hd->h4;
+ f = hd->h5;
+ g = hd->h6;
+ h = hd->h7;
+
+ for ( t = 0; t < 16; t++ )
+ w[t] = buf_get_be64(data + t * 8);
- /* _gcry_sha512_transform_armv7_neon does not store sensitive data
- * to stack. */
- return /* no burn_stack */ 0;
- }
-#endif
+#define S0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
+#define S1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
+
+ for (t = 0; t < 80 - 16; )
+ {
+ u64 t1, t2;
+
+ t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[0];
+ w[0] += S1 (w[14]) + w[9] + S0 (w[1]);
+ t2 = Sum0 (a) + Maj (a, b, c);
+ d += t1;
+ h = t1 + t2;
+
+ t1 = g + Sum1 (d) + Ch (d, e, f) + k[t+1] + w[1];
+ w[1] += S1 (w[15]) + w[10] + S0 (w[2]);
+ t2 = Sum0 (h) + Maj (h, a, b);
+ c += t1;
+ g = t1 + t2;
+
+ t1 = f + Sum1 (c) + Ch (c, d, e) + k[t+2] + w[2];
+ w[2] += S1 (w[0]) + w[11] + S0 (w[3]);
+ t2 = Sum0 (g) + Maj (g, h, a);
+ b += t1;
+ f = t1 + t2;
+
+ t1 = e + Sum1 (b) + Ch (b, c, d) + k[t+3] + w[3];
+ w[3] += S1 (w[1]) + w[12] + S0 (w[4]);
+ t2 = Sum0 (f) + Maj (f, g, h);
+ a += t1;
+ e = t1 + t2;
+
+ t1 = d + Sum1 (a) + Ch (a, b, c) + k[t+4] + w[4];
+ w[4] += S1 (w[2]) + w[13] + S0 (w[5]);
+ t2 = Sum0 (e) + Maj (e, f, g);
+ h += t1;
+ d = t1 + t2;
+
+ t1 = c + Sum1 (h) + Ch (h, a, b) + k[t+5] + w[5];
+ w[5] += S1 (w[3]) + w[14] + S0 (w[6]);
+ t2 = Sum0 (d) + Maj (d, e, f);
+ g += t1;
+ c = t1 + t2;
+
+ t1 = b + Sum1 (g) + Ch (g, h, a) + k[t+6] + w[6];
+ w[6] += S1 (w[4]) + w[15] + S0 (w[7]);
+ t2 = Sum0 (c) + Maj (c, d, e);
+ f += t1;
+ b = t1 + t2;
+
+ t1 = a + Sum1 (f) + Ch (f, g, h) + k[t+7] + w[7];
+ w[7] += S1 (w[5]) + w[0] + S0 (w[8]);
+ t2 = Sum0 (b) + Maj (b, c, d);
+ e += t1;
+ a = t1 + t2;
+
+ t1 = h + Sum1 (e) + Ch (e, f, g) + k[t+8] + w[8];
+ w[8] += S1 (w[6]) + w[1] + S0 (w[9]);
+ t2 = Sum0 (a) + Maj (a, b, c);
+ d += t1;
+ h = t1 + t2;
+
+ t1 = g + Sum1 (d) + Ch (d, e, f) + k[t+9] + w[9];
+ w[9] += S1 (w[7]) + w[2] + S0 (w[10]);
+ t2 = Sum0 (h) + Maj (h, a, b);
+ c += t1;
+ g = t1 + t2;
+
+ t1 = f + Sum1 (c) + Ch (c, d, e) + k[t+10] + w[10];
+ w[10] += S1 (w[8]) + w[3] + S0 (w[11]);
+ t2 = Sum0 (g) + Maj (g, h, a);
+ b += t1;
+ f = t1 + t2;
+
+ t1 = e + Sum1 (b) + Ch (b, c, d) + k[t+11] + w[11];
+ w[11] += S1 (w[9]) + w[4] + S0 (w[12]);
+ t2 = Sum0 (f) + Maj (f, g, h);
+ a += t1;
+ e = t1 + t2;
+
+ t1 = d + Sum1 (a) + Ch (a, b, c) + k[t+12] + w[12];
+ w[12] += S1 (w[10]) + w[5] + S0 (w[13]);
+ t2 = Sum0 (e) + Maj (e, f, g);
+ h += t1;
+ d = t1 + t2;
+
+ t1 = c + Sum1 (h) + Ch (h, a, b) + k[t+13] + w[13];
+ w[13] += S1 (w[11]) + w[6] + S0 (w[14]);
+ t2 = Sum0 (d) + Maj (d, e, f);
+ g += t1;
+ c = t1 + t2;
+
+ t1 = b + Sum1 (g) + Ch (g, h, a) + k[t+14] + w[14];
+ w[14] += S1 (w[12]) + w[7] + S0 (w[15]);
+ t2 = Sum0 (c) + Maj (c, d, e);
+ f += t1;
+ b = t1 + t2;
+
+ t1 = a + Sum1 (f) + Ch (f, g, h) + k[t+15] + w[15];
+ w[15] += S1 (w[13]) + w[8] + S0 (w[0]);
+ t2 = Sum0 (b) + Maj (b, c, d);
+ e += t1;
+ a = t1 + t2;
+
+ t += 16;
+ }
+
+ for (; t < 80; )
+ {
+ u64 t1, t2;
+
+ t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[0];
+ t2 = Sum0 (a) + Maj (a, b, c);
+ d += t1;
+ h = t1 + t2;
+
+ t1 = g + Sum1 (d) + Ch (d, e, f) + k[t+1] + w[1];
+ t2 = Sum0 (h) + Maj (h, a, b);
+ c += t1;
+ g = t1 + t2;
+
+ t1 = f + Sum1 (c) + Ch (c, d, e) + k[t+2] + w[2];
+ t2 = Sum0 (g) + Maj (g, h, a);
+ b += t1;
+ f = t1 + t2;
+
+ t1 = e + Sum1 (b) + Ch (b, c, d) + k[t+3] + w[3];
+ t2 = Sum0 (f) + Maj (f, g, h);
+ a += t1;
+ e = t1 + t2;
+
+ t1 = d + Sum1 (a) + Ch (a, b, c) + k[t+4] + w[4];
+ t2 = Sum0 (e) + Maj (e, f, g);
+ h += t1;
+ d = t1 + t2;
+
+ t1 = c + Sum1 (h) + Ch (h, a, b) + k[t+5] + w[5];
+ t2 = Sum0 (d) + Maj (d, e, f);
+ g += t1;
+ c = t1 + t2;
+
+ t1 = b + Sum1 (g) + Ch (g, h, a) + k[t+6] + w[6];
+ t2 = Sum0 (c) + Maj (c, d, e);
+ f += t1;
+ b = t1 + t2;
+
+ t1 = a + Sum1 (f) + Ch (f, g, h) + k[t+7] + w[7];
+ t2 = Sum0 (b) + Maj (b, c, d);
+ e += t1;
+ a = t1 + t2;
+
+ t1 = h + Sum1 (e) + Ch (e, f, g) + k[t+8] + w[8];
+ t2 = Sum0 (a) + Maj (a, b, c);
+ d += t1;
+ h = t1 + t2;
+
+ t1 = g + Sum1 (d) + Ch (d, e, f) + k[t+9] + w[9];
+ t2 = Sum0 (h) + Maj (h, a, b);
+ c += t1;
+ g = t1 + t2;
+
+ t1 = f + Sum1 (c) + Ch (c, d, e) + k[t+10] + w[10];
+ t2 = Sum0 (g) + Maj (g, h, a);
+ b += t1;
+ f = t1 + t2;
+
+ t1 = e + Sum1 (b) + Ch (b, c, d) + k[t+11] + w[11];
+ t2 = Sum0 (f) + Maj (f, g, h);
+ a += t1;
+ e = t1 + t2;
+
+ t1 = d + Sum1 (a) + Ch (a, b, c) + k[t+12] + w[12];
+ t2 = Sum0 (e) + Maj (e, f, g);
+ h += t1;
+ d = t1 + t2;
+
+ t1 = c + Sum1 (h) + Ch (h, a, b) + k[t+13] + w[13];
+ t2 = Sum0 (d) + Maj (d, e, f);
+ g += t1;
+ c = t1 + t2;
+
+ t1 = b + Sum1 (g) + Ch (g, h, a) + k[t+14] + w[14];
+ t2 = Sum0 (c) + Maj (c, d, e);
+ f += t1;
+ b = t1 + t2;
+
+ t1 = a + Sum1 (f) + Ch (f, g, h) + k[t+15] + w[15];
+ t2 = Sum0 (b) + Maj (b, c, d);
+ e += t1;
+ a = t1 + t2;
+
+ t += 16;
+ }
+
+ /* Update chaining vars. */
+ hd->h0 += a;
+ hd->h1 += b;
+ hd->h2 += c;
+ hd->h3 += d;
+ hd->h4 += e;
+ hd->h5 += f;
+ hd->h6 += g;
+ hd->h7 += h;
-#ifdef USE_ARM_ASM
- burn = _gcry_sha512_transform_arm (&ctx->state, data, k, nblks);
-#else
- do
- {
- burn = transform_blk (&ctx->state, data) + 3 * sizeof(void*);
data += 128;
}
while (--nblks);
-#ifdef ASM_EXTRA_STACK
- /* 'transform_blk' is typically inlined and XMM6-XMM15 are stored at
- * the prologue of this function. Therefore need to add ASM_EXTRA_STACK to
- * here too.
- */
- burn += ASM_EXTRA_STACK;
-#endif
-#endif
-
- return burn;
+ return (8 + 16) * sizeof(u64) + sizeof(u32) + 3 * sizeof(void*);
}
+#endif /*!USE_ARM_ASM*/
/* The routine final terminates the computation and
@@ -713,7 +673,7 @@ sha512_final (void *context)
/* append the 128 bit count */
buf_put_be64(hd->bctx.buf + 112, msb);
buf_put_be64(hd->bctx.buf + 120, lsb);
- stack_burn_depth = transform (hd, hd->bctx.buf, 1);
+ stack_burn_depth = (*hd->bctx.bwrite) (hd, hd->bctx.buf, 1);
_gcry_burn_stack (stack_burn_depth);
p = hd->bctx.buf;
--
2.17.1