[PATCH 1/3] Add ARMv8/AArch32 Crypto Extension implementation of SHA-256
Jussi Kivilinna
jussi.kivilinna at iki.fi
Tue Jul 12 11:53:56 CEST 2016
* cipher/Makefile.am: Add 'sha256-armv8-aarch32-ce.S'.
* cipher/sha256-armv8-aarch32-ce.S: New.
* cipher/sha256.c (USE_ARM_CE): New.
(sha256_init, sha224_init): Check features for HWF_ARM_SHA2.
[USE_ARM_CE] (_gcry_sha256_transform_armv8_ce): New.
(transform) [USE_ARM_CE]: Use ARMv8 CE implementation if HW supports.
(SHA256_CONTEXT): Add 'use_arm_ce'.
* configure.ac: Add 'sha256-armv8-aarch32-ce.lo'.
--
Benchmark on Cortex-A53 (1152 Mhz):
Before:
| nanosecs/byte mebibytes/sec cycles/byte
SHA256 | 17.38 ns/B 54.88 MiB/s 20.02 c/B
After (~9.3x faster):
| nanosecs/byte mebibytes/sec cycles/byte
SHA256 | 1.85 ns/B 515.7 MiB/s 2.13 c/B
Signed-off-by: Jussi Kivilinna <jussi.kivilinna at iki.fi>
---
0 files changed
diff --git a/cipher/Makefile.am b/cipher/Makefile.am
index 571673e..1e97050 100644
--- a/cipher/Makefile.am
+++ b/cipher/Makefile.am
@@ -89,6 +89,7 @@ serpent.c serpent-sse2-amd64.S serpent-avx2-amd64.S serpent-armv7-neon.S \
sha1.c sha1-ssse3-amd64.S sha1-avx-amd64.S sha1-avx-bmi2-amd64.S \
sha1-armv7-neon.S sha1-armv8-aarch32-ce.S \
sha256.c sha256-ssse3-amd64.S sha256-avx-amd64.S sha256-avx2-bmi2-amd64.S \
+ sha256-armv8-aarch32-ce.S \
sha512.c sha512-ssse3-amd64.S sha512-avx-amd64.S sha512-avx2-bmi2-amd64.S \
sha512-armv7-neon.S sha512-arm.S \
keccak.c keccak_permute_32.h keccak_permute_64.h keccak-armv7-neon.S \
diff --git a/cipher/sha256-armv8-aarch32-ce.S b/cipher/sha256-armv8-aarch32-ce.S
new file mode 100644
index 0000000..a0dbcea
--- /dev/null
+++ b/cipher/sha256-armv8-aarch32-ce.S
@@ -0,0 +1,231 @@
+/* sha256-armv8-aarch32-ce.S - ARM/CE accelerated SHA-256 transform function
+ * Copyright (C) 2016 Jussi Kivilinna <jussi.kivilinna at iki.fi>
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <config.h>
+
+#if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) && \
+ defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) && \
+ defined(HAVE_GCC_INLINE_ASM_AARCH32_CRYPTO) && defined(USE_SHA256)
+
+.syntax unified
+.fpu crypto-neon-fp-armv8
+.arm
+
+.text
+
+#ifdef __PIC__
+# define GET_DATA_POINTER(reg, name, rtmp) \
+ ldr reg, 1f; \
+ ldr rtmp, 2f; \
+ b 3f; \
+ 1: .word _GLOBAL_OFFSET_TABLE_-(3f+8); \
+ 2: .word name(GOT); \
+ 3: add reg, pc, reg; \
+ ldr reg, [reg, rtmp];
+#else
+# define GET_DATA_POINTER(reg, name, rtmp) ldr reg, =name
+#endif
+
+
+/* Constants */
+
+.align 4
+gcry_sha256_aarch32_ce_K:
+.LK:
+ .long 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
+ .long 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
+ .long 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
+ .long 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
+ .long 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
+ .long 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
+ .long 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
+ .long 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
+ .long 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
+ .long 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
+ .long 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
+ .long 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
+ .long 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
+ .long 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
+ .long 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
+ .long 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
+
+
+/* Register macros */
+
+#define qH0123 q0
+#define qH4567 q1
+
+#define qABCD0 q2
+#define qABCD1 q3
+#define qEFGH q4
+
+#define qT0 q5
+#define qT1 q6
+
+#define qW0 q8
+#define qW1 q9
+#define qW2 q10
+#define qW3 q11
+
+#define qK0 q12
+#define qK1 q13
+#define qK2 q14
+#define qK3 q15
+
+
+/* Round macros */
+
+#define _(...) /*_*/
+
+
+
+/* Other functional macros */
+
+#define CLEAR_REG(reg) veor reg, reg;
+
+
+/*
+ * unsigned int
+ * _gcry_sha256_transform_armv8_ce (u32 state[8], const void *input_data,
+ * size_t num_blks)
+ */
+.align 3
+.globl _gcry_sha256_transform_armv8_ce
+.type _gcry_sha256_transform_armv8_ce,%function;
+_gcry_sha256_transform_armv8_ce:
+ /* input:
+ * r0: ctx, CTX
+ * r1: data (64*nblks bytes)
+ * r2: nblks
+ */
+
+ cmp r2, #0;
+ push {r4,lr};
+ beq .Ldo_nothing;
+
+ vpush {q4-q7};
+
+ GET_DATA_POINTER(r4, .LK, lr);
+ mov lr, r4
+
+ vld1.32 {qH0123-qH4567}, [r0] /* load state */
+
+#define do_loadk(nk0, nk1) vld1.32 {nk0-nk1},[lr]!;
+#define do_add(a, b) vadd.u32 a, a, b;
+#define do_sha256su0(w0, w1) sha256su0.32 w0, w1;
+#define do_sha256su1(w0, w2, w3) sha256su1.32 w0, w2, w3;
+
+#define do_rounds(k, nk0, nk1, w0, w1, w2, w3, loadk_fn, add_fn, su0_fn, su1_fn) \
+ loadk_fn( nk0, nk1 ); \
+ su0_fn( w0, w1 ); \
+ vmov qABCD1, qABCD0; \
+ sha256h.32 qABCD0, qEFGH, k; \
+ sha256h2.32 qEFGH, qABCD1, k; \
+ add_fn( nk0, w2 ); \
+ su1_fn( w0, w2, w3 ); \
+
+ vld1.8 {qW0-qW1}, [r1]!
+ do_loadk(qK0, qK1)
+ vld1.8 {qW2-qW3}, [r1]!
+ vmov qABCD0, qH0123
+ vmov qEFGH, qH4567
+
+ vrev32.8 qW0, qW0
+ vrev32.8 qW1, qW1
+ vrev32.8 qW2, qW2
+ do_add(qK0, qW0)
+ vrev32.8 qW3, qW3
+ do_add(qK1, qW1)
+
+.Loop:
+ do_rounds(qK0, qK2, qK3, qW0, qW1, qW2, qW3, do_loadk, do_add, do_sha256su0, do_sha256su1)
+ subs r2,r2,#1
+ do_rounds(qK1, qK3, _ , qW1, qW2, qW3, qW0, _ , do_add, do_sha256su0, do_sha256su1)
+ do_rounds(qK2, qK0, qK1, qW2, qW3, qW0, qW1, do_loadk, do_add, do_sha256su0, do_sha256su1)
+ do_rounds(qK3, qK1, _ , qW3, qW0, qW1, qW2, _ , do_add, do_sha256su0, do_sha256su1)
+
+ do_rounds(qK0, qK2, qK3, qW0, qW1, qW2, qW3, do_loadk, do_add, do_sha256su0, do_sha256su1)
+ do_rounds(qK1, qK3, _ , qW1, qW2, qW3, qW0, _ , do_add, do_sha256su0, do_sha256su1)
+ do_rounds(qK2, qK0, qK1, qW2, qW3, qW0, qW1, do_loadk, do_add, do_sha256su0, do_sha256su1)
+ do_rounds(qK3, qK1, _ , qW3, qW0, qW1, qW2, _ , do_add, do_sha256su0, do_sha256su1)
+
+ do_rounds(qK0, qK2, qK3, qW0, qW1, qW2, qW3, do_loadk, do_add, do_sha256su0, do_sha256su1)
+ do_rounds(qK1, qK3, _ , qW1, qW2, qW3, qW0, _ , do_add, do_sha256su0, do_sha256su1)
+ do_rounds(qK2, qK0, qK1, qW2, qW3, qW0, qW1, do_loadk, do_add, do_sha256su0, do_sha256su1)
+ do_rounds(qK3, qK1, _ , qW3, qW0, qW1, qW2, _ , do_add, do_sha256su0, do_sha256su1)
+
+ beq .Lend
+
+ do_rounds(qK0, qK2, qK3, qW0, _ , qW2, qW3, do_loadk, do_add, _, _)
+ vld1.8 {qW0}, [r1]!
+ mov lr, r4
+ do_rounds(qK1, qK3, _ , qW1, _ , qW3, _ , _ , do_add, _, _)
+ vld1.8 {qW1}, [r1]!
+ vrev32.8 qW0, qW0
+ do_rounds(qK2, qK0, qK1, qW2, _ , qW0, _ , do_loadk, do_add, _, _)
+ vrev32.8 qW1, qW1
+ vld1.8 {qW2}, [r1]!
+ do_rounds(qK3, qK1, _ , qW3, _ , qW1, _ , _ , do_add, _, _)
+ vld1.8 {qW3}, [r1]!
+
+ vadd.u32 qH0123, qABCD0
+ vadd.u32 qH4567, qEFGH
+
+ vrev32.8 qW2, qW2
+ vmov qABCD0, qH0123
+ vrev32.8 qW3, qW3
+ vmov qEFGH, qH4567
+
+ b .Loop
+
+.Lend:
+
+ do_rounds(qK0, qK2, qK3, qW0, _ , qW2, qW3, do_loadk, do_add, _, _)
+ do_rounds(qK1, qK3, _ , qW1, _ , qW3, _ , _ , do_add, _, _)
+ do_rounds(qK2, _ , _ , qW2, _ , _ , _ , _ , _, _, _)
+ do_rounds(qK3, _ , _ , qW3, _ , _ , _ , _ , _, _, _)
+
+ CLEAR_REG(qW0)
+ CLEAR_REG(qW1)
+ CLEAR_REG(qW2)
+ CLEAR_REG(qW3)
+ CLEAR_REG(qK0)
+ CLEAR_REG(qK1)
+ CLEAR_REG(qK2)
+ CLEAR_REG(qK3)
+
+ vadd.u32 qH0123, qABCD0
+ vadd.u32 qH4567, qEFGH
+
+ CLEAR_REG(qABCD0)
+ CLEAR_REG(qABCD1)
+ CLEAR_REG(qEFGH)
+
+ vst1.32 {qH0123-qH4567}, [r0] /* store state */
+
+ CLEAR_REG(qH0123)
+ CLEAR_REG(qH4567)
+ vpop {q4-q7}
+
+.Ldo_nothing:
+ mov r0, #0
+ pop {r4,pc}
+.size _gcry_sha256_transform_armv8_ce,.-_gcry_sha256_transform_armv8_ce;
+
+#endif
diff --git a/cipher/sha256.c b/cipher/sha256.c
index 1b82ee7..72818ce 100644
--- a/cipher/sha256.c
+++ b/cipher/sha256.c
@@ -75,6 +75,17 @@
# define USE_AVX2 1
#endif
+/* USE_ARM_CE indicates whether to enable ARMv8 Crypto Extension assembly
+ * code. */
+#undef USE_ARM_CE
+#ifdef ENABLE_ARM_CRYPTO_SUPPORT
+# if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) \
+ && defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) \
+ && defined(HAVE_GCC_INLINE_ASM_AARCH32_CRYPTO)
+# define USE_ARM_CE 1
+# endif
+#endif
+
typedef struct {
gcry_md_block_ctx_t bctx;
@@ -88,6 +99,9 @@ typedef struct {
#ifdef USE_AVX2
unsigned int use_avx2:1;
#endif
+#ifdef USE_ARM_CE
+ unsigned int use_arm_ce:1;
+#endif
} SHA256_CONTEXT;
@@ -129,6 +143,9 @@ sha256_init (void *context, unsigned int flags)
#ifdef USE_AVX2
hd->use_avx2 = (features & HWF_INTEL_AVX2) && (features & HWF_INTEL_BMI2);
#endif
+#ifdef USE_ARM_CE
+ hd->use_arm_ce = (features & HWF_ARM_SHA2) != 0;
+#endif
(void)features;
}
@@ -167,6 +184,9 @@ sha224_init (void *context, unsigned int flags)
#ifdef USE_AVX2
hd->use_avx2 = (features & HWF_INTEL_AVX2) && (features & HWF_INTEL_BMI2);
#endif
+#ifdef USE_ARM_CE
+ hd->use_arm_ce = (features & HWF_ARM_SHA2) != 0;
+#endif
(void)features;
}
@@ -355,6 +375,11 @@ unsigned int _gcry_sha256_transform_amd64_avx2(const void *input_data,
size_t num_blks) ASM_FUNC_ABI;
#endif
+#ifdef USE_ARM_CE
+unsigned int _gcry_sha256_transform_armv8_ce(u32 state[8],
+ const void *input_data,
+ size_t num_blks);
+#endif
static unsigned int
transform (void *ctx, const unsigned char *data, size_t nblks)
@@ -380,6 +405,11 @@ transform (void *ctx, const unsigned char *data, size_t nblks)
+ 4 * sizeof(void*) + ASM_EXTRA_STACK;
#endif
+#ifdef USE_ARM_CE
+ if (hd->use_arm_ce)
+ return _gcry_sha256_transform_armv8_ce (&hd->h0, data, nblks);
+#endif
+
do
{
burn = transform_blk (hd, data);
diff --git a/configure.ac b/configure.ac
index 613a3d6..91dd285 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2256,6 +2256,10 @@ if test "$found" = "1" ; then
GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha256-avx-amd64.lo"
GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha256-avx2-bmi2-amd64.lo"
;;
+      arm*-*-*)
+         # Build with the assembly implementation
+         GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha256-armv8-aarch32-ce.lo"
+      ;;
esac
fi
More information about the Gcrypt-devel
mailing list