[PATCH] SHA-256: Add SSSE3 implementation for x86-64
Jussi Kivilinna
jussi.kivilinna@iki.fi
Wed Dec 11 20:49:14 CET 2013
* cipher/Makefile.am: Add 'sha256-ssse3-amd64.S'.
* cipher/sha256-ssse3-amd64.S: New.
* cipher/sha256.c (USE_SSSE3): New.
(SHA256_CONTEXT) [USE_SSSE3]: Add 'use_ssse3'.
(sha256_init, sha224_init) [USE_SSSE3]: Initialize 'use_ssse3'.
(transform): Rename to...
(_transform): This.
[USE_SSSE3] (_gcry_sha256_transform_amd64_ssse3): New.
(transform): New.
* configure.ac (HAVE_INTEL_SYNTAX_PLATFORM_AS): New check.
(sha256): Add 'sha256-ssse3-amd64.lo'.
* src/g10lib.h (HWF_INTEL_SSSE3): New.
* src/global.c (hwflist): Add "intel-ssse3".
* src/hwf-x86.c (detect_x86_gnuc): Test for SSSE3.
--
This patch adds a fast SSSE3 implementation of SHA-256 by Intel
Corporation. The assembly source is licensed under the 3-clause BSD
license and is thus compatible with LGPL2.1+. The original source can
be accessed at:
http://www.intel.com/p/en_US/embedded/hwsw/technology/packet-processing#docs
The implementation is described in the white paper
"Fast SHA-256 Implementations on Intel® Architecture Processors":
http://www.intel.com/content/www/us/en/intelligent-systems/intel-technology/sha-256-implementations-paper.html
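
For reference, the speedup comes from vectorizing the SHA-256 message
schedule: the assembly computes four schedule words per pass in XMM
registers. A scalar C sketch of the recurrence being vectorized
(standard FIPS 180-4 definitions, not code from this patch):

  #include <stdint.h>

  static uint32_t ror32 (uint32_t x, int n)
  {
    return (x >> n) | (x << (32 - n));
  }

  /* Expand the 16 message words w[0..15] of one block to w[0..63].
     FOUR_ROUNDS_AND_SCHED below evaluates four of these steps per
     iteration with paddd/pslld/psrld/psrlq/pxor plus the SSSE3
     palignr/pshufb. */
  static void
  sha256_schedule (uint32_t w[64])
  {
    int i;

    for (i = 16; i < 64; i++)
      {
        uint32_t s0 = ror32 (w[i-15], 7) ^ ror32 (w[i-15], 18) ^ (w[i-15] >> 3);
        uint32_t s1 = ror32 (w[i-2], 17) ^ ror32 (w[i-2], 19) ^ (w[i-2] >> 10);

        w[i] = w[i-16] + s0 + w[i-7] + s1;
      }
  }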
Benchmarks (c/B = cycles per byte; lower is better):

 cpu                 Old        New        Speedup
 Intel i5-4570       13.99 c/B  10.66 c/B  1.31x
 Intel i5-2450M      21.53 c/B  15.79 c/B  1.36x
 Intel Core2 T8100   20.84 c/B  15.07 c/B  1.38x
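
(For reproduction: with a build of this tree, the bundled benchmark
tool should show the per-byte cost, presumably via something like

  $ tests/benchmark md sha256

on each machine.)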
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@iki.fi>
---
cipher/Makefile.am | 2
cipher/sha256-ssse3-amd64.S | 526 +++++++++++++++++++++++++++++++++++++++++++
cipher/sha256.c | 45 ++++
configure.ac | 45 ++++
src/g10lib.h | 1
src/global.c | 1
src/hwf-x86.c | 3
7 files changed, 621 insertions(+), 2 deletions(-)
create mode 100644 cipher/sha256-ssse3-amd64.S
diff --git a/cipher/Makefile.am b/cipher/Makefile.am
index ff9deca..34f74e2 100644
--- a/cipher/Makefile.am
+++ b/cipher/Makefile.am
@@ -78,7 +78,7 @@ scrypt.c \
seed.c \
serpent.c serpent-sse2-amd64.S serpent-avx2-amd64.S \
sha1.c \
-sha256.c \
+sha256.c sha256-ssse3-amd64.S \
sha512.c sha512-armv7-neon.S \
stribog.c \
tiger.c \
diff --git a/cipher/sha256-ssse3-amd64.S b/cipher/sha256-ssse3-amd64.S
new file mode 100644
index 0000000..d7d5980
--- /dev/null
+++ b/cipher/sha256-ssse3-amd64.S
@@ -0,0 +1,526 @@
+/*
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright (c) 2012, Intel Corporation
+;
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are
+; met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+;
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in the
+; documentation and/or other materials provided with the
+; distribution.
+;
+; * Neither the name of the Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived from
+; this software without specific prior written permission.
+;
+;
+; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY
+; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
+; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+; This code is described in an Intel White-Paper:
+; "Fast SHA-256 Implementations on Intel Architecture Processors"
+;
+; To find it, surf to http://www.intel.com/p/en_US/embedded
+; and search for that title.
+; The paper is expected to be released roughly at the end of April, 2012
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; This code schedules 1 block at a time, with 4 lanes per block
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+*/
+/*
+ * Conversion to GAS assembly and integration to libgcrypt
+ * by Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ *
+ * Note: the original implementation was named SHA256-SSE4; however, only
+ * SSSE3 is required.
+ */
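+
+/* Only two instructions here go beyond SSE2: the SSSE3 'palignr' and
+ * 'pshufb'.  Everything else (movdqa/movdqu, paddd, pslld/psrld/psrlq,
+ * pxor, por, pshufd) is plain SSE2. */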
+
+#ifdef __x86_64
+#include <config.h>
+#if defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) && \
+ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \
+ defined(HAVE_GCC_INLINE_ASM_SSSE3) && defined(USE_SHA256)
+
+#ifdef __PIC__
+# define ADD_RIP +rip
+#else
+# define ADD_RIP
+#endif
+
+.intel_syntax noprefix
+
+#define MOVDQ movdqu /* assume buffers not aligned */
+
+/*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros*/
+
+/* addm [mem], reg
+ * Add reg to mem using reg-mem add and store */
+.macro addm p1 p2
+ add \p2, \p1
+ mov \p1, \p2
+.endm
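+
+/* For example, 'addm [4*0 + CTX], a' below expands to:
+ *   add a, [4*0 + CTX]
+ *   mov [4*0 + CTX], a
+ * i.e. the memory operand is updated and the register keeps the sum. */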
+
+/*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;*/
+
+/* COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask
+ * Load xmm with mem and byte swap each dword */
+.macro COPY_XMM_AND_BSWAP p1 p2 p3
+ MOVDQ \p1, \p2
+ pshufb \p1, \p3
+.endm
+
+/*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;*/
+
+X0 = xmm4
+X1 = xmm5
+X2 = xmm6
+X3 = xmm7
+
+XTMP0 = xmm0
+XTMP1 = xmm1
+XTMP2 = xmm2
+XTMP3 = xmm3
+XTMP4 = xmm8
+XFER = xmm9
+
+SHUF_00BA = xmm10 /* shuffle xBxA -> 00BA */
+SHUF_DC00 = xmm11 /* shuffle xDxC -> DC00 */
+BYTE_FLIP_MASK = xmm12
+
+NUM_BLKS = rdx /* 3rd arg */
+CTX = rsi /* 2nd arg */
+INP = rdi /* 1st arg */
+
+SRND = rdi /* clobbers INP */
+c = ecx
+d = r8d
+e = edx
+
+TBL = rbp
+a = eax
+b = ebx
+
+f = r9d
+g = r10d
+h = r11d
+
+y0 = r13d
+y1 = r14d
+y2 = r15d
+
+
+
+#define _INP_END_SIZE 8
+#define _INP_SIZE 8
+#define _XFER_SIZE 8
+#define _XMM_SAVE_SIZE 0
+/* STACK_SIZE plus pushes must be an odd multiple of 8 */
+#define _ALIGN_SIZE 8
+
+#define _INP_END 0
+#define _INP (_INP_END + _INP_END_SIZE)
+#define _XFER (_INP + _INP_SIZE)
+#define _XMM_SAVE (_XFER + _XFER_SIZE + _ALIGN_SIZE)
+#define STACK_SIZE (_XMM_SAVE + _XMM_SAVE_SIZE)
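+
+/* Resulting offsets: _INP_END = 0, _INP = 8, _XFER = 16, _XMM_SAVE = 32,
+ * STACK_SIZE = 32.  Together with the five register pushes (40 bytes) and
+ * the return address, this keeps rsp 16-byte aligned for the 'movdqa
+ * [rsp + _XFER]' stores. */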
+
+/* rotate_Xs
+ * Rotate values of symbols X0...X3 */
+.macro rotate_Xs
+X_ = X0
+X0 = X1
+X1 = X2
+X2 = X3
+X3 = X_
+.endm
+
+/* ROTATE_ARGS
+ * Rotate values of symbols a...h */
+.macro ROTATE_ARGS
+TMP_ = h
+h = g
+g = f
+f = e
+e = d
+d = c
+c = b
+b = a
+a = TMP_
+.endm
+
+.macro FOUR_ROUNDS_AND_SCHED
+ /* compute s0 four at a time and s1 two at a time
+ * compute W[-16] + W[-7] 4 at a time */
+ movdqa XTMP0, X3
+ mov y0, e /* y0 = e */
+ ror y0, (25-11) /* y0 = e >> (25-11) */
+ mov y1, a /* y1 = a */
+ palignr XTMP0, X2, 4 /* XTMP0 = W[-7] */
+ ror y1, (22-13) /* y1 = a >> (22-13) */
+ xor y0, e /* y0 = e ^ (e >> (25-11)) */
+ mov y2, f /* y2 = f */
+ ror y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */
+ movdqa XTMP1, X1
+ xor y1, a /* y1 = a ^ (a >> (22-13)) */
+ xor y2, g /* y2 = f^g */
+ paddd XTMP0, X0 /* XTMP0 = W[-7] + W[-16] */
+ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */
+ and y2, e /* y2 = (f^g)&e */
+ ror y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */
+ /* compute s0 */
+ palignr XTMP1, X0, 4 /* XTMP1 = W[-15] */
+ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */
+ ror y0, 6 /* y0 = S1 = (e>>6) ^ (e>>11) ^ (e>>25) */
+ xor y2, g /* y2 = CH = ((f^g)&e)^g */
+ movdqa XTMP2, XTMP1 /* XTMP2 = W[-15] */
+ ror y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */
+ add y2, y0 /* y2 = S1 + CH */
+ add y2, [rsp + _XFER + 0*4] /* y2 = k + w + S1 + CH */
+ movdqa XTMP3, XTMP1 /* XTMP3 = W[-15] */
+ mov y0, a /* y0 = a */
+ add h, y2 /* h = h + S1 + CH + k + w */
+ mov y2, a /* y2 = a */
+ pslld XTMP1, (32-7)
+ or y0, c /* y0 = a|c */
+ add d, h /* d = d + h + S1 + CH + k + w */
+ and y2, c /* y2 = a&c */
+ psrld XTMP2, 7
+ and y0, b /* y0 = (a|c)&b */
+ add h, y1 /* h = h + S1 + CH + k + w + S0 */
+ por XTMP1, XTMP2 /* XTMP1 = W[-15] ror 7 */
+ or y0, y2 /* y0 = MAJ = ((a|c)&b)|(a&c) */
+ add h, y0 /* h = h + S1 + CH + k + w + S0 + MAJ */
+
+ROTATE_ARGS
+ movdqa XTMP2, XTMP3 /* XTMP2 = W[-15] */
+ mov y0, e /* y0 = e */
+ mov y1, a /* y1 = a */
+ movdqa XTMP4, XTMP3 /* XTMP4 = W[-15] */
+ ror y0, (25-11) /* y0 = e >> (25-11) */
+ xor y0, e /* y0 = e ^ (e >> (25-11)) */
+ mov y2, f /* y2 = f */
+ ror y1, (22-13) /* y1 = a >> (22-13) */
+ pslld XTMP3, (32-18)
+ xor y1, a /* y1 = a ^ (a >> (22-13)) */
+ ror y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */
+ xor y2, g /* y2 = f^g */
+ psrld XTMP2, 18
+ ror y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */
+ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */
+ and y2, e /* y2 = (f^g)&e */
+ ror y0, 6 /* y0 = S1 = (e>>6) ^ (e>>11) ^ (e>>25) */
+ pxor XTMP1, XTMP3
+ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */
+ xor y2, g /* y2 = CH = ((f^g)&e)^g */
+ psrld XTMP4, 3 /* XTMP4 = W[-15] >> 3 */
+ add y2, y0 /* y2 = S1 + CH */
+ add y2, [rsp + _XFER + 1*4] /* y2 = k + w + S1 + CH */
+ ror y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */
+ pxor XTMP1, XTMP2 /* XTMP1 = W[-15] ror 7 ^ W[-15] ror 18 */
+ mov y0, a /* y0 = a */
+ add h, y2 /* h = h + S1 + CH + k + w */
+ mov y2, a /* y2 = a */
+ pxor XTMP1, XTMP4 /* XTMP1 = s0 */
+ or y0, c /* y0 = a|c */
+ add d, h /* d = d + h + S1 + CH + k + w */
+ and y2, c /* y2 = a&c */
+ /* compute low s1 */
+ pshufd XTMP2, X3, 0b11111010 /* XTMP2 = W[-2] {BBAA} */
+ and y0, b /* y0 = (a|c)&b */
+ add h, y1 /* h = h + S1 + CH + k + w + S0 */
+ paddd XTMP0, XTMP1 /* XTMP0 = W[-16] + W[-7] + s0 */
+ or y0, y2 /* y0 = MAJ = ((a|c)&b)|(a&c) */
+ add h, y0 /* h = h + S1 + CH + k + w + S0 + MAJ */
+
+ROTATE_ARGS
+ movdqa XTMP3, XTMP2 /* XTMP3 = W[-2] {BBAA} */
+ mov y0, e /* y0 = e */
+ mov y1, a /* y1 = a */
+ ror y0, (25-11) /* y0 = e >> (25-11) */
+ movdqa XTMP4, XTMP2 /* XTMP4 = W[-2] {BBAA} */
+ xor y0, e /* y0 = e ^ (e >> (25-11)) */
+ ror y1, (22-13) /* y1 = a >> (22-13) */
+ mov y2, f /* y2 = f */
+ xor y1, a /* y1 = a ^ (a >> (22-13)) */
+ ror y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */
+ psrlq XTMP2, 17 /* XTMP2 = W[-2] ror 17 {xBxA} */
+ xor y2, g /* y2 = f^g */
+ psrlq XTMP3, 19 /* XTMP3 = W[-2] ror 19 {xBxA} */
+ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */
+ and y2, e /* y2 = (f^g)&e */
+ psrld XTMP4, 10 /* XTMP4 = W[-2] >> 10 {BBAA} */
+ ror y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */
+ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */
+ xor y2, g /* y2 = CH = ((f^g)&e)^g */
+ ror y0, 6 /* y0 = S1 = (e>>6) ^ (e>>11) ^ (e>>25) */
+ pxor XTMP2, XTMP3
+ add y2, y0 /* y2 = S1 + CH */
+ ror y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */
+ add y2, [rsp + _XFER + 2*4] /* y2 = k + w + S1 + CH */
+ pxor XTMP4, XTMP2 /* XTMP4 = s1 {xBxA} */
+ mov y0, a /* y0 = a */
+ add h, y2 /* h = h + S1 + CH + k + w */
+ mov y2, a /* y2 = a */
+ pshufb XTMP4, SHUF_00BA /* XTMP4 = s1 {00BA} */
+ or y0, c /* y0 = a|c */
+ add d, h /* d = d + h + S1 + CH + k + w */
+ and y2, c /* y2 = a&c */
+ paddd XTMP0, XTMP4 /* XTMP0 = {..., ..., W[1], W[0]} */
+ and y0, b /* y0 = (a|c)&b */
+ add h, y1 /* h = h + S1 + CH + k + w + S0 */
+ /* compute high s1 */
+ pshufd XTMP2, XTMP0, 0b01010000 /* XTMP2 = W[-2] {DDCC} */
+ or y0, y2 /* y0 = MAJ = ((a|c)&b)|(a&c) */
+ add h, y0 /* h = h + S1 + CH + k + w + S0 + MAJ */
+
+ROTATE_ARGS
+ movdqa XTMP3, XTMP2 /* XTMP3 = W[-2] {DDCC} */
+ mov y0, e /* y0 = e */
+ ror y0, (25-11) /* y0 = e >> (25-11) */
+ mov y1, a /* y1 = a */
+ movdqa X0, XTMP2 /* X0 = W[-2] {DDCC} */
+ ror y1, (22-13) /* y1 = a >> (22-13) */
+ xor y0, e /* y0 = e ^ (e >> (25-11)) */
+ mov y2, f /* y2 = f */
+ ror y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */
+ psrlq XTMP2, 17 /* XTMP2 = W[-2] ror 17 {xDxC} */
+ xor y1, a /* y1 = a ^ (a >> (22-13)) */
+ xor y2, g /* y2 = f^g */
+ psrlq XTMP3, 19 /* XTMP3 = W[-2] ror 19 {xDxC} */
+ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */
+ and y2, e /* y2 = (f^g)&e */
+ ror y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */
+ psrld X0, 10 /* X0 = W[-2] >> 10 {DDCC} */
+ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */
+ ror y0, 6 /* y0 = S1 = (e>>6) ^ (e>>11) ^ (e>>25) */
+ xor y2, g /* y2 = CH = ((f^g)&e)^g */
+ pxor XTMP2, XTMP3
+ ror y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */
+ add y2, y0 /* y2 = S1 + CH */
+ add y2, [rsp + _XFER + 3*4] /* y2 = k + w + S1 + CH */
+ pxor X0, XTMP2 /* X0 = s1 {xDxC} */
+ mov y0, a /* y0 = a */
+ add h, y2 /* h = h + S1 + CH + k + w */
+ mov y2, a /* y2 = a */
+ pshufb X0, SHUF_DC00 /* X0 = s1 {DC00} */
+ or y0, c /* y0 = a|c */
+ add d, h /* d = d + h + S1 + CH + k + w */
+ and y2, c /* y2 = a&c */
+ paddd X0, XTMP0 /* X0 = {W[3], W[2], W[1], W[0]} */
+ and y0, b /* y0 = (a|c)&b */
+ add h, y1 /* h = h + S1 + CH + k + w + S0 */
+ or y0, y2 /* y0 = MAJ = ((a|c)&b)|(a&c) */
+ add h, y0 /* h = h + S1 + CH + k + w + S0 + MAJ */
+
+ROTATE_ARGS
+rotate_Xs
+.endm
+
+/* input is [rsp + _XFER + \i1 * 4] */
+.macro DO_ROUND i1
+ mov y0, e /* y0 = e */
+ ror y0, (25-11) /* y0 = e >> (25-11) */
+ mov y1, a /* y1 = a */
+ xor y0, e /* y0 = e ^ (e >> (25-11)) */
+ ror y1, (22-13) /* y1 = a >> (22-13) */
+ mov y2, f /* y2 = f */
+ xor y1, a /* y1 = a ^ (a >> (22-13)) */
+ ror y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */
+ xor y2, g /* y2 = f^g */
+ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */
+ ror y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */
+ and y2, e /* y2 = (f^g)&e */
+ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */
+ ror y0, 6 /* y0 = S1 = (e>>6) ^ (e>>11) ^ (e>>25) */
+ xor y2, g /* y2 = CH = ((f^g)&e)^g */
+ add y2, y0 /* y2 = S1 + CH */
+ ror y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */
+ add y2, [rsp + _XFER + \i1 * 4] /* y2 = k + w + S1 + CH */
+ mov y0, a /* y0 = a */
+ add h, y2 /* h = h + S1 + CH + k + w */
+ mov y2, a /* y2 = a */
+ or y0, c /* y0 = a|c */
+ add d, h /* d = d + h + S1 + CH + k + w */
+ and y2, c /* y2 = a&c */
+ and y0, b /* y0 = (a|c)&b */
+ add h, y1 /* h = h + S1 + CH + k + w + S0 */
+ or y0, y2 /* y0 = MAJ = ((a|c)&b)|(a&c) */
+ add h, y0 /* h = h + S1 + CH + k + w + S0 + MAJ */
+ ROTATE_ARGS
+.endm
+
+/*
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; unsigned int _gcry_sha256_transform_amd64_ssse3(const void *input_data, u32 state[8], size_t num_blks)
+;; arg 1 : pointer to input data
+;; arg 2 : pointer to digest (state)
+;; arg 3 : number of 64-byte blocks to process
+*/
+.text
+.globl _gcry_sha256_transform_amd64_ssse3
+.type _gcry_sha256_transform_amd64_ssse3,@function;
+.align 32
+_gcry_sha256_transform_amd64_ssse3:
+ push rbx
+ push rbp
+ push r13
+ push r14
+ push r15
+
+ sub rsp, STACK_SIZE
+
+ shl NUM_BLKS, 6 /* convert to bytes */
+ jz .Ldone_hash
+ add NUM_BLKS, INP /* pointer to end of data */
+ mov [rsp + _INP_END], NUM_BLKS
+
+ /* load initial digest */
+ mov a,[4*0 + CTX]
+ mov b,[4*1 + CTX]
+ mov c,[4*2 + CTX]
+ mov d,[4*3 + CTX]
+ mov e,[4*4 + CTX]
+ mov f,[4*5 + CTX]
+ mov g,[4*6 + CTX]
+ mov h,[4*7 + CTX]
+
+ movdqa BYTE_FLIP_MASK, [.LPSHUFFLE_BYTE_FLIP_MASK ADD_RIP]
+ movdqa SHUF_00BA, [.L_SHUF_00BA ADD_RIP]
+ movdqa SHUF_DC00, [.L_SHUF_DC00 ADD_RIP]
+
+.Loop0:
+ lea TBL, [.LK256 ADD_RIP]
+
+ /* byte swap first 16 dwords */
+ COPY_XMM_AND_BSWAP X0, [INP + 0*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X1, [INP + 1*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X2, [INP + 2*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X3, [INP + 3*16], BYTE_FLIP_MASK
+
+ mov [rsp + _INP], INP
+
+ /* schedule 48 input dwords, by doing 3 iterations of 16 rounds each */
+ mov SRND, 3
+.align 16
+.Loop1:
+ movdqa XFER, [TBL + 0*16]
+ paddd XFER, X0
+ movdqa [rsp + _XFER], XFER
+ FOUR_ROUNDS_AND_SCHED
+
+ movdqa XFER, [TBL + 1*16]
+ paddd XFER, X0
+ movdqa [rsp + _XFER], XFER
+ FOUR_ROUNDS_AND_SCHED
+
+ movdqa XFER, [TBL + 2*16]
+ paddd XFER, X0
+ movdqa [rsp + _XFER], XFER
+ FOUR_ROUNDS_AND_SCHED
+
+ movdqa XFER, [TBL + 3*16]
+ paddd XFER, X0
+ movdqa [rsp + _XFER], XFER
+ add TBL, 4*16
+ FOUR_ROUNDS_AND_SCHED
+
+ sub SRND, 1
+ jne .Loop1
+
+ mov SRND, 2
+.Loop2:
+ paddd X0, [TBL + 0*16]
+ movdqa [rsp + _XFER], X0
+ DO_ROUND 0
+ DO_ROUND 1
+ DO_ROUND 2
+ DO_ROUND 3
+ paddd X1, [TBL + 1*16]
+ movdqa [rsp + _XFER], X1
+ add TBL, 2*16
+ DO_ROUND 0
+ DO_ROUND 1
+ DO_ROUND 2
+ DO_ROUND 3
+
+ movdqa X0, X2
+ movdqa X1, X3
+
+ sub SRND, 1
+ jne .Loop2
+
+ addm [4*0 + CTX],a
+ addm [4*1 + CTX],b
+ addm [4*2 + CTX],c
+ addm [4*3 + CTX],d
+ addm [4*4 + CTX],e
+ addm [4*5 + CTX],f
+ addm [4*6 + CTX],g
+ addm [4*7 + CTX],h
+
+ mov INP, [rsp + _INP]
+ add INP, 64
+ cmp INP, [rsp + _INP_END]
+ jne .Loop0
+
+.Ldone_hash:
+ add rsp, STACK_SIZE
+
+ pop r15
+ pop r14
+ pop r13
+ pop rbp
+ pop rbx
+
+ mov rax, STACK_SIZE /* return stack usage for burn-stack accounting */
+ ret
+
+
+.data
+.align 64
+.LK256:
+ .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+ .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+ .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+ .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+ .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+ .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+ .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+ .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+ .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+ .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+ .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+ .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+ .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+ .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+ .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+ .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+
+.LPSHUFFLE_BYTE_FLIP_MASK: .octa 0x0c0d0e0f08090a0b0405060700010203
+
+/* shuffle xBxA -> 00BA */
+.L_SHUF_00BA: .octa 0xFFFFFFFFFFFFFFFF0b0a090803020100
+
+/* shuffle xDxC -> DC00 */
+.L_SHUF_DC00: .octa 0x0b0a090803020100FFFFFFFFFFFFFFFF
+
+#endif
+#endif
diff --git a/cipher/sha256.c b/cipher/sha256.c
index bd5a412..f3c1d62 100644
--- a/cipher/sha256.c
+++ b/cipher/sha256.c
@@ -46,11 +46,25 @@
#include "cipher.h"
#include "hash-common.h"
+
+/* USE_SSSE3 indicates whether to compile with Intel SSSE3 code. */
+#undef USE_SSSE3
+#if defined(__x86_64__) && defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) && \
+ defined(HAVE_GCC_INLINE_ASM_SSSE3) && \
+ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS)
+# define USE_SSSE3 1
+#endif
+
+
typedef struct {
gcry_md_block_ctx_t bctx;
u32 h0,h1,h2,h3,h4,h5,h6,h7;
+#ifdef USE_SSSE3
+ unsigned int use_ssse3:1;
+#endif
} SHA256_CONTEXT;
+
static unsigned int
transform (void *c, const unsigned char *data);
@@ -74,6 +88,10 @@ sha256_init (void *context)
hd->bctx.count = 0;
hd->bctx.blocksize = 64;
hd->bctx.bwrite = transform;
+
+#ifdef USE_SSSE3
+ hd->use_ssse3 = (_gcry_get_hw_features () & HWF_INTEL_SSSE3) != 0;
+#endif
}
@@ -96,6 +114,10 @@ sha224_init (void *context)
hd->bctx.count = 0;
hd->bctx.blocksize = 64;
hd->bctx.bwrite = transform;
+
+#ifdef USE_SSSE3
+ hd->use_ssse3 = (_gcry_get_hw_features () & HWF_INTEL_SSSE3) != 0;
+#endif
}
@@ -148,7 +170,7 @@ Sum1 (u32 x)
static unsigned int
-transform (void *ctx, const unsigned char *data)
+_transform (void *ctx, const unsigned char *data)
{
SHA256_CONTEXT *hd = ctx;
static const u32 K[64] = {
@@ -254,6 +276,27 @@ transform (void *ctx, const unsigned char *data)
#undef R
+#ifdef USE_SSSE3
+unsigned int _gcry_sha256_transform_amd64_ssse3(const void *input_data,
+ u32 state[8], size_t num_blks);
+#endif
+
+
+static unsigned int
+transform (void *ctx, const unsigned char *data)
+{
+ SHA256_CONTEXT *hd = ctx;
+
+#ifdef USE_SSSE3
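+  /* The asm routine returns its stack usage (STACK_SIZE); the extra
+     4 * sizeof(void*) added to it below is presumably a margin for this
+     wrapper's own frame in the burn-stack estimate. */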
+ if (hd->use_ssse3)
+ return _gcry_sha256_transform_amd64_ssse3 (data, &hd->h0, 1)
+ + 4 * sizeof(void*);
+#endif
+
+ return _transform (hd, data);
+}
+
+
/*
The routine finally terminates the computation and returns the
digest. The handle is prepared for a new cycle, but adding bytes
diff --git a/configure.ac b/configure.ac
index 6d40343..eb0dd29 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1077,6 +1077,44 @@ fi
#
+# Check whether GCC assembler supports features needed for our amd64 assembly
+# implementations that use Intel syntax
+#
+if test $amd64_as_feature_detection = yes; then
+ if test $gcry_cv_gcc_amd64_platform_as_ok = yes; then
+ AC_CACHE_CHECK([whether GCC assembler is compatible for Intel syntax assembly implementations],
+ [gcry_cv_gcc_amd64_platform_as_ok_for_intel_syntax],
+ [gcry_cv_gcc_amd64_platform_as_ok_for_intel_syntax=no
+ AC_COMPILE_IFELSE([AC_LANG_SOURCE(
+ [[__asm__(
+ ".intel_syntax noprefix\n\t"
+ "pxor xmm1, xmm7;\n\t"
+ /* Intel syntax implementations also use GAS macros, so check
+ * for them here. */
+ "VAL_A = xmm8\n\t"
+ "VAL_B = xmm9\n\t"
+ ".macro SET_VAL_A p1\n\t"
+ " VAL_A = \\\\p1 \n\t"
+ ".endm\n\t"
+ ".macro SET_VAL_B p1\n\t"
+ " VAL_B = \\\\p1 \n\t"
+ ".endm\n\t"
+ "vmovdqa VAL_A, VAL_B;\n\t"
+ "SET_VAL_A eax\n\t"
+ "SET_VAL_B r8d\n\t"
+ "add VAL_A, VAL_B;\n\t"
+ "add VAL_B, 0b10101;\n\t"
+ );]])],
+ [gcry_cv_gcc_amd64_platform_as_ok_for_intel_syntax=yes])])
+ if test "$gcry_cv_gcc_amd64_platform_as_ok_for_intel_syntax" = "yes" ; then
+ AC_DEFINE(HAVE_INTEL_SYNTAX_PLATFORM_AS,1,
+ [Defined if underlying assembler is compatible with Intel syntax assembly implementations])
+ fi
+ fi
+fi
+
+
+#
# Check whether compiler is configured for ARMv6 or newer architecture
#
AC_CACHE_CHECK([whether compiler is configured for ARMv6 or newer architecture],
@@ -1676,6 +1714,13 @@ LIST_MEMBER(sha256, $enabled_digests)
if test "$found" = "1" ; then
GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha256.lo"
AC_DEFINE(USE_SHA256, 1, [Defined if this module should be included])
+
+ case "${host}" in
+ x86_64-*-*)
+ # Build with the assembly implementation
+ GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha256-ssse3-amd64.lo"
+ ;;
+ esac
fi
LIST_MEMBER(sha512, $enabled_digests)
diff --git a/src/g10lib.h b/src/g10lib.h
index a326ad5..1dcadfa 100644
--- a/src/g10lib.h
+++ b/src/g10lib.h
@@ -167,6 +167,7 @@ int _gcry_log_verbosity( int level );
#define HWF_PADLOCK_SHA 4
#define HWF_PADLOCK_MMUL 8
+#define HWF_INTEL_SSSE3 64
#define HWF_INTEL_PCLMUL 128
#define HWF_INTEL_AESNI 256
#define HWF_INTEL_RDRAND 512
diff --git a/src/global.c b/src/global.c
index 8a5d310..de99286 100644
--- a/src/global.c
+++ b/src/global.c
@@ -66,6 +66,7 @@ static struct
{ HWF_PADLOCK_AES, "padlock-aes" },
{ HWF_PADLOCK_SHA, "padlock-sha" },
{ HWF_PADLOCK_MMUL,"padlock-mmul"},
+ { HWF_INTEL_SSSE3, "intel-ssse3" },
{ HWF_INTEL_PCLMUL,"intel-pclmul" },
{ HWF_INTEL_AESNI, "intel-aesni" },
{ HWF_INTEL_RDRAND,"intel-rdrand" },
diff --git a/src/hwf-x86.c b/src/hwf-x86.c
index 784fe2a..ab6dacd 100644
--- a/src/hwf-x86.c
+++ b/src/hwf-x86.c
@@ -206,6 +206,9 @@ detect_x86_gnuc (void)
if (features & 0x00000002)
result |= HWF_INTEL_PCLMUL;
#endif
+ /* Test bit 9 for SSSE3 (CPUID leaf 1, ECX). */
+ if (features & 0x00000200)
+ result |= HWF_INTEL_SSSE3;
#ifdef ENABLE_AESNI_SUPPORT
/* Test bit 25 for AES-NI. */
if (features & 0x02000000)
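
For reference, the feature word tested above is ECX of CPUID leaf 1,
where SSSE3 is bit 9. A standalone sketch of the same test using GCC's
<cpuid.h> (not part of this patch):

  #include <cpuid.h>
  #include <stdio.h>

  int
  main (void)
  {
    unsigned int eax, ebx, ecx, edx;

    /* Leaf 1 reports the basic feature flags; SSSE3 is ECX bit 9. */
    if (!__get_cpuid (1, &eax, &ebx, &ecx, &edx))
      return 1;

    printf ("ssse3: %s\n", (ecx & (1 << 9)) ? "yes" : "no");
    return 0;
  }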