[PATCH 2/2] SHA-512: Add AVX and AVX2 implementations for x86-64

Jussi Kivilinna jussi.kivilinna at iki.fi
Thu Dec 12 15:00:03 CET 2013


* cipher/Makefile.am: Add 'sha512-avx-amd64.S' and
'sha512-avx2-bmi2-amd64.S'.
* cipher/sha512-avx-amd64.S: New.
* cipher/sha512-avx2-bmi2-amd64.S: New.
* cipher/sha512.c (USE_AVX, USE_AVX2): New.
(SHA512_CONTEXT) [USE_AVX]: Add 'use_avx'.
(SHA512_CONTEXT) [USE_AVX2]: Add 'use_avx2'.
(sha512_init, sha384_init) [USE_AVX]: Initialize 'use_avx'.
(sha512_init, sha384_init) [USE_AVX2]: Initialize 'use_avx2'.
[USE_AVX] (_gcry_sha512_transform_amd64_avx): New.
[USE_AVX2] (_gcry_sha512_transform_amd64_avx2): New.
(transform) [USE_AVX2]: Add call for AVX2 implementation.
(transform) [USE_AVX]: Add call for AVX implementation.
* configure.ac (HAVE_GCC_INLINE_ASM_BMI2): New check.
(sha512): Add 'sha512-avx-amd64.lo' and 'sha512-avx2-bmi2-amd64.lo'.
* src/g10lib.h (HWF_INTEL_CPU, HWF_INTEL_BMI2): New.
* src/global.c (hwflist): Add "intel-cpu" and "intel-bmi2".
* src/hwf-x86.c (detect_x86_gnuc): Check for HWF_INTEL_CPU and
HWF_INTEL_BMI2.
--

Patch adds fast AVX and AVX2 implementations of SHA-512 by Intel Corporation.
The assembly source is licensed under the 3-clause BSD license and is thus
compatible with LGPL2.1+. The original source can be accessed at:
 http://www.intel.com/p/en_US/embedded/hwsw/technology/packet-processing#docs

The implementation is described in the white paper
 "Fast SHA512 Implementations on Intel® Architecture Processors"
 http://www.intel.com/content/www/us/en/intelligent-systems/intel-technology/fast-sha512-implementat$

Note: The AVX implementation uses the SHLD instruction to emulate RORQ, since
      SHLD is faster than RORQ on Intel Sandy Bridge. On non-Intel CPUs,
      however, SHLD is much slower than RORQ, so the AVX implementation is
      (for now) limited to Intel CPUs. (See the short C sketch below for why
      SHLD can stand in for RORQ.)
Note: The AVX2 implementation also uses the BMI2 instruction RORX, hence the
      additional HWF flag.
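
For illustration (not part of the patch): the SHLD trick works because rotating
a 64-bit word right by n equals rotating it left by 64-n, and "shld r, r, 64-n"
with both operands being the same register is exactly that left rotate; this is
what the RORQ macro in sha512-avx-amd64.S emits. A minimal C sketch of the
identity:

  #include <assert.h>
  #include <stdint.h>

  /* Rotate right by n bits, as RORQ/rorx would.  */
  static uint64_t ror64 (uint64_t x, unsigned int n)
  {
    return (x >> n) | (x << (64 - n));
  }

  /* Rotate left by n bits; "shld r, r, n" with identical operands computes
     rol64 (r, n), so "shld r, r, 64 - n" equals ror64 (r, n).  */
  static uint64_t rol64 (uint64_t x, unsigned int n)
  {
    return (x << n) | (x >> (64 - n));
  }

  int main (void)
  {
    uint64_t x = UINT64_C (0x0123456789abcdef);
    unsigned int n;

    /* SHA-512 uses rotate amounts 1, 8, 14, 18, 19, 28, 34, 39, 41 and 61;
       check the identity for all n in 1..63.  */
    for (n = 1; n < 64; n++)
      assert (ror64 (x, n) == rol64 (x, 64 - n));

    return 0;
  }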

Benchmarks:

cpu                 Old         SSSE3       AVX/AVX2   AVX/AVX2 speedup
                                                       vs Old vs SSSE3
Intel i5-4570       10.11 c/B    7.56 c/B   6.72 c/B   1.50x  1.12x
Intel i5-2450M      14.11 c/B   10.53 c/B   8.88 c/B   1.58x  1.18x
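
For reference (again not part of the patch), the new HWF_INTEL_BMI2 flag boils
down to CPUID leaf 7 (sub-leaf 0): EBX bit 5 signals AVX2 and EBX bit 8 signals
BMI2, which is what the hwf-x86.c hunk below tests with libgcrypt's internal
get_cpuid. A rough standalone sketch using GCC's <cpuid.h> helpers:

  #include <stdio.h>
  #include <cpuid.h>   /* GCC/clang helper header */

  int main (void)
  {
    unsigned int eax, ebx, ecx, edx;

    /* Leaf 7 holds the structured extended feature flags.  */
    if (__get_cpuid_max (0, NULL) < 7)
      {
        puts ("CPUID leaf 7 not available");
        return 1;
      }
    __cpuid_count (7, 0, eax, ebx, ecx, edx);

    /* EBX bit 5 = AVX2, EBX bit 8 = BMI2 (provides rorx).  */
    printf ("AVX2: %s\n", (ebx & (1u << 5)) ? "yes" : "no");
    printf ("BMI2: %s\n", (ebx & (1u << 8)) ? "yes" : "no");
    return 0;
  }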

Signed-off-by: Jussi Kivilinna <jussi.kivilinna at iki.fi>
---
 cipher/Makefile.am              |    2 
 cipher/sha512-avx-amd64.S       |  412 +++++++++++++++++++++
 cipher/sha512-avx2-bmi2-amd64.S |  783 +++++++++++++++++++++++++++++++++++++++
 cipher/sha512.c                 |   72 +++-
 configure.ac                    |   19 +
 src/g10lib.h                    |    2 
 src/global.c                    |    2 
 src/hwf-x86.c                   |    7 
 8 files changed, 1293 insertions(+), 6 deletions(-)
 create mode 100644 cipher/sha512-avx-amd64.S
 create mode 100644 cipher/sha512-avx2-bmi2-amd64.S

diff --git a/cipher/Makefile.am b/cipher/Makefile.am
index 88c288a..7c85af2 100644
--- a/cipher/Makefile.am
+++ b/cipher/Makefile.am
@@ -78,7 +78,7 @@ scrypt.c \
 seed.c \
 serpent.c serpent-sse2-amd64.S serpent-avx2-amd64.S \
 sha1.c \
 sha256.c sha256-ssse3-amd64.S sha256-avx-amd64.S sha256-avx2-bmi2-amd64.S \
-sha512.c sha512-ssse3-amd64.S sha512-armv7-neon.S \
+sha512.c sha512-ssse3-amd64.S sha512-avx-amd64.S sha512-avx2-bmi2-amd64.S sha512-armv7-neon.S \
 stribog.c \
 tiger.c \
diff --git a/cipher/sha512-avx-amd64.S b/cipher/sha512-avx-amd64.S
new file mode 100644
index 0000000..01c1daa
--- /dev/null
+++ b/cipher/sha512-avx-amd64.S
@@ -0,0 +1,412 @@
+/*
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright (c) 2012, Intel Corporation 
+; 
+; All rights reserved. 
+; 
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are
+; met: 
+; 
+; * Redistributions of source code must retain the above copyright
+;   notice, this list of conditions and the following disclaimer.  
+; 
+; * Redistributions in binary form must reproduce the above copyright
+;   notice, this list of conditions and the following disclaimer in the
+;   documentation and/or other materials provided with the
+;   distribution. 
+; 
+; * Neither the name of the Intel Corporation nor the names of its
+;   contributors may be used to endorse or promote products derived from
+;   this software without specific prior written permission. 
+; 
+; 
+; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY
+; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
+; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+*/
+/*
+ * Conversion to GAS assembly and integration to libgcrypt
+ *  by Jussi Kivilinna <jussi.kivilinna at iki.fi>
+ */
+
+#ifdef __x86_64
+#include <config.h>
+#if defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) && \
+    defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \
+    defined(HAVE_GCC_INLINE_ASM_AVX) && defined(USE_SHA512)
+
+#ifdef __PIC__
+#  define ADD_RIP +rip
+#else
+#  define ADD_RIP
+#endif
+
+.intel_syntax noprefix
+
+.text
+
+/* Virtual Registers */
+msg = rdi /* ARG1 */
+digest = rsi /* ARG2 */
+msglen = rdx /* ARG3 */
+T1 = rcx
+T2 = r8
+a_64 = r9
+b_64 = r10
+c_64 = r11
+d_64 = r12
+e_64 = r13
+f_64 = r14
+g_64 = r15
+h_64 = rbx
+tmp0 = rax
+
+/*
+; Local variables (stack frame)
+; Note: frame_size must be an odd multiple of 8 bytes to XMM align RSP
+*/
+frame_W      = 0 /* Message Schedule */
+frame_W_size = (80 * 8)
+frame_WK      = ((frame_W) + (frame_W_size)) /* W[t] + K[t] | W[t+1] + K[t+1] */
+frame_WK_size = (2 * 8)
+frame_GPRSAVE      = ((frame_WK) + (frame_WK_size))
+frame_GPRSAVE_size = (5 * 8)
+frame_size = ((frame_GPRSAVE) + (frame_GPRSAVE_size))
+
+
+/* Useful QWORD "arrays" for simpler memory references */
+#define MSG(i)    msg    + 8*(i)               /* Input message (arg1) */
+#define DIGEST(i) digest + 8*(i)               /* Output Digest (arg2) */
+#define K_t(i)    .LK512   + 8*(i) ADD_RIP     /* SHA Constants (static mem) */
+#define W_t(i)    rsp + frame_W  + 8*(i)       /* Message Schedule (stack frame) */
+#define WK_2(i)   rsp + frame_WK + 8*((i) % 2) /* W[t]+K[t] (stack frame) */
+/* MSG, DIGEST, K_t, W_t are arrays */
+/* WK_2(t) points to 1 of 2 qwords at frame.WK depending on t being odd/even */
+
+.macro RotateState
+	/* Rotate symbols a..h right */
+	__TMP = h_64
+	h_64 =  g_64
+	g_64 =  f_64
+	f_64 =  e_64
+	e_64 =  d_64
+	d_64 =  c_64
+	c_64 =  b_64
+	b_64 =  a_64
+	a_64 =  __TMP
+.endm
+
+.macro RORQ p1 p2
+	/* shld is faster than ror on Intel Sandybridge */
+	shld	\p1, \p1, (64 - \p2)
+.endm
+
+.macro SHA512_Round t
+	/* Compute Round %%t */
+	mov	T1,   f_64        /* T1 = f */
+	mov	tmp0, e_64        /* tmp = e */
+	xor	T1,   g_64        /* T1 = f ^ g */
+	RORQ	tmp0, 23 /* 41     ; tmp = e ror 23 */
+	and	T1,   e_64        /* T1 = (f ^ g) & e */
+	xor	tmp0, e_64        /* tmp = (e ror 23) ^ e */
+	xor	T1,   g_64        /* T1 = ((f ^ g) & e) ^ g = CH(e,f,g) */
+	add	T1,   [WK_2(\t)] /* W[t] + K[t] from message scheduler */
+	RORQ	tmp0, 4 /* 18      ; tmp = ((e ror 23) ^ e) ror 4 */
+	xor	tmp0, e_64        /* tmp = (((e ror 23) ^ e) ror 4) ^ e */
+	mov	T2,   a_64        /* T2 = a */
+	add	T1,   h_64        /* T1 = CH(e,f,g) + W[t] + K[t] + h */
+	RORQ	tmp0, 14 /* 14     ; tmp = ((((e ror23)^e)ror4)^e)ror14 = S1(e) */
+	add	T1,   tmp0        /* T1 = CH(e,f,g) + W[t] + K[t] + S1(e) */
+	mov	tmp0, a_64        /* tmp = a */
+	xor	T2,   c_64        /* T2 = a ^ c */
+	and	tmp0, c_64        /* tmp = a & c */
+	and	T2,   b_64        /* T2 = (a ^ c) & b */
+	xor	T2,   tmp0        /* T2 = ((a ^ c) & b) ^ (a & c) = Maj(a,b,c) */
+	mov	tmp0, a_64        /* tmp = a */
+	RORQ	tmp0, 5 /* 39      ; tmp = a ror 5 */
+	xor	tmp0, a_64        /* tmp = (a ror 5) ^ a */
+	add	d_64, T1          /* e(next_state) = d + T1  */
+	RORQ	tmp0, 6 /* 34      ; tmp = ((a ror 5) ^ a) ror 6 */
+	xor	tmp0, a_64        /* tmp = (((a ror 5) ^ a) ror 6) ^ a */
+	lea	h_64, [T1 + T2]   /* a(next_state) = T1 + Maj(a,b,c) */
+	RORQ	tmp0, 28 /* 28     ; tmp = ((((a ror5)^a)ror6)^a)ror28 = S0(a) */
+	add	h_64, tmp0        /* a(next_state) = T1 + Maj(a,b,c) + S0(a) */
+	RotateState
+.endm
+
+.macro SHA512_2Sched_2Round_avx t
+/*	; Compute rounds %%t-2 and %%t-1
+	; Compute message schedule QWORDS %%t and %%t+1
+
+	;   Two rounds are computed based on the values for K[t-2]+W[t-2] and 
+	; K[t-1]+W[t-1] which were previously stored at WK_2 by the message
+	; scheduler.
+	;   The two new schedule QWORDS are stored at [W_t(%%t)] and [W_t(%%t+1)].
+	; They are then added to their respective SHA512 constants at
+	; [K_t(%%t)] and [K_t(%%t+1)] and stored at dqword [WK_2(%%t)]
+	;   For brevity, the comments following vectored instructions only refer to
+	; the first of a pair of QWORDS.
+	; Eg. XMM4=W[t-2] really means XMM4={W[t-2]|W[t-1]}
+	;   The computation of the message schedule and the rounds are tightly
+	; stitched to take advantage of instruction-level parallelism.
+	; For clarity, integer instructions (for the rounds calculation) are indented
+	; by one tab. Vectored instructions (for the message scheduler) are indented
+	; by two tabs. */
+
+		vmovdqa	xmm4, [W_t(\t-2)]   /* XMM4 = W[t-2] */
+		vmovdqu	xmm5, [W_t(\t-15)]  /* XMM5 = W[t-15] */
+	mov	T1,   f_64
+		vpsrlq	xmm0, xmm4, 61       /* XMM0 = W[t-2]>>61 */
+	mov	tmp0, e_64
+		vpsrlq	xmm6, xmm5, 1        /* XMM6 = W[t-15]>>1 */
+	xor	T1,   g_64
+	RORQ	tmp0, 23 /* 41 */
+		vpsrlq	xmm1, xmm4, 19       /* XMM1 = W[t-2]>>19 */
+	and	T1,   e_64
+	xor	tmp0, e_64
+		vpxor	xmm0, xmm0, xmm1           /* XMM0 = W[t-2]>>61 ^ W[t-2]>>19 */
+	xor	T1,   g_64
+	add	T1,   [WK_2(\t)];
+		vpsrlq	xmm7, xmm5, 8        /* XMM7 = W[t-15]>>8 */
+	RORQ	tmp0, 4 /* 18 */
+		vpsrlq	xmm2, xmm4, 6        /* XMM2 = W[t-2]>>6 */
+	xor	tmp0, e_64
+	mov	T2,   a_64
+	add	T1,   h_64
+		vpxor	xmm6, xmm6, xmm7           /* XMM6 = W[t-15]>>1 ^ W[t-15]>>8 */
+	RORQ	tmp0, 14 /* 14 */
+	add	T1,   tmp0
+		vpsrlq	xmm8, xmm5, 7        /* XMM8 = W[t-15]>>7 */
+	mov 	tmp0, a_64
+	xor	T2,   c_64
+		vpsllq	xmm3, xmm4, (64-61)  /* XMM3 = W[t-2]<<3 */
+	and	tmp0, c_64
+	and	T2,   b_64
+		vpxor	xmm2, xmm2, xmm3           /* XMM2 = W[t-2]>>6 ^ W[t-2]<<3 */
+	xor	T2,   tmp0
+	mov	tmp0, a_64
+		vpsllq	xmm9, xmm5, (64-1)   /* XMM9 = W[t-15]<<63 */
+	RORQ	tmp0, 5 /* 39 */
+		vpxor	xmm8, xmm8, xmm9           /* XMM8 = W[t-15]>>7 ^ W[t-15]<<63 */
+	xor	tmp0, a_64
+	add	d_64, T1
+	RORQ	tmp0, 6 /* 34 */
+	xor	tmp0, a_64
+		vpxor	xmm6, xmm6, xmm8           /* XMM6 = W[t-15]>>1 ^ W[t-15]>>8 ^ W[t-15]>>7 ^ W[t-15]<<63 */
+	lea	h_64, [T1 + T2]
+	RORQ 	tmp0, 28 /* 28 */
+		vpsllq	xmm4, xmm4, (64-19)        /* XMM4 = W[t-2]<<45 */
+	add	h_64, tmp0
+	RotateState
+		vpxor	xmm0, xmm0, xmm4           /* XMM0 = W[t-2]>>61 ^ W[t-2]>>19 ^ W[t-2]<<45 */
+	mov	T1, f_64
+		vpxor	xmm0, xmm0, xmm2           /* XMM0 = s1(W[t-2]) */
+	mov	tmp0, e_64
+	xor	T1,   g_64
+		vpaddq	xmm0, xmm0, [W_t(\t-16)]  /* XMM0 = s1(W[t-2]) + W[t-16] */
+		vmovdqu	xmm1, [W_t(\t- 7)]  /* XMM1 = W[t-7] */
+	RORQ	tmp0, 23 /* 41 */
+	and	T1,   e_64
+	xor	tmp0, e_64
+	xor	T1,   g_64
+		vpsllq	xmm5, xmm5, (64-8)         /* XMM5 = W[t-15]<<56 */
+	add	T1,   [WK_2(\t+1)]
+		vpxor	xmm6, xmm6, xmm5           /* XMM6 = s0(W[t-15]) */
+	RORQ	tmp0, 4 /* 18 */
+		vpaddq	xmm0, xmm0, xmm6           /* XMM0 = s1(W[t-2]) + W[t-16] + s0(W[t-15]) */
+	xor	tmp0, e_64
+		vpaddq	xmm0, xmm0, xmm1           /* XMM0 = W[t] = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16] */
+	mov	T2,   a_64
+	add	T1,   h_64
+	RORQ	tmp0, 14 /* 14 */
+	add	T1,   tmp0
+		vmovdqa	[W_t(\t)], xmm0      /* Store W[t] */
+		vpaddq	xmm0, xmm0, [K_t(t)]        /* Compute W[t]+K[t] */
+		vmovdqa	[WK_2(t)], xmm0       /* Store W[t]+K[t] for next rounds */
+	mov	tmp0, a_64
+	xor	T2,   c_64
+	and	tmp0, c_64
+	and	T2,   b_64
+	xor	T2,   tmp0
+	mov	tmp0, a_64
+	RORQ	tmp0, 5 /* 39 */
+	xor	tmp0, a_64
+	add	d_64, T1
+	RORQ	tmp0, 6 /* 34 */
+	xor	tmp0, a_64
+	lea	h_64, [T1 + T2]
+	RORQ	tmp0, 28 /* 28 */
+	add	h_64, tmp0
+	RotateState
+.endm
+
+/*
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; void sha512_avx(const void* M, void* D, uint64_t L);
+; Purpose: Updates the SHA512 digest stored at D with the message stored in M.
+; The size of the message pointed to by M must be an integer multiple of SHA512
+;   message blocks.
+; L is the message length in SHA512 blocks
+*/
+.globl _gcry_sha512_transform_amd64_avx
+.type _gcry_sha512_transform_amd64_avx,@function;
+.align 16
+_gcry_sha512_transform_amd64_avx:
+	xor eax, eax
+
+	cmp	msglen, 0
+	je	.Lnowork
+	
+	/* Allocate Stack Space */
+	sub	rsp, frame_size
+
+	/* Save GPRs */
+	mov	[rsp + frame_GPRSAVE + 8 * 0], rbx
+	mov	[rsp + frame_GPRSAVE + 8 * 1], r12
+	mov	[rsp + frame_GPRSAVE + 8 * 2], r13
+	mov	[rsp + frame_GPRSAVE + 8 * 3], r14
+	mov	[rsp + frame_GPRSAVE + 8 * 4], r15
+
+.Lupdateblock:
+
+	/* Load state variables */
+	mov	a_64, [DIGEST(0)]
+	mov	b_64, [DIGEST(1)]
+	mov	c_64, [DIGEST(2)]
+	mov	d_64, [DIGEST(3)]
+	mov	e_64, [DIGEST(4)]
+	mov	f_64, [DIGEST(5)]
+	mov	g_64, [DIGEST(6)]
+	mov	h_64, [DIGEST(7)]
+
+	t = 0
+	.rept 80/2 + 1
+	/* (80 rounds) / (2 rounds/iteration) + (1 iteration) */
+	/* +1 iteration because the scheduler leads hashing by 1 iteration */
+		.if t < 2
+			/* BSWAP 2 QWORDS */
+			vmovdqa	xmm1, [.LXMM_QWORD_BSWAP ADD_RIP]
+			vmovdqu	xmm0, [MSG(t)]
+			vpshufb	xmm0, xmm0, xmm1     /* BSWAP */
+			vmovdqa	[W_t(t)], xmm0       /* Store Scheduled Pair */
+			vpaddq	xmm0, xmm0, [K_t(t)] /* Compute W[t]+K[t] */
+			vmovdqa	[WK_2(t)], xmm0      /* Store into WK for rounds */
+		.elseif t < 16
+			/* BSWAP 2 QWORDS, Compute 2 Rounds */
+			vmovdqu	xmm0, [MSG(t)]
+			vpshufb	xmm0, xmm0, xmm1     /* BSWAP */
+			SHA512_Round (t - 2)         /* Round t-2 */
+			vmovdqa	[W_t(t)], xmm0       /* Store Scheduled Pair */
+			vpaddq	xmm0, xmm0, [K_t(t)] /* Compute W[t]+K[t] */
+			SHA512_Round (t - 1)         /* Round t-1 */
+			vmovdqa	[WK_2(t)], xmm0      /* W[t]+K[t] into WK */
+		.elseif t < 79
+			/* Schedule 2 QWORDS; Compute 2 Rounds */
+			SHA512_2Sched_2Round_avx t
+		.else
+			/* Compute 2 Rounds */
+			SHA512_Round (t - 2)
+			SHA512_Round (t - 1)
+		.endif
+		t = ((t)+2)
+	.endr
+
+	/* Update digest */
+	add	[DIGEST(0)], a_64
+	add	[DIGEST(1)], b_64
+	add	[DIGEST(2)], c_64
+	add	[DIGEST(3)], d_64
+	add	[DIGEST(4)], e_64
+	add	[DIGEST(5)], f_64
+	add	[DIGEST(6)], g_64
+	add	[DIGEST(7)], h_64
+
+	/* Advance to next message block */
+	add	msg, 16*8
+	dec	msglen
+	jnz	.Lupdateblock
+
+	/* Restore GPRs */
+	mov	rbx, [rsp + frame_GPRSAVE + 8 * 0]
+	mov	r12, [rsp + frame_GPRSAVE + 8 * 1]
+	mov	r13, [rsp + frame_GPRSAVE + 8 * 2]
+	mov	r14, [rsp + frame_GPRSAVE + 8 * 3]
+	mov	r15, [rsp + frame_GPRSAVE + 8 * 4]
+
+	/* Restore Stack Pointer */
+	add	rsp, frame_size
+
+	/* Return stack burn depth */
+	mov	rax, frame_size
+
+.Lnowork:
+	ret
+
+/*
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;; Binary Data
+*/
+
+.data
+
+.align 16
+
+/* Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. */
+.LXMM_QWORD_BSWAP:
+	.octa 0x08090a0b0c0d0e0f0001020304050607
+
+/* K[t] used in SHA512 hashing */
+.LK512:
+	.quad 0x428a2f98d728ae22,0x7137449123ef65cd
+	.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
+	.quad 0x3956c25bf348b538,0x59f111f1b605d019
+	.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
+	.quad 0xd807aa98a3030242,0x12835b0145706fbe
+	.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
+	.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
+	.quad 0x9bdc06a725c71235,0xc19bf174cf692694
+	.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
+	.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+	.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
+	.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+	.quad 0x983e5152ee66dfab,0xa831c66d2db43210
+	.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
+	.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
+	.quad 0x06ca6351e003826f,0x142929670a0e6e70
+	.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
+	.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+	.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
+	.quad 0x81c2c92e47edaee6,0x92722c851482353b
+	.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
+	.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
+	.quad 0xd192e819d6ef5218,0xd69906245565a910
+	.quad 0xf40e35855771202a,0x106aa07032bbd1b8
+	.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
+	.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+	.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+	.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
+	.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
+	.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
+	.quad 0x90befffa23631e28,0xa4506cebde82bde9
+	.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
+	.quad 0xca273eceea26619c,0xd186b8c721c0c207
+	.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
+	.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
+	.quad 0x113f9804bef90dae,0x1b710b35131c471b
+	.quad 0x28db77f523047d84,0x32caab7b40c72493
+	.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
+	.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+	.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
+
+#endif
+#endif
diff --git a/cipher/sha512-avx2-bmi2-amd64.S b/cipher/sha512-avx2-bmi2-amd64.S
new file mode 100644
index 0000000..9573c8d
--- /dev/null
+++ b/cipher/sha512-avx2-bmi2-amd64.S
@@ -0,0 +1,783 @@
+/*
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright (c) 2012, Intel Corporation
+;
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are
+; met:
+;
+; * Redistributions of source code must retain the above copyright
+;   notice, this list of conditions and the following disclaimer. 
+;
+; * Redistributions in binary form must reproduce the above copyright
+;   notice, this list of conditions and the following disclaimer in the
+;   documentation and/or other materials provided with the
+;   distribution.
+;
+; * Neither the name of the Intel Corporation nor the names of its
+;   contributors may be used to endorse or promote products derived from
+;   this software without specific prior written permission.
+;
+;
+; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY
+; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
+; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; This code schedules 1 block at a time, with 4 lanes per block
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+*/
+/*
+ * Conversion to GAS assembly and integration to libgcrypt
+ *  by Jussi Kivilinna <jussi.kivilinna at iki.fi>
+ */
+
+#ifdef __x86_64
+#include <config.h>
+#if defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) && \
+    defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \
+    defined(HAVE_GCC_INLINE_ASM_AVX2) && defined(HAVE_GCC_INLINE_ASM_BMI2) && \
+    defined(USE_SHA512)
+
+#ifdef __PIC__
+#  define ADD_RIP +rip
+#else
+#  define ADD_RIP
+#endif
+
+.intel_syntax noprefix
+
+.text
+
+/* Virtual Registers */
+Y_0 = ymm4
+Y_1 = ymm5
+Y_2 = ymm6
+Y_3 = ymm7
+
+YTMP0 = ymm0
+YTMP1 = ymm1
+YTMP2 = ymm2
+YTMP3 = ymm3
+YTMP4 = ymm8
+XFER =  YTMP0
+
+BYTE_FLIP_MASK =  ymm9
+
+INP =         rdi /* 1st arg */
+CTX =         rsi /* 2nd arg */
+NUM_BLKS =    rdx /* 3rd arg */
+c =           rcx
+d =           r8
+e =           rdx
+y3 =          rdi
+
+TBL =   rbp
+	     
+a =     rax
+b =     rbx
+	     
+f =     r9
+g =     r10
+h =     r11
+old_h = r11
+
+T1 =    r12
+y0 =    r13
+y1 =    r14
+y2 =    r15
+
+y4 =    r12
+
+/* Local variables (stack frame) */
+#define frame_XFER      0
+#define frame_XFER_size (4*8)
+#define frame_SRND      (frame_XFER + frame_XFER_size)
+#define frame_SRND_size (1*8)
+#define frame_INP      (frame_SRND + frame_SRND_size)
+#define frame_INP_size (1*8)
+#define frame_INPEND      (frame_INP + frame_INP_size)
+#define frame_INPEND_size (1*8)
+#define frame_RSPSAVE      (frame_INPEND + frame_INPEND_size)
+#define frame_RSPSAVE_size (1*8)
+#define frame_GPRSAVE      (frame_RSPSAVE + frame_RSPSAVE_size)
+#define frame_GPRSAVE_size (6*8)
+#define frame_size (frame_GPRSAVE + frame_GPRSAVE_size)
+
+#define	VMOVDQ vmovdqu /*; assume buffers not aligned  */
+
+/* addm [mem], reg */
+/* Add reg to mem using reg-mem add and store */
+.macro addm p1 p2
+	add	\p2, \p1
+	mov	\p1, \p2
+.endm
+
+
+/* COPY_YMM_AND_BSWAP ymm, [mem], byte_flip_mask */
+/* Load ymm with mem and byte swap each qword */
+.macro COPY_YMM_AND_BSWAP p1 p2 p3
+	VMOVDQ \p1, \p2
+	vpshufb \p1, \p1, \p3
+.endm
+/* rotate_Ys */
+/* Rotate values of symbols Y0...Y3 */
+.macro rotate_Ys
+	__Y_ = Y_0
+	Y_0 = Y_1
+	Y_1 = Y_2
+	Y_2 = Y_3
+	Y_3 = __Y_
+.endm
+
+/* RotateState */
+.macro RotateState
+	/* Rotate symbols a..h right */
+	old_h =  h
+	__TMP_ = h
+	h =      g
+	g =      f
+	f =      e
+	e =      d
+	d =      c
+	c =      b
+	b =      a
+	a =      __TMP_
+.endm
+
+/* %macro MY_VPALIGNR	YDST, YSRC1, YSRC2, RVAL */
+/* YDST = {YSRC1, YSRC2} >> RVAL*8 */
+.macro MY_VPALIGNR YDST, YSRC1, YSRC2, RVAL
+	vperm2f128 	\YDST, \YSRC1, \YSRC2, 0x3	/* YDST = {YS1_LO, YS2_HI} */
+	vpalignr 	\YDST, \YDST, \YSRC2, \RVAL	/* YDST = {YDS1, YS2} >> RVAL*8 */
+.endm
+
+.macro FOUR_ROUNDS_AND_SCHED
+/*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */
+
+		/* Extract w[t-7] */
+		MY_VPALIGNR	YTMP0, Y_3, Y_2, 8		/* YTMP0 = W[-7] */
+		/* Calculate w[t-16] + w[t-7] */
+		vpaddq		YTMP0, YTMP0, Y_0		/* YTMP0 = W[-7] + W[-16] */
+		/* Extract w[t-15] */
+		MY_VPALIGNR	YTMP1, Y_1, Y_0, 8		/* YTMP1 = W[-15] */
+
+		/* Calculate sigma0 */
+
+		/* Calculate w[t-15] ror 1 */
+		vpsrlq		YTMP2, YTMP1, 1
+		vpsllq		YTMP3, YTMP1, (64-1)
+		vpor		YTMP3, YTMP3, YTMP2		/* YTMP3 = W[-15] ror 1 */
+		/* Calculate w[t-15] shr 7 */
+		vpsrlq		YTMP4, YTMP1, 7			/* YTMP4 = W[-15] >> 7 */
+
+	mov	y3, a		/* y3 = a                                       ; MAJA	 */
+	rorx	y0, e, 41	/* y0 = e >> 41					; S1A */
+	rorx	y1, e, 18	/* y1 = e >> 18					; S1B */
+
+	add	h, [rsp+frame_XFER+0*8]		/* h = k + w + h                                ; --	 */
+	or	y3, c		/* y3 = a|c                                     ; MAJA	 */
+	mov	y2, f		/* y2 = f                                       ; CH	 */
+	rorx	T1, a, 34	/* T1 = a >> 34					; S0B */
+
+	xor	y0, y1		/* y0 = (e>>41) ^ (e>>18)			; S1 */
+	xor	y2, g		/* y2 = f^g                                     ; CH	 */
+	rorx	y1, e, 14	/* y1 = (e >> 14)					; S1 */
+
+	and	y2, e		/* y2 = (f^g)&e                                 ; CH	 */
+	xor	y0, y1		/* y0 = (e>>41) ^ (e>>18) ^ (e>>14)		; S1 */
+	rorx	y1, a, 39	/* y1 = a >> 39					; S0A */
+	add	d, h		/* d = k + w + h + d                            ; --	 */
+
+	and	y3, b		/* y3 = (a|c)&b                                 ; MAJA	 */
+	xor	y1, T1		/* y1 = (a>>39) ^ (a>>34)			; S0 */
+	rorx	T1, a, 28	/* T1 = (a >> 28)					; S0 */
+
+	xor	y2, g		/* y2 = CH = ((f^g)&e)^g                        ; CH	 */
+	xor	y1, T1		/* y1 = (a>>39) ^ (a>>34) ^ (a>>28)		; S0 */
+	mov	T1, a		/* T1 = a                                       ; MAJB	 */
+	and	T1, c		/* T1 = a&c                                     ; MAJB	 */
+
+	add	y2, y0		/* y2 = S1 + CH                                 ; --	 */
+	or	y3, T1		/* y3 = MAJ = (a|c)&b)|(a&c)                    ; MAJ	 */
+	add	h, y1		/* h = k + w + h + S0                           ; --	 */
+
+	add	d, y2		/* d = k + w + h + d + S1 + CH = d + t1         ; --	 */
+
+	add	h, y2		/* h = k + w + h + S0 + S1 + CH = t1 + S0       ; --	 */
+	add	h, y3		/* h = t1 + S0 + MAJ                            ; --	 */
+
+RotateState
+
+/*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 1 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */
+
+/*;;;;;;;;;;;;;;;;;;;;;;;;; */
+
+		/* Calculate w[t-15] ror 8 */
+		vpsrlq		YTMP2, YTMP1, 8
+		vpsllq		YTMP1, YTMP1, (64-8)
+		vpor		YTMP1, YTMP1, YTMP2		/* YTMP1 = W[-15] ror 8 */
+		/* XOR the three components */
+		vpxor		YTMP3, YTMP3, YTMP4		/* YTMP3 = W[-15] ror 1 ^ W[-15] >> 7 */
+		vpxor		YTMP1, YTMP3, YTMP1		/* YTMP1 = s0 */
+
+
+		/* Add three components, w[t-16], w[t-7] and sigma0 */
+		vpaddq		YTMP0, YTMP0, YTMP1		/* YTMP0 = W[-16] + W[-7] + s0 */
+		/* Move to appropriate lanes for calculating w[16] and w[17] */
+		vperm2f128	Y_0, YTMP0, YTMP0, 0x0		/* Y_0 = W[-16] + W[-7] + s0 {BABA} */
+		/* Move to appropriate lanes for calculating w[18] and w[19] */
+		vpand		YTMP0, YTMP0, [.LMASK_YMM_LO ADD_RIP]	/* YTMP0 = W[-16] + W[-7] + s0 {DC00} */
+
+		/* Calculate w[16] and w[17] in both 128 bit lanes */
+
+		/* Calculate sigma1 for w[16] and w[17] on both 128 bit lanes */
+		vperm2f128	YTMP2, Y_3, Y_3, 0x11		/* YTMP2 = W[-2] {BABA} */
+		vpsrlq		YTMP4, YTMP2, 6			/* YTMP4 = W[-2] >> 6 {BABA} */
+
+
+	mov	y3, a		/* y3 = a                                       ; MAJA	 */
+	rorx	y0, e, 41	/* y0 = e >> 41					; S1A */
+	rorx	y1, e, 18	/* y1 = e >> 18					; S1B */
+	add	h, [rsp+frame_XFER+1*8]		/* h = k + w + h                                ; --	 */
+	or	y3, c		/* y3 = a|c                                     ; MAJA	 */
+
+
+	mov	y2, f		/* y2 = f                                       ; CH	 */
+	rorx	T1, a, 34	/* T1 = a >> 34					; S0B */
+	xor	y0, y1		/* y0 = (e>>41) ^ (e>>18)			; S1 */
+	xor	y2, g		/* y2 = f^g                                     ; CH	 */
+
+
+	rorx	y1, e, 14	/* y1 = (e >> 14)					; S1 */
+	xor	y0, y1		/* y0 = (e>>41) ^ (e>>18) ^ (e>>14)		; S1 */
+	rorx	y1, a, 39	/* y1 = a >> 39					; S0A */
+	and	y2, e		/* y2 = (f^g)&e                                 ; CH	 */
+	add	d, h		/* d = k + w + h + d                            ; --	 */
+
+	and	y3, b		/* y3 = (a|c)&b                                 ; MAJA	 */
+	xor	y1, T1		/* y1 = (a>>39) ^ (a>>34)			; S0 */
+
+	rorx	T1, a, 28	/* T1 = (a >> 28)					; S0 */
+	xor	y2, g		/* y2 = CH = ((f^g)&e)^g                        ; CH	 */
+
+	xor	y1, T1		/* y1 = (a>>39) ^ (a>>34) ^ (a>>28)		; S0 */
+	mov	T1, a		/* T1 = a                                       ; MAJB	 */
+	and	T1, c		/* T1 = a&c                                     ; MAJB	 */
+	add	y2, y0		/* y2 = S1 + CH                                 ; --	 */
+
+	or	y3, T1		/* y3 = MAJ = (a|c)&b)|(a&c)                    ; MAJ	 */
+	add	h, y1		/* h = k + w + h + S0                           ; --	 */
+
+	add	d, y2		/* d = k + w + h + d + S1 + CH = d + t1         ; --	 */
+	add	h, y2		/* h = k + w + h + S0 + S1 + CH = t1 + S0       ; --	 */
+	add	h, y3		/* h = t1 + S0 + MAJ                            ; --	 */
+
+RotateState
+
+
+
+
+/*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 2 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */
+
+/*;;;;;;;;;;;;;;;;;;;;;;;;; */
+
+
+		vpsrlq		YTMP3, YTMP2, 19		/* YTMP3 = W[-2] >> 19 {BABA} */
+		vpsllq		YTMP1, YTMP2, (64-19)		/* YTMP1 = W[-2] << 19 {BABA} */
+		vpor		YTMP3, YTMP3, YTMP1		/* YTMP3 = W[-2] ror 19 {BABA} */
+		vpxor		YTMP4, YTMP4, YTMP3		/* YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {BABA} */
+		vpsrlq		YTMP3, YTMP2, 61		/* YTMP3 = W[-2] >> 61 {BABA} */
+		vpsllq		YTMP1, YTMP2, (64-61)		/* YTMP1 = W[-2] << 61 {BABA} */
+		vpor		YTMP3, YTMP3, YTMP1		/* YTMP3 = W[-2] ror 61 {BABA} */
+		vpxor		YTMP4, YTMP4, YTMP3		/* YTMP4 = s1 = (W[-2] ror 19) ^ (W[-2] ror 61) ^ (W[-2] >> 6) {BABA} */
+
+		/* Add sigma1 to the other components to get w[16] and w[17] */
+		vpaddq		Y_0, Y_0, YTMP4			/* Y_0 = {W[1], W[0], W[1], W[0]} */
+
+		/* Calculate sigma1 for w[18] and w[19] for upper 128 bit lane */
+		vpsrlq		YTMP4, Y_0, 6			/* YTMP4 = W[-2] >> 6 {DC--} */
+
+	mov	y3, a		/* y3 = a                                       ; MAJA	 */
+	rorx	y0, e, 41	/* y0 = e >> 41					; S1A */
+	add	h, [rsp+frame_XFER+2*8]		/* h = k + w + h                                ; --	 */
+
+	rorx	y1, e, 18	/* y1 = e >> 18					; S1B */
+	or	y3, c		/* y3 = a|c                                     ; MAJA	 */
+	mov	y2, f		/* y2 = f                                       ; CH	 */
+	xor	y2, g		/* y2 = f^g                                     ; CH	 */
+
+	rorx	T1, a, 34	/* T1 = a >> 34					; S0B */
+	xor	y0, y1		/* y0 = (e>>41) ^ (e>>18)			; S1 */
+	and	y2, e		/* y2 = (f^g)&e                                 ; CH	 */
+
+	rorx	y1, e, 14	/* y1 = (e >> 14)					; S1 */
+	add	d, h		/* d = k + w + h + d                            ; --	 */
+	and	y3, b		/* y3 = (a|c)&b                                 ; MAJA	 */
+
+	xor	y0, y1		/* y0 = (e>>41) ^ (e>>18) ^ (e>>14)		; S1 */
+	rorx	y1, a, 39	/* y1 = a >> 39					; S0A */
+	xor	y2, g		/* y2 = CH = ((f^g)&e)^g                        ; CH	 */
+
+	xor	y1, T1		/* y1 = (a>>39) ^ (a>>34)			; S0 */
+	rorx	T1, a, 28	/* T1 = (a >> 28)					; S0 */
+
+	xor	y1, T1		/* y1 = (a>>39) ^ (a>>34) ^ (a>>28)		; S0 */
+	mov	T1, a		/* T1 = a                                       ; MAJB	 */
+	and	T1, c		/* T1 = a&c                                     ; MAJB	 */
+	add	y2, y0		/* y2 = S1 + CH                                 ; --	 */
+
+	or	y3, T1		/* y3 = MAJ = (a|c)&b)|(a&c)                    ; MAJ	 */
+	add	h, y1		/* h = k + w + h + S0                           ; --	 */
+	add	d, y2		/* d = k + w + h + d + S1 + CH = d + t1         ; --	 */
+	add	h, y2		/* h = k + w + h + S0 + S1 + CH = t1 + S0       ; --	 */
+
+	add	h, y3		/* h = t1 + S0 + MAJ                            ; --	 */
+
+RotateState
+
+/*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 3 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */
+
+/*;;;;;;;;;;;;;;;;;;;;;;;;; */
+
+		vpsrlq		YTMP3, Y_0, 19			/* YTMP3 = W[-2] >> 19 {DC--} */
+		vpsllq		YTMP1, Y_0, (64-19)		/* YTMP1 = W[-2] << 19 {DC--} */
+		vpor		YTMP3, YTMP3, YTMP1		/* YTMP3 = W[-2] ror 19 {DC--} */
+		vpxor		YTMP4, YTMP4, YTMP3		/* YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {DC--} */
+		vpsrlq		YTMP3, Y_0, 61			/* YTMP3 = W[-2] >> 61 {DC--} */
+		vpsllq		YTMP1, Y_0, (64-61)		/* YTMP1 = W[-2] << 61 {DC--} */
+		vpor		YTMP3, YTMP3, YTMP1		/* YTMP3 = W[-2] ror 61 {DC--} */
+		vpxor		YTMP4, YTMP4, YTMP3		/* YTMP4 = s1 = (W[-2] ror 19) ^ (W[-2] ror 61) ^ (W[-2] >> 6) {DC--} */
+
+		/* Add the sigma0 + w[t-7] + w[t-16] for w[18] and w[19] to newly calculated sigma1 to get w[18] and w[19] */
+		vpaddq		YTMP2, YTMP0, YTMP4		/* YTMP2 = {W[3], W[2], --, --} */
+
+		/* Form w[19], w[18], w[17], w[16] */
+		vpblendd		Y_0, Y_0, YTMP2, 0xF0		/* Y_0 = {W[3], W[2], W[1], W[0]} */
+/*		vperm2f128		Y_0, Y_0, YTMP2, 0x30 */
+
+	mov	y3, a		/* y3 = a                                       ; MAJA	 */
+	rorx	y0, e, 41	/* y0 = e >> 41					; S1A */
+	rorx	y1, e, 18	/* y1 = e >> 18					; S1B */
+	add	h, [rsp+frame_XFER+3*8]		/* h = k + w + h                                ; --	 */
+	or	y3, c		/* y3 = a|c                                     ; MAJA	 */
+
+
+	mov	y2, f		/* y2 = f                                       ; CH	 */
+	rorx	T1, a, 34	/* T1 = a >> 34					; S0B */
+	xor	y0, y1		/* y0 = (e>>41) ^ (e>>18)			; S1 */
+	xor	y2, g		/* y2 = f^g                                     ; CH	 */
+
+
+	rorx	y1, e, 14	/* y1 = (e >> 14)					; S1 */
+	and	y2, e		/* y2 = (f^g)&e                                 ; CH	 */
+	add	d, h		/* d = k + w + h + d                            ; --	 */
+	and	y3, b		/* y3 = (a|c)&b                                 ; MAJA	 */
+
+	xor	y0, y1		/* y0 = (e>>41) ^ (e>>18) ^ (e>>14)		; S1 */
+	xor	y2, g		/* y2 = CH = ((f^g)&e)^g                        ; CH	 */
+
+	rorx	y1, a, 39	/* y1 = a >> 39					; S0A */
+	add	y2, y0		/* y2 = S1 + CH                                 ; --	 */
+
+	xor	y1, T1		/* y1 = (a>>39) ^ (a>>34)			; S0 */
+	add	d, y2		/* d = k + w + h + d + S1 + CH = d + t1         ; --	 */
+
+	rorx	T1, a, 28	/* T1 = (a >> 28)					; S0 */
+
+	xor	y1, T1		/* y1 = (a>>39) ^ (a>>34) ^ (a>>28)		; S0 */
+	mov	T1, a		/* T1 = a                                       ; MAJB	 */
+	and	T1, c		/* T1 = a&c                                     ; MAJB	 */
+	or	y3, T1		/* y3 = MAJ = (a|c)&b)|(a&c)                    ; MAJ	 */
+
+	add	h, y1		/* h = k + w + h + S0                           ; --	 */
+	add	h, y2		/* h = k + w + h + S0 + S1 + CH = t1 + S0       ; --	 */
+	add	h, y3		/* h = t1 + S0 + MAJ                            ; --	 */
+
+RotateState
+
+rotate_Ys
+.endm
+
+.macro DO_4ROUNDS
+
+/*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */
+
+	mov	y2, f		/* y2 = f                                       ; CH	 */
+	rorx	y0, e, 41	/* y0 = e >> 41					; S1A */
+	rorx	y1, e, 18	/* y1 = e >> 18					; S1B */
+	xor	y2, g		/* y2 = f^g                                     ; CH	 */
+
+	xor	y0, y1		/* y0 = (e>>41) ^ (e>>18)			; S1 */
+	rorx	y1, e, 14	/* y1 = (e >> 14)					; S1 */
+	and	y2, e		/* y2 = (f^g)&e                                 ; CH	 */
+
+	xor	y0, y1		/* y0 = (e>>41) ^ (e>>18) ^ (e>>14)		; S1 */
+	rorx	T1, a, 34	/* T1 = a >> 34					; S0B */
+	xor	y2, g		/* y2 = CH = ((f^g)&e)^g                        ; CH	 */
+	rorx	y1, a, 39	/* y1 = a >> 39					; S0A */
+	mov	y3, a		/* y3 = a                                       ; MAJA	 */
+
+	xor	y1, T1		/* y1 = (a>>39) ^ (a>>34)			; S0 */
+	rorx	T1, a, 28	/* T1 = (a >> 28)					; S0 */
+	add	h, [rsp + frame_XFER + 8*0]		/* h = k + w + h                                ; --	 */
+	or	y3, c		/* y3 = a|c                                     ; MAJA	 */
+
+	xor	y1, T1		/* y1 = (a>>39) ^ (a>>34) ^ (a>>28)		; S0 */
+	mov	T1, a		/* T1 = a                                       ; MAJB	 */
+	and	y3, b		/* y3 = (a|c)&b                                 ; MAJA	 */
+	and	T1, c		/* T1 = a&c                                     ; MAJB	 */
+	add	y2, y0		/* y2 = S1 + CH                                 ; --	 */
+
+
+	add	d, h		/* d = k + w + h + d                            ; --	 */
+	or	y3, T1		/* y3 = MAJ = (a|c)&b)|(a&c)                    ; MAJ	 */
+	add	h, y1		/* h = k + w + h + S0                           ; --	 */
+
+	add	d, y2		/* d = k + w + h + d + S1 + CH = d + t1         ; --	 */
+
+
+	/*add	h, y2		; h = k + w + h + S0 + S1 + CH = t1 + S0       ; --	 */
+
+	/*add	h, y3		; h = t1 + S0 + MAJ                            ; --	 */
+
+	RotateState
+
+/*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 1 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */
+
+	add	old_h, y2	/* h = k + w + h + S0 + S1 + CH = t1 + S0       ; --	 */
+	mov	y2, f		/* y2 = f                                       ; CH	 */
+	rorx	y0, e, 41	/* y0 = e >> 41					; S1A */
+	rorx	y1, e, 18	/* y1 = e >> 18					; S1B */
+	xor	y2, g		/* y2 = f^g                                     ; CH	 */
+
+	xor	y0, y1		/* y0 = (e>>41) ^ (e>>18)			; S1 */
+	rorx	y1, e, 14	/* y1 = (e >> 14)					; S1 */
+	and	y2, e		/* y2 = (f^g)&e                                 ; CH	 */
+	add	old_h, y3	/* h = t1 + S0 + MAJ                            ; --	 */
+
+	xor	y0, y1		/* y0 = (e>>41) ^ (e>>18) ^ (e>>14)		; S1 */
+	rorx	T1, a, 34	/* T1 = a >> 34					; S0B */
+	xor	y2, g		/* y2 = CH = ((f^g)&e)^g                        ; CH	 */
+	rorx	y1, a, 39	/* y1 = a >> 39					; S0A */
+	mov	y3, a		/* y3 = a                                       ; MAJA	 */
+
+	xor	y1, T1		/* y1 = (a>>39) ^ (a>>34)			; S0 */
+	rorx	T1, a, 28	/* T1 = (a >> 28)					; S0 */
+	add	h, [rsp + frame_XFER + 8*1]		/* h = k + w + h                                ; --	 */
+	or	y3, c		/* y3 = a|c                                     ; MAJA	 */
+
+	xor	y1, T1		/* y1 = (a>>39) ^ (a>>34) ^ (a>>28)		; S0 */
+	mov	T1, a		/* T1 = a                                       ; MAJB	 */
+	and	y3, b		/* y3 = (a|c)&b                                 ; MAJA	 */
+	and	T1, c		/* T1 = a&c                                     ; MAJB	 */
+	add	y2, y0		/* y2 = S1 + CH                                 ; --	 */
+
+
+	add	d, h		/* d = k + w + h + d                            ; --	 */
+	or	y3, T1		/* y3 = MAJ = (a|c)&b)|(a&c)                    ; MAJ	 */
+	add	h, y1		/* h = k + w + h + S0                           ; --	 */
+
+	add	d, y2		/* d = k + w + h + d + S1 + CH = d + t1         ; --	 */
+
+
+	/*add	h, y2		; h = k + w + h + S0 + S1 + CH = t1 + S0       ; --	 */
+
+	/*add	h, y3		; h = t1 + S0 + MAJ                            ; --	 */
+
+	RotateState
+
+/*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 2 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */
+
+	add	old_h, y2		/* h = k + w + h + S0 + S1 + CH = t1 + S0       ; --	 */
+	mov	y2, f		/* y2 = f                                       ; CH	 */
+	rorx	y0, e, 41	/* y0 = e >> 41					; S1A */
+	rorx	y1, e, 18	/* y1 = e >> 18					; S1B */
+	xor	y2, g		/* y2 = f^g                                     ; CH	 */
+
+	xor	y0, y1		/* y0 = (e>>41) ^ (e>>18)			; S1 */
+	rorx	y1, e, 14	/* y1 = (e >> 14)					; S1 */
+	and	y2, e		/* y2 = (f^g)&e                                 ; CH	 */
+	add	old_h, y3	/* h = t1 + S0 + MAJ                            ; --	 */
+
+	xor	y0, y1		/* y0 = (e>>41) ^ (e>>18) ^ (e>>14)		; S1 */
+	rorx	T1, a, 34	/* T1 = a >> 34					; S0B */
+	xor	y2, g		/* y2 = CH = ((f^g)&e)^g                        ; CH	 */
+	rorx	y1, a, 39	/* y1 = a >> 39					; S0A */
+	mov	y3, a		/* y3 = a                                       ; MAJA	 */
+
+	xor	y1, T1		/* y1 = (a>>39) ^ (a>>34)			; S0 */
+	rorx	T1, a, 28	/* T1 = (a >> 28)					; S0 */
+	add	h, [rsp + frame_XFER + 8*2]		/* h = k + w + h                                ; --	 */
+	or	y3, c		/* y3 = a|c                                     ; MAJA	 */
+
+	xor	y1, T1		/* y1 = (a>>39) ^ (a>>34) ^ (a>>28)		; S0 */
+	mov	T1, a		/* T1 = a                                       ; MAJB	 */
+	and	y3, b		/* y3 = (a|c)&b                                 ; MAJA	 */
+	and	T1, c		/* T1 = a&c                                     ; MAJB	 */
+	add	y2, y0		/* y2 = S1 + CH                                 ; --	 */
+
+
+	add	d, h		/* d = k + w + h + d                            ; --	 */
+	or	y3, T1		/* y3 = MAJ = (a|c)&b)|(a&c)                    ; MAJ	 */
+	add	h, y1		/* h = k + w + h + S0                           ; --	 */
+
+	add	d, y2		/* d = k + w + h + d + S1 + CH = d + t1         ; --	 */
+
+
+	/*add	h, y2		; h = k + w + h + S0 + S1 + CH = t1 + S0       ; --	 */
+
+	/*add	h, y3		; h = t1 + S0 + MAJ                            ; --	 */
+
+	RotateState
+
+/*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 3 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */
+
+	add	old_h, y2		/* h = k + w + h + S0 + S1 + CH = t1 + S0       ; --	 */
+	mov	y2, f		/* y2 = f                                       ; CH	 */
+	rorx	y0, e, 41	/* y0 = e >> 41					; S1A */
+	rorx	y1, e, 18	/* y1 = e >> 18					; S1B */
+	xor	y2, g		/* y2 = f^g                                     ; CH	 */
+
+	xor	y0, y1		/* y0 = (e>>41) ^ (e>>18)			; S1 */
+	rorx	y1, e, 14	/* y1 = (e >> 14)					; S1 */
+	and	y2, e		/* y2 = (f^g)&e                                 ; CH	 */
+	add	old_h, y3	/* h = t1 + S0 + MAJ                            ; --	 */
+
+	xor	y0, y1		/* y0 = (e>>41) ^ (e>>18) ^ (e>>14)		; S1 */
+	rorx	T1, a, 34	/* T1 = a >> 34					; S0B */
+	xor	y2, g		/* y2 = CH = ((f^g)&e)^g                        ; CH	 */
+	rorx	y1, a, 39	/* y1 = a >> 39					; S0A */
+	mov	y3, a		/* y3 = a                                       ; MAJA	 */
+
+	xor	y1, T1		/* y1 = (a>>39) ^ (a>>34)			; S0 */
+	rorx	T1, a, 28	/* T1 = (a >> 28)					; S0 */
+	add	h, [rsp + frame_XFER + 8*3]		/* h = k + w + h                                ; --	 */
+	or	y3, c		/* y3 = a|c                                     ; MAJA	 */
+
+	xor	y1, T1		/* y1 = (a>>39) ^ (a>>34) ^ (a>>28)		; S0 */
+	mov	T1, a		/* T1 = a                                       ; MAJB	 */
+	and	y3, b		/* y3 = (a|c)&b                                 ; MAJA	 */
+	and	T1, c		/* T1 = a&c                                     ; MAJB	 */
+	add	y2, y0		/* y2 = S1 + CH                                 ; --	 */
+
+
+	add	d, h		/* d = k + w + h + d                            ; --	 */
+	or	y3, T1		/* y3 = MAJ = (a|c)&b)|(a&c)                    ; MAJ	 */
+	add	h, y1		/* h = k + w + h + S0                           ; --	 */
+
+	add	d, y2		/* d = k + w + h + d + S1 + CH = d + t1         ; --	 */
+
+
+	add	h, y2		/* h = k + w + h + S0 + S1 + CH = t1 + S0       ; --	 */
+
+	add	h, y3		/* h = t1 + S0 + MAJ                            ; --	 */
+
+	RotateState
+
+.endm
+
+/*
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; void sha512_rorx(const void* M, void* D, uint64_t L);
+; Purpose: Updates the SHA512 digest stored at D with the message stored in M.
+; The size of the message pointed to by M must be an integer multiple of SHA512
+;   message blocks.
+; L is the message length in SHA512 blocks
+*/
+.globl _gcry_sha512_transform_amd64_avx2
+.type _gcry_sha512_transform_amd64_avx2,@function;
+.align 16
+_gcry_sha512_transform_amd64_avx2:
+	xor eax, eax
+
+	cmp rdx, 0
+	je .Lnowork
+
+	/* Allocate Stack Space */
+	mov	rax, rsp
+	sub	rsp, frame_size
+	and	rsp, ~(0x20 - 1)
+	mov	[rsp + frame_RSPSAVE], rax
+
+	/* Save GPRs */
+	mov	[rsp + frame_GPRSAVE + 8 * 0], rbp
+	mov	[rsp + frame_GPRSAVE + 8 * 1], rbx
+	mov	[rsp + frame_GPRSAVE + 8 * 2], r12
+	mov	[rsp + frame_GPRSAVE + 8 * 3], r13
+	mov	[rsp + frame_GPRSAVE + 8 * 4], r14
+	mov	[rsp + frame_GPRSAVE + 8 * 5], r15
+
+	vpblendd	xmm0, xmm0, xmm1, 0xf0
+	vpblendd	ymm0, ymm0, ymm1, 0xf0
+
+	shl	NUM_BLKS, 7	/* convert to bytes */
+	jz	.Ldone_hash
+	add	NUM_BLKS, INP	/* pointer to end of data */
+	mov	[rsp + frame_INPEND], NUM_BLKS
+
+	/*; load initial digest */
+	mov	a,[8*0 + CTX]
+	mov	b,[8*1 + CTX]
+	mov	c,[8*2 + CTX]
+	mov	d,[8*3 + CTX]
+	mov	e,[8*4 + CTX]
+	mov	f,[8*5 + CTX]
+	mov	g,[8*6 + CTX]
+	mov	h,[8*7 + CTX]
+
+	vmovdqa	BYTE_FLIP_MASK, [.LPSHUFFLE_BYTE_FLIP_MASK ADD_RIP]
+
+.Loop0:
+	lea	TBL,[.LK512 ADD_RIP]
+
+	/*; byte swap first 16 qwords */
+	COPY_YMM_AND_BSWAP	Y_0, [INP + 0*32], BYTE_FLIP_MASK
+	COPY_YMM_AND_BSWAP	Y_1, [INP + 1*32], BYTE_FLIP_MASK
+	COPY_YMM_AND_BSWAP	Y_2, [INP + 2*32], BYTE_FLIP_MASK
+	COPY_YMM_AND_BSWAP	Y_3, [INP + 3*32], BYTE_FLIP_MASK
+
+	mov	[rsp + frame_INP], INP
+
+	/*; schedule 64 message qwords, by doing 4 iterations of 16 rounds each */
+	movq	[rsp + frame_SRND],4
+
+.align 16
+.Loop1:
+	vpaddq	XFER, Y_0, [TBL + 0*32]
+	vmovdqa [rsp + frame_XFER], XFER
+	FOUR_ROUNDS_AND_SCHED
+
+	vpaddq	XFER, Y_0, [TBL + 1*32]
+	vmovdqa [rsp + frame_XFER], XFER
+	FOUR_ROUNDS_AND_SCHED
+
+	vpaddq	XFER, Y_0, [TBL + 2*32]
+	vmovdqa [rsp + frame_XFER], XFER
+	FOUR_ROUNDS_AND_SCHED
+
+	vpaddq	XFER, Y_0, [TBL + 3*32]
+	vmovdqa [rsp + frame_XFER], XFER
+	add	TBL, 4*32
+	FOUR_ROUNDS_AND_SCHED
+
+	subq	[rsp + frame_SRND], 1
+	jne	.Loop1
+
+	movq	[rsp + frame_SRND], 2
+.Loop2:
+	vpaddq	XFER, Y_0, [TBL + 0*32]
+	vmovdqa [rsp + frame_XFER], XFER
+	DO_4ROUNDS
+	vpaddq	XFER, Y_1, [TBL + 1*32]
+	vmovdqa [rsp + frame_XFER], XFER
+	add	TBL, 2*32
+	DO_4ROUNDS
+
+	vmovdqa	Y_0, Y_2
+	vmovdqa	Y_1, Y_3
+
+	subq	[rsp + frame_SRND], 1
+	jne	.Loop2
+
+	addm	[8*0 + CTX],a
+	addm	[8*1 + CTX],b
+	addm	[8*2 + CTX],c
+	addm	[8*3 + CTX],d
+	addm	[8*4 + CTX],e
+	addm	[8*5 + CTX],f
+	addm	[8*6 + CTX],g
+	addm	[8*7 + CTX],h
+
+	mov	INP, [rsp + frame_INP]
+	add	INP, 128
+	cmp	INP, [rsp + frame_INPEND]
+	jne	.Loop0
+
+.Ldone_hash:
+
+	/* Restore GPRs */
+	mov	rbp, [rsp + frame_GPRSAVE + 8 * 0]
+	mov	rbx, [rsp + frame_GPRSAVE + 8 * 1]
+	mov	r12, [rsp + frame_GPRSAVE + 8 * 2]
+	mov	r13, [rsp + frame_GPRSAVE + 8 * 3]
+	mov	r14, [rsp + frame_GPRSAVE + 8 * 4]
+	mov	r15, [rsp + frame_GPRSAVE + 8 * 5]
+
+	/* Restore Stack Pointer */
+	mov	rsp, [rsp + frame_RSPSAVE]
+
+	mov	rax, frame_size
+.Lnowork:
+	ret
+
+/*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */
+/*;; Binary Data */
+
+.data
+
+.align 64
+/* K[t] used in SHA512 hashing */
+.LK512:
+	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
+	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
+	.quad	0x3956c25bf348b538,0x59f111f1b605d019
+	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
+	.quad	0xd807aa98a3030242,0x12835b0145706fbe
+	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
+	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
+	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
+	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
+	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
+	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
+	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
+	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
+	.quad	0x06ca6351e003826f,0x142929670a0e6e70
+	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
+	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
+	.quad	0x81c2c92e47edaee6,0x92722c851482353b
+	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
+	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
+	.quad	0xd192e819d6ef5218,0xd69906245565a910
+	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
+	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
+	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
+	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
+	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
+	.quad	0x90befffa23631e28,0xa4506cebde82bde9
+	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
+	.quad	0xca273eceea26619c,0xd186b8c721c0c207
+	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
+	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
+	.quad	0x113f9804bef90dae,0x1b710b35131c471b
+	.quad	0x28db77f523047d84,0x32caab7b40c72493
+	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
+	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817
+
+.align 32
+
+/* Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. */
+.LPSHUFFLE_BYTE_FLIP_MASK: .octa 0x08090a0b0c0d0e0f0001020304050607
+                           .octa 0x18191a1b1c1d1e1f1011121314151617
+
+.LMASK_YMM_LO: 		   .octa 0x00000000000000000000000000000000
+             		   .octa 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
+
+#endif
+#endif
diff --git a/cipher/sha512.c b/cipher/sha512.c
index 34b3215..586c809 100644
--- a/cipher/sha512.c
+++ b/cipher/sha512.c
@@ -74,6 +74,24 @@
 #endif
 
 
+/* USE_AVX indicates whether to compile with Intel AVX code. */
+#undef USE_AVX
+#if defined(__x86_64__) && defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) && \
+    defined(HAVE_GCC_INLINE_ASM_AVX) && \
+    defined(HAVE_INTEL_SYNTAX_PLATFORM_AS)
+# define USE_AVX 1
+#endif
+
+
+/* USE_AVX2 indicates whether to compile with Intel AVX2/rorx code. */
+#undef USE_AVX2
+#if defined(__x86_64__) && defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) && \
+    defined(HAVE_GCC_INLINE_ASM_AVX2) && defined(HAVE_GCC_INLINE_ASM_BMI2) && \
+    defined(HAVE_INTEL_SYNTAX_PLATFORM_AS)
+# define USE_AVX2 1
+#endif
+
+
 typedef struct
 {
   u64 h0, h1, h2, h3, h4, h5, h6, h7;
@@ -89,6 +107,12 @@ typedef struct
 #ifdef USE_SSSE3
   unsigned int use_ssse3:1;
 #endif
+#ifdef USE_AVX
+  unsigned int use_avx:1;
+#endif
+#ifdef USE_AVX2
+  unsigned int use_avx2:1;
+#endif
 } SHA512_CONTEXT;
 
 static unsigned int
@@ -99,6 +123,7 @@ sha512_init (void *context)
 {
   SHA512_CONTEXT *ctx = context;
   SHA512_STATE *hd = &ctx->state;
+  unsigned int features = _gcry_get_hw_features ();
 
   hd->h0 = U64_C(0x6a09e667f3bcc908);
   hd->h1 = U64_C(0xbb67ae8584caa73b);
@@ -116,11 +141,19 @@ sha512_init (void *context)
   ctx->bctx.bwrite = transform;
 
 #ifdef USE_ARM_NEON_ASM
-  ctx->use_neon = (_gcry_get_hw_features () & HWF_ARM_NEON) != 0;
+  ctx->use_neon = (features & HWF_ARM_NEON) != 0;
 #endif
 #ifdef USE_SSSE3
-  ctx->use_ssse3 = (_gcry_get_hw_features () & HWF_INTEL_SSSE3) != 0;
+  ctx->use_ssse3 = (features & HWF_INTEL_SSSE3) != 0;
+#endif
+#ifdef USE_AVX
+  ctx->use_avx = (features & HWF_INTEL_AVX) && (features & HWF_INTEL_CPU);
+#endif
+#ifdef USE_AVX2
+  ctx->use_avx2 = (features & HWF_INTEL_AVX2) && (features & HWF_INTEL_BMI2);
 #endif
+
+  (void)features;
 }
 
 static void
@@ -128,6 +161,7 @@ sha384_init (void *context)
 {
   SHA512_CONTEXT *ctx = context;
   SHA512_STATE *hd = &ctx->state;
+  unsigned int features = _gcry_get_hw_features ();
 
   hd->h0 = U64_C(0xcbbb9d5dc1059ed8);
   hd->h1 = U64_C(0x629a292a367cd507);
@@ -145,11 +179,19 @@ sha384_init (void *context)
   ctx->bctx.bwrite = transform;
 
 #ifdef USE_ARM_NEON_ASM
-  ctx->use_neon = (_gcry_get_hw_features () & HWF_ARM_NEON) != 0;
+  ctx->use_neon = (features & HWF_ARM_NEON) != 0;
 #endif
 #ifdef USE_SSSE3
-  ctx->use_ssse3 = (_gcry_get_hw_features () & HWF_INTEL_SSSE3) != 0;
+  ctx->use_ssse3 = (features & HWF_INTEL_SSSE3) != 0;
+#endif
+#ifdef USE_AVX
+  ctx->use_avx = (features & HWF_INTEL_AVX) && (features & HWF_INTEL_CPU);
 #endif
+#ifdef USE_AVX2
+  ctx->use_avx2 = (features & HWF_INTEL_AVX2) && (features & HWF_INTEL_BMI2);
+#endif
+
+  (void)features;
 }
 
 
@@ -507,12 +549,34 @@ unsigned int _gcry_sha512_transform_amd64_ssse3(const void *input_data,
 					        void *state, size_t num_blks);
 #endif
 
+#ifdef USE_AVX
+unsigned int _gcry_sha512_transform_amd64_avx(const void *input_data,
+					      void *state, size_t num_blks);
+#endif
+
+#ifdef USE_AVX2
+unsigned int _gcry_sha512_transform_amd64_avx2(const void *input_data,
+					       void *state, size_t num_blks);
+#endif
+
 
 static unsigned int
 transform (void *context, const unsigned char *data)
 {
   SHA512_CONTEXT *ctx = context;
 
+#ifdef USE_AVX2
+  if (ctx->use_avx2)
+    return _gcry_sha512_transform_amd64_avx2 (data, &ctx->state, 1)
+           + 4 * sizeof(void*);
+#endif
+
+#ifdef USE_AVX
+  if (ctx->use_avx)
+    return _gcry_sha512_transform_amd64_avx (data, &ctx->state, 1)
+           + 4 * sizeof(void*);
+#endif
+
 #ifdef USE_SSSE3
   if (ctx->use_ssse3)
     return _gcry_sha512_transform_amd64_ssse3 (data, &ctx->state, 1)
diff --git a/configure.ac b/configure.ac
index b930f4e..5a83160 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1050,6 +1050,23 @@ fi
 
 
 #
+# Check whether GCC inline assembler supports BMI2 instructions
+#
+AC_CACHE_CHECK([whether GCC inline assembler supports BMI2 instructions],
+       [gcry_cv_gcc_inline_asm_bmi2],
+       [gcry_cv_gcc_inline_asm_bmi2=no
+        AC_COMPILE_IFELSE([AC_LANG_SOURCE(
+          [[void a(void) {
+              __asm__("rorxq \$23, %%rax, %%rdx\\n\\t":::"memory");
+            }]])],
+          [gcry_cv_gcc_inline_asm_bmi2=yes])])
+if test "$gcry_cv_gcc_inline_asm_bmi2" = "yes" ; then
+   AC_DEFINE(HAVE_GCC_INLINE_ASM_BMI2,1,
+     [Defined if inline assembler supports BMI2 instructions])
+fi
+
+
+#
 # Check whether GCC assembler supports features needed for our amd64
 # implementations
 #
@@ -1732,6 +1749,8 @@ if test "$found" = "1" ; then
       x86_64-*-*)
          # Build with the assembly implementation
          GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha512-ssse3-amd64.lo"
+         GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha512-avx-amd64.lo"
+         GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha512-avx2-bmi2-amd64.lo"
       ;;
    esac
 
diff --git a/src/g10lib.h b/src/g10lib.h
index 1dcadfa..98ed92b 100644
--- a/src/g10lib.h
+++ b/src/g10lib.h
@@ -167,6 +167,8 @@ int _gcry_log_verbosity( int level );
 #define HWF_PADLOCK_SHA  4
 #define HWF_PADLOCK_MMUL 8
 
+#define HWF_INTEL_CPU    16
+#define HWF_INTEL_BMI2   32
 #define HWF_INTEL_SSSE3  64
 #define HWF_INTEL_PCLMUL 128
 #define HWF_INTEL_AESNI  256
diff --git a/src/global.c b/src/global.c
index de99286..909adec 100644
--- a/src/global.c
+++ b/src/global.c
@@ -66,6 +66,8 @@ static struct
     { HWF_PADLOCK_AES, "padlock-aes" },
     { HWF_PADLOCK_SHA, "padlock-sha" },
     { HWF_PADLOCK_MMUL,"padlock-mmul"},
+    { HWF_INTEL_CPU,   "intel-cpu" },
+    { HWF_INTEL_BMI2,  "intel-bmi2" },
     { HWF_INTEL_SSSE3, "intel-ssse3" },
     { HWF_INTEL_PCLMUL,"intel-pclmul" },
     { HWF_INTEL_AESNI, "intel-aesni" },
diff --git a/src/hwf-x86.c b/src/hwf-x86.c
index ab6dacd..4e82558 100644
--- a/src/hwf-x86.c
+++ b/src/hwf-x86.c
@@ -189,6 +189,7 @@ detect_x86_gnuc (void)
   else if (!strcmp (vendor_id, "GenuineIntel"))
     {
       /* This is an Intel CPU.  */
+      result |= HWF_INTEL_CPU;
     }
   else if (!strcmp (vendor_id, "AuthenticAMD"))
     {
@@ -231,10 +232,14 @@ detect_x86_gnuc (void)
    * Source: http://www.sandpile.org/x86/cpuid.htm  */
   if (max_cpuid_level >= 7 && (features & 0x00000001))
     {
-#ifdef ENABLE_AVX2_SUPPORT
       /* Get CPUID:7 contains further Intel feature flags. */
       get_cpuid(7, NULL, &features, NULL, NULL);
 
+      /* Test bit 8 for BMI2.  */
+      if (features & 0x00000100)
+          result |= HWF_INTEL_BMI2;
+
+#ifdef ENABLE_AVX2_SUPPORT
       /* Test bit 5 for AVX2.  */
       if (features & 0x00000020)
           result |= HWF_INTEL_AVX2;



