[git] GCRYPT - branch, master, updated. libgcrypt-1.7.3-84-g4cd9499

by Jussi Kivilinna cvs at cvs.gnupg.org
Sun May 21 18:07:08 CEST 2017


This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "The GNU crypto library".

The branch, master has been updated
       via  4cd94994a9abec9b92fa5972869baf089a28fa76 (commit)
       via  68861ae5d3e007d7a39f14ea27dc3dd8ef13ba02 (commit)
       via  1a094bc5b2aa730833faf593a931d4e5d7f9ab4d (commit)
       via  ff02fca39c83bcf30c79368611ac65e273e77f6c (commit)
       via  434d4f2af39033fc626044ba9a060da298522293 (commit)
      from  9b651fb632f3697e70685c9ee340ab0cb2274bdf (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.

- Log -----------------------------------------------------------------
commit 4cd94994a9abec9b92fa5972869baf089a28fa76
Author: Jussi Kivilinna <jussi.kivilinna at iki.fi>
Date:   Tue May 16 21:22:11 2017 +0300

    rijndael-ssse3: fix functions calls from assembly blocks
    
    * cipher/rijndael-ssse3-amd64.c (PUSH_STACK_PTR, POP_STACK_PTR): New.
    (vpaes_ssse3_prepare_enc, vpaes_ssse3_prepare_dec)
    (_gcry_aes_ssse3_do_setkey, _gcry_aes_ssse3_prepare_decryption)
    (do_vpaes_ssse3_enc, do_vpaes_ssse3_dec): Use PUSH_STACK_PTR and
    POP_STACK_PTR.
    --
    
    Signed-off-by: Jussi Kivilinna <jussi.kivilinna at iki.fi>

diff --git a/cipher/rijndael-ssse3-amd64.c b/cipher/rijndael-ssse3-amd64.c
index 78d8234..da5339e 100644
--- a/cipher/rijndael-ssse3-amd64.c
+++ b/cipher/rijndael-ssse3-amd64.c
@@ -110,6 +110,8 @@ extern void _gcry_aes_ssse3_decrypt_core(void);
                   : \
                   : "r" (ssse3_state) \
                   : "memory" )
+# define PUSH_STACK_PTR
+# define POP_STACK_PTR
 #else
 # define SSSE3_STATE_SIZE 1
 # define vpaes_ssse3_prepare() (void)ssse3_state
@@ -124,18 +126,27 @@ extern void _gcry_aes_ssse3_decrypt_core(void);
                   "pxor	%%xmm7,  %%xmm7 \n\t" \
                   "pxor	%%xmm8,  %%xmm8 \n\t" \
                   ::: "memory" )
+/* Old GCC versions use red-zone of AMD64 SYSV ABI and stack pointer is
+ * not properly adjusted for assembly block. Therefore stack pointer
+ * needs to be manually corrected. */
+# define PUSH_STACK_PTR "subq $128, %%rsp;\n\t"
+# define POP_STACK_PTR  "addq $128, %%rsp;\n\t"
 #endif
 
 #define vpaes_ssse3_prepare_enc() \
     vpaes_ssse3_prepare(); \
-    asm volatile ("callq *%q[core] \n\t" \
+    asm volatile (PUSH_STACK_PTR \
+                  "callq *%q[core] \n\t" \
+                  POP_STACK_PTR \
                   : \
                   : [core] "r" (_gcry_aes_ssse3_enc_preload) \
                   : "rax", "cc", "memory" )
 
 #define vpaes_ssse3_prepare_dec() \
     vpaes_ssse3_prepare(); \
-    asm volatile ("callq *%q[core] \n\t" \
+    asm volatile (PUSH_STACK_PTR \
+                  "callq *%q[core] \n\t" \
+                  POP_STACK_PTR \
                   : \
                   : [core] "r" (_gcry_aes_ssse3_dec_preload) \
                   : "rax", "cc", "memory" )
@@ -155,7 +166,9 @@ _gcry_aes_ssse3_do_setkey (RIJNDAEL_context *ctx, const byte *key)
                 "leaq %[buf], %%rdx"			"\n\t"
                 "movl %[dir], %%ecx"			"\n\t"
                 "movl %[rotoffs], %%r8d"		"\n\t"
+                PUSH_STACK_PTR
                 "callq *%q[core]"			"\n\t"
+                POP_STACK_PTR
                 :
                 : [core] "r" (&_gcry_aes_ssse3_schedule_core),
                   [key] "m" (*key),
@@ -208,7 +221,9 @@ _gcry_aes_ssse3_prepare_decryption (RIJNDAEL_context *ctx)
                 "leaq %[buf], %%rdx"			"\n\t"
                 "movl %[dir], %%ecx"			"\n\t"
                 "movl %[rotoffs], %%r8d"		"\n\t"
+                PUSH_STACK_PTR
                 "callq *%q[core]"			"\n\t"
+                POP_STACK_PTR
                 :
                 : [core] "r" (_gcry_aes_ssse3_schedule_core),
                   [key] "m" (ctx->keyschdec32[0][0]),
@@ -231,7 +246,9 @@ do_vpaes_ssse3_enc (const RIJNDAEL_context *ctx, unsigned int nrounds)
   unsigned int middle_rounds = nrounds - 1;
   const void *keysched = ctx->keyschenc32;
 
-  asm volatile ("callq *%q[core]"			"\n\t"
+  asm volatile (PUSH_STACK_PTR
+		"callq *%q[core]"			"\n\t"
+		POP_STACK_PTR
 		: "+a" (middle_rounds), "+d" (keysched)
 		: [core] "r" (_gcry_aes_ssse3_encrypt_core)
 		: "rcx", "rsi", "rdi", "cc", "memory");
@@ -246,10 +263,12 @@ do_vpaes_ssse3_dec (const RIJNDAEL_context *ctx, unsigned int nrounds)
   unsigned int middle_rounds = nrounds - 1;
   const void *keysched = ctx->keyschdec32;
 
-  asm volatile ("callq *%q[core]"			"\n\t"
-                : "+a" (middle_rounds), "+d" (keysched)
+  asm volatile (PUSH_STACK_PTR
+		"callq *%q[core]"			"\n\t"
+		POP_STACK_PTR
+		: "+a" (middle_rounds), "+d" (keysched)
 		: [core] "r" (_gcry_aes_ssse3_decrypt_core)
-                : "rcx", "rsi", "cc", "memory");
+		: "rcx", "rsi", "cc", "memory");
 }
 
 

commit 68861ae5d3e007d7a39f14ea27dc3dd8ef13ba02
Author: Jussi Kivilinna <jussi.kivilinna at iki.fi>
Date:   Sat May 13 18:36:00 2017 +0300

    chacha20-armv7-neon: fix to use fast code path when memory is aligned
    
    * cipher/chacha20-armv7-neon.S (UNALIGNED_LDMIA4): Uncomment
    instruction for jump to aligned code path.
    --
    
    Signed-off-by: Jussi Kivilinna <jussi.kivilinna at iki.fi>

diff --git a/cipher/chacha20-armv7-neon.S b/cipher/chacha20-armv7-neon.S
index 4d3340b..c1971fc 100644
--- a/cipher/chacha20-armv7-neon.S
+++ b/cipher/chacha20-armv7-neon.S
@@ -54,7 +54,7 @@
 
 #define UNALIGNED_LDMIA4(ptr, l0, l1, l2, l3) \
         tst ptr, #3; \
-        /*beq 1f;*/ \
+        beq 1f; \
         vpush {d0-d1}; \
         vld1.32 {d0-d1}, [ptr]; \
         add ptr, #16; \

commit 1a094bc5b2aa730833faf593a931d4e5d7f9ab4d
Author: Jussi Kivilinna <jussi.kivilinna at iki.fi>
Date:   Sat May 13 18:53:08 2017 +0300

    Move data in AMD64 assembly to text section
    
    * cipher/camellia-aesni-avx-amd64.S: Move data to .text section to
    ensure that RIP relative addressing of data will work.
    * cipher/camellia-aesni-avx2-amd64.S: Ditto.
    * cipher/chacha20-avx2-amd64.S: Ditto.
    * cipher/chacha20-ssse3-amd64.S: Ditto.
    * cipher/des-amd64.S: Ditto.
    * cipher/serpent-avx2-amd64.S: Ditto.
    * cipher/sha1-avx-amd64.S: Ditto.
    * cipher/sha1-avx-bmi2-amd64.S: Ditto.
    * cipher/sha1-ssse3-amd64.S: Ditto.
    * cipher/sha256-avx-amd64.S: Ditto.
    * cipher/sha256-avx2-bmi2-amd64.S: Ditto.
    * cipher/sha256-ssse3-amd64.S: Ditto.
    * cipher/sha512-avx-amd64.S: Ditto.
    * cipher/sha512-avx2-bmi2-amd64.S: Ditto.
    * cipher/sha512-ssse3-amd64.S: Ditto.
    --
    
    Signed-off-by: Jussi Kivilinna <jussi.kivilinna at iki.fi>

diff --git a/cipher/camellia-aesni-avx-amd64.S b/cipher/camellia-aesni-avx-amd64.S
index 5a3a3cb..8022934 100644
--- a/cipher/camellia-aesni-avx-amd64.S
+++ b/cipher/camellia-aesni-avx-amd64.S
@@ -629,7 +629,7 @@
 	vmovdqu y6, 14 * 16(rio); \
 	vmovdqu y7, 15 * 16(rio);
 
-.data
+.text
 .align 16
 
 #define SHUFB_BYTES(idx) \
@@ -773,7 +773,6 @@
 .L0f0f0f0f:
 	.long 0x0f0f0f0f
 
-.text
 
 .align 8
 ELF(.type   __camellia_enc_blk16,@function;)
@@ -1702,7 +1701,6 @@ ELF(.size _gcry_camellia_aesni_avx_ocb_auth,.-_gcry_camellia_aesni_avx_ocb_auth;
 	vpsllq $(64-(nror)), out, out; \
 	vpaddd t0, out, out;
 
-.data
 
 .align 16
 .Linv_shift_row_and_unpcklbw:
@@ -1735,7 +1733,6 @@ ELF(.size _gcry_camellia_aesni_avx_ocb_auth,.-_gcry_camellia_aesni_avx_ocb_auth;
 .Lsigma6:
 	.long 0xB3E6C1FD, 0xB05688C2;
 
-.text
 
 .align 8
 ELF(.type  __camellia_avx_setup128,@function;)
diff --git a/cipher/camellia-aesni-avx2-amd64.S b/cipher/camellia-aesni-avx2-amd64.S
index 26381df..897e4ae 100644
--- a/cipher/camellia-aesni-avx2-amd64.S
+++ b/cipher/camellia-aesni-avx2-amd64.S
@@ -613,7 +613,7 @@
 	vmovdqu y6, 14 * 32(rio); \
 	vmovdqu y7, 15 * 32(rio);
 
-.data
+.text
 .align 32
 
 #define SHUFB_BYTES(idx) \
@@ -752,7 +752,6 @@
 .L0f0f0f0f:
 	.long 0x0f0f0f0f
 
-.text
 
 .align 8
 ELF(.type   __camellia_enc_blk32,@function;)
diff --git a/cipher/chacha20-avx2-amd64.S b/cipher/chacha20-avx2-amd64.S
index 12bed35..8c085ba 100644
--- a/cipher/chacha20-avx2-amd64.S
+++ b/cipher/chacha20-avx2-amd64.S
@@ -947,7 +947,6 @@ _gcry_chacha20_amd64_avx2_blocks:
 	ret
 ELF(.size _gcry_chacha20_amd64_avx2_blocks,.-_gcry_chacha20_amd64_avx2_blocks;)
 
-.data
 .align 16
 .LC:
 .byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13       /* pshufb rotate by 16 */
diff --git a/cipher/chacha20-ssse3-amd64.S b/cipher/chacha20-ssse3-amd64.S
index a1a843f..c04010e 100644
--- a/cipher/chacha20-ssse3-amd64.S
+++ b/cipher/chacha20-ssse3-amd64.S
@@ -623,7 +623,6 @@ _gcry_chacha20_amd64_ssse3_blocks:
 	ret
 ELF(.size _gcry_chacha20_amd64_ssse3_blocks,.-_gcry_chacha20_amd64_ssse3_blocks;)
 
-.data
 .align 16;
 .LC:
 .byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13       /* pshufb rotate by 16 */
diff --git a/cipher/des-amd64.S b/cipher/des-amd64.S
index 307d211..1b7cfba 100644
--- a/cipher/des-amd64.S
+++ b/cipher/des-amd64.S
@@ -766,7 +766,6 @@ _gcry_3des_amd64_cfb_dec:
 	ret;
 ELF(.size _gcry_3des_amd64_cfb_dec,.-_gcry_3des_amd64_cfb_dec;)
 
-.data
 .align 16
 .L_s1:
 	.quad 0x0010100001010400, 0x0000000000000000
diff --git a/cipher/serpent-avx2-amd64.S b/cipher/serpent-avx2-amd64.S
index 2902dab..8d60a15 100644
--- a/cipher/serpent-avx2-amd64.S
+++ b/cipher/serpent-avx2-amd64.S
@@ -1113,7 +1113,6 @@ _gcry_serpent_avx2_ocb_auth:
 	ret;
 ELF(.size _gcry_serpent_avx2_ocb_auth,.-_gcry_serpent_avx2_ocb_auth;)
 
-.data
 .align 16
 
 /* For CTR-mode IV byteswap */
diff --git a/cipher/sha1-avx-amd64.S b/cipher/sha1-avx-amd64.S
index 3b3a6d1..b14603b 100644
--- a/cipher/sha1-avx-amd64.S
+++ b/cipher/sha1-avx-amd64.S
@@ -58,7 +58,7 @@
 
 /* Constants */
 
-.data
+.text
 #define K1  0x5A827999
 #define K2  0x6ED9EBA1
 #define K3  0x8F1BBCDC
@@ -214,7 +214,6 @@
  * _gcry_sha1_transform_amd64_avx (void *ctx, const unsigned char *data,
  *                                  size_t nblks)
  */
-.text
 .globl _gcry_sha1_transform_amd64_avx
 ELF(.type _gcry_sha1_transform_amd64_avx,@function)
 .align 16
diff --git a/cipher/sha1-avx-bmi2-amd64.S b/cipher/sha1-avx-bmi2-amd64.S
index 22bcbb3..b267693 100644
--- a/cipher/sha1-avx-bmi2-amd64.S
+++ b/cipher/sha1-avx-bmi2-amd64.S
@@ -59,7 +59,7 @@
 
 /* Constants */
 
-.data
+.text
 #define K1  0x5A827999
 #define K2  0x6ED9EBA1
 #define K3  0x8F1BBCDC
@@ -212,7 +212,6 @@
  * _gcry_sha1_transform_amd64_avx_bmi2 (void *ctx, const unsigned char *data,
  *                                      size_t nblks)
  */
-.text
 .globl _gcry_sha1_transform_amd64_avx_bmi2
 ELF(.type _gcry_sha1_transform_amd64_avx_bmi2,@function)
 .align 16
diff --git a/cipher/sha1-ssse3-amd64.S b/cipher/sha1-ssse3-amd64.S
index 98a19e6..2b43947 100644
--- a/cipher/sha1-ssse3-amd64.S
+++ b/cipher/sha1-ssse3-amd64.S
@@ -58,7 +58,7 @@
 
 /* Constants */
 
-.data
+.text
 #define K1  0x5A827999
 #define K2  0x6ED9EBA1
 #define K3  0x8F1BBCDC
@@ -226,7 +226,6 @@
  * _gcry_sha1_transform_amd64_ssse3 (void *ctx, const unsigned char *data,
  *                                   size_t nblks)
  */
-.text
 .globl _gcry_sha1_transform_amd64_ssse3
 ELF(.type _gcry_sha1_transform_amd64_ssse3,@function)
 .align 16
diff --git a/cipher/sha256-avx-amd64.S b/cipher/sha256-avx-amd64.S
index 8bf26bd..6953855 100644
--- a/cipher/sha256-avx-amd64.S
+++ b/cipher/sha256-avx-amd64.S
@@ -496,7 +496,6 @@ _gcry_sha256_transform_amd64_avx:
 	ret
 
 
-.data
 .align 16
 .LK256:
 	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
diff --git a/cipher/sha256-avx2-bmi2-amd64.S b/cipher/sha256-avx2-bmi2-amd64.S
index 74b6063..85e663f 100644
--- a/cipher/sha256-avx2-bmi2-amd64.S
+++ b/cipher/sha256-avx2-bmi2-amd64.S
@@ -763,7 +763,6 @@ _gcry_sha256_transform_amd64_avx2:
 
 	ret
 
-.data
 .align 64
 .LK256:
 	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
diff --git a/cipher/sha256-ssse3-amd64.S b/cipher/sha256-ssse3-amd64.S
index 9ec87e4..a9213e4 100644
--- a/cipher/sha256-ssse3-amd64.S
+++ b/cipher/sha256-ssse3-amd64.S
@@ -516,7 +516,6 @@ _gcry_sha256_transform_amd64_ssse3:
 	ret
 
 
-.data
 .align 16
 .LK256:
 	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
diff --git a/cipher/sha512-avx-amd64.S b/cipher/sha512-avx-amd64.S
index 699c271..446a8b4 100644
--- a/cipher/sha512-avx-amd64.S
+++ b/cipher/sha512-avx-amd64.S
@@ -368,8 +368,6 @@ _gcry_sha512_transform_amd64_avx:
 ;;; Binary Data
 */
 
-.data
-
 .align 16
 
 /* Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. */
diff --git a/cipher/sha512-avx2-bmi2-amd64.S b/cipher/sha512-avx2-bmi2-amd64.S
index 02f95af..05bef64 100644
--- a/cipher/sha512-avx2-bmi2-amd64.S
+++ b/cipher/sha512-avx2-bmi2-amd64.S
@@ -735,8 +735,6 @@ _gcry_sha512_transform_amd64_avx2:
 /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */
 /*;; Binary Data */
 
-.data
-
 .align 64
 /* K[t] used in SHA512 hashing */
 .LK512:
diff --git a/cipher/sha512-ssse3-amd64.S b/cipher/sha512-ssse3-amd64.S
index c721bcf..51193b3 100644
--- a/cipher/sha512-ssse3-amd64.S
+++ b/cipher/sha512-ssse3-amd64.S
@@ -373,8 +373,6 @@ _gcry_sha512_transform_amd64_ssse3:
 ;;; Binary Data
 */
 
-.data
-
 .align 16
 
 /* Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. */

commit ff02fca39c83bcf30c79368611ac65e273e77f6c
Author: Jussi Kivilinna <jussi.kivilinna at iki.fi>
Date:   Sat May 13 18:35:30 2017 +0300

    cast5-amd64: use 64-bit relocation with large PIC memory model
    
    * cipher/cast5-amd64.S [__code_model_large__]
    (GET_EXTERN_POINTER): New.
    --
    
    Signed-off-by: Jussi Kivilinna <jussi.kivilinna at iki.fi>

diff --git a/cipher/cast5-amd64.S b/cipher/cast5-amd64.S
index 608fb64..c04015a 100644
--- a/cipher/cast5-amd64.S
+++ b/cipher/cast5-amd64.S
@@ -26,7 +26,20 @@
 #if defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS) || !defined(__PIC__)
 #  define GET_EXTERN_POINTER(name, reg) movabsq $name, reg
 #else
-#  define GET_EXTERN_POINTER(name, reg) movq name@GOTPCREL(%rip), reg
+#  ifdef __code_model_large__
+#    define GET_EXTERN_POINTER(name, reg) \
+	       pushq %r15; \
+	       pushq %r14; \
+	    1: leaq 1b(%rip), reg; \
+	       movabsq $_GLOBAL_OFFSET_TABLE_-1b, %r14; \
+	       movabsq $name@GOT, %r15; \
+	       addq %r14, reg; \
+	       popq %r14; \
+	       movq (reg, %r15), reg; \
+	       popq %r15;
+#  else
+#    define GET_EXTERN_POINTER(name, reg) movq name@GOTPCREL(%rip), reg
+#  endif
 #endif
 
 #ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS

commit 434d4f2af39033fc626044ba9a060da298522293
Author: Jussi Kivilinna <jussi.kivilinna at iki.fi>
Date:   Sat May 13 17:53:27 2017 +0300

    Fix building with x86-64 medium and large memory models
    
    * cipher/cast5-amd64.S [HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS]
    (GET_EXTERN_POINTER): Load 64-bit address instead of 32-bit.
    * cipher/rijndael.c (do_encrypt, do_decrypt)
    [USE_AMD64_ASM && !HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS]: Load
    table pointer through register instead of generic reference.
    --
    
    Signed-off-by: Jussi Kivilinna <jussi.kivilinna at iki.fi>

diff --git a/cipher/cast5-amd64.S b/cipher/cast5-amd64.S
index a5f078e..608fb64 100644
--- a/cipher/cast5-amd64.S
+++ b/cipher/cast5-amd64.S
@@ -24,7 +24,7 @@
      defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && defined(USE_CAST5)
 
 #if defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS) || !defined(__PIC__)
-#  define GET_EXTERN_POINTER(name, reg) leaq name, reg
+#  define GET_EXTERN_POINTER(name, reg) movabsq $name, reg
 #else
 #  define GET_EXTERN_POINTER(name, reg) movq name@GOTPCREL(%rip), reg
 #endif
diff --git a/cipher/rijndael.c b/cipher/rijndael.c
index 66ea0f3..8637195 100644
--- a/cipher/rijndael.c
+++ b/cipher/rijndael.c
@@ -752,7 +752,7 @@ do_encrypt (const RIJNDAEL_context *ctx,
                   "+d" (ax),
                   "+c" (rounds)
                 : "0" (_gcry_aes_amd64_encrypt_block),
-                  [encT] "g" (encT)
+                  [encT] "r" (encT)
                 : "cc", "memory", "r8", "r9", "r10", "r11");
   return ret;
 # endif /* HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS */
@@ -1135,7 +1135,7 @@ do_decrypt (const RIJNDAEL_context *ctx, unsigned char *bx,
                   "+d" (ax),
                   "+c" (rounds)
                 : "0" (_gcry_aes_amd64_decrypt_block),
-                  [dectabs] "g" (&dec_tables)
+                  [dectabs] "r" (&dec_tables)
                 : "cc", "memory", "r8", "r9", "r10", "r11");
   return ret;
 # endif /* HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS */

-----------------------------------------------------------------------

Summary of changes:
 cipher/camellia-aesni-avx-amd64.S  |  5 +----
 cipher/camellia-aesni-avx2-amd64.S |  3 +--
 cipher/cast5-amd64.S               | 17 +++++++++++++++--
 cipher/chacha20-armv7-neon.S       |  2 +-
 cipher/chacha20-avx2-amd64.S       |  1 -
 cipher/chacha20-ssse3-amd64.S      |  1 -
 cipher/des-amd64.S                 |  1 -
 cipher/rijndael-ssse3-amd64.c      | 31 +++++++++++++++++++++++++------
 cipher/rijndael.c                  |  4 ++--
 cipher/serpent-avx2-amd64.S        |  1 -
 cipher/sha1-avx-amd64.S            |  3 +--
 cipher/sha1-avx-bmi2-amd64.S       |  3 +--
 cipher/sha1-ssse3-amd64.S          |  3 +--
 cipher/sha256-avx-amd64.S          |  1 -
 cipher/sha256-avx2-bmi2-amd64.S    |  1 -
 cipher/sha256-ssse3-amd64.S        |  1 -
 cipher/sha512-avx-amd64.S          |  2 --
 cipher/sha512-avx2-bmi2-amd64.S    |  2 --
 cipher/sha512-ssse3-amd64.S        |  2 --
 19 files changed, 48 insertions(+), 36 deletions(-)


hooks/post-receive
-- 
The GNU crypto library
http://git.gnupg.org


_______________________________________________
Gnupg-commits mailing list
Gnupg-commits at gnupg.org
http://lists.gnupg.org/mailman/listinfo/gnupg-commits




More information about the Gcrypt-devel mailing list