[git] GCRYPT - branch, master, updated. libgcrypt-1.8.1-57-g0de2191

by Martin Storsjö cvs at cvs.gnupg.org
Wed Mar 28 19:40:21 CEST 2018


This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "The GNU crypto library".

The branch, master has been updated
       via  0de2191a07d69ef1fa34ca4c5d5fc4985ff7b4c4 (commit)
       via  4e1b628f492643d4e9b830bcdab7b49daaec5854 (commit)
       via  36e916fc332eda74963192b1c0bf6860a3e5d67b (commit)
       via  ec0a2f25c0f64a7b65b373508ce9081e10461965 (commit)
       via  ed41d6d6fb4551342b22ef763de1bd60e964e186 (commit)
       via  8ee38806245ca8452051b1a245f44082323f37f6 (commit)
      from  885f031fbd17abc1c0fedbb98df22823b647fc11 (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email, so we list those
revisions in full below.

- Log -----------------------------------------------------------------
commit 0de2191a07d69ef1fa34ca4c5d5fc4985ff7b4c4
Author: Martin Storsjö <martin at martin.st>
Date:   Thu Mar 22 23:32:40 2018 +0200

    aarch64: Enable building the aarch64 cipher assembly for windows
    
    * cipher/asm-common-aarch64.h: New.
    * cipher/camellia-aarch64.S: Use ELF macro, use x19 instead of x18.
    * cipher/chacha20-aarch64.S: Use ELF macro, don't use GOT on windows.
    * cipher/cipher-gcm-armv8-aarch64-ce.S: Use ELF macro.
    * cipher/rijndael-aarch64.S: Use ELF macro.
    * cipher/rijndael-armv8-aarch64-ce.S: Use ELF macro.
    * cipher/sha1-armv8-aarch64-ce.S: Use ELF macro.
    * cipher/sha256-armv8-aarch64-ce.S: Use ELF macro.
    * cipher/twofish-aarch64.S: Use ELF macro.
    * configure.ac: Don't require .size and .type in aarch64 assembly check.
    --
    Don't require .type and .size in configure; we can make
    them optional via a preprocessor macro.
    
    This is mostly a mechanical change, wrapping the .type and .size
    directives in an ELF() macro, with two actual manual changes (both
    only needed when targeting windows):
    - Don't load global symbols via a GOT (in chacha20)
    - Don't use the x18 register (in camellia); back up and restore x19
      in the prologue/epilogue and use that instead.
    
    x18 is a platform-specific register; on linux, it's free to be used
    by user code, while it's reserved for platform use on windows and
    darwin. Always use x19 instead of x18 for consistency.
    
    Signed-off-by: Martin Storsjö <martin at martin.st>
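
    As a hedged illustration of those two manual changes (symbol and
    register choices follow the diffs below; the surrounding code is a
    sketch, not the actual implementation):

        /* 1) Loading a global's address: GOT indirection on ELF...  */
        adrp    x7, :got:some_table
        ldr     x7, [x7, #:got_lo12:some_table]
        /* ...versus direct PC-relative addressing when targeting windows: */
        adrp    x7, some_table
        add     x7, x7, #:lo12:some_table

        /* 2) x18 is reserved on windows/darwin, so spill the callee-saved
         * x19 in the prologue and use it in place of x18: */
        stp     x19, x30, [sp, #-16]!   /* prologue: save x19 (and x30) */
        /* ...body uses w19/x19 where it previously used w18/x18... */
        ldp     x19, x30, [sp], #16     /* epilogue: restore */
        ret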

diff --git a/cipher/asm-common-aarch64.h b/cipher/asm-common-aarch64.h
new file mode 100644
index 0000000..814b7ad
--- /dev/null
+++ b/cipher/asm-common-aarch64.h
@@ -0,0 +1,32 @@
+/* asm-common-aarch64.h  -  Common macros for AArch64 assembly
+ *
+ * Copyright (C) 2018 Martin Storsjö <martin at martin.st>
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef GCRY_ASM_COMMON_AARCH64_H
+#define GCRY_ASM_COMMON_AARCH64_H
+
+#include <config.h>
+
+#ifdef __ELF__
+# define ELF(...) __VA_ARGS__
+#else
+# define ELF(...) /*_*/
+#endif
+
+#endif /* GCRY_ASM_COMMON_AARCH64_H */
diff --git a/cipher/camellia-aarch64.S b/cipher/camellia-aarch64.S
index 68d2a7d..c3cc463 100644
--- a/cipher/camellia-aarch64.S
+++ b/cipher/camellia-aarch64.S
@@ -19,7 +19,7 @@
  * License along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <config.h>
+#include "asm-common-aarch64.h"
 
 #if defined(__AARCH64EL__)
 #ifdef HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS
@@ -55,12 +55,12 @@
 #define RT0 w15
 #define RT1 w16
 #define RT2 w17
-#define RT3 w18
+#define RT3 w19
 
 #define xRT0 x15
 #define xRT1 x16
 #define xRT2 x17
-#define xRT3 x18
+#define xRT3 x19
 
 #ifdef __AARCH64EL__
   #define host_to_be(reg, rtmp) \
@@ -198,9 +198,10 @@
 	str_output_be(RDST, YL, YR, XL, XR, RT0, RT1);
 
 .globl _gcry_camellia_arm_encrypt_block
-.type   _gcry_camellia_arm_encrypt_block,@function;
+ELF(.type   _gcry_camellia_arm_encrypt_block,@function;)
 
 _gcry_camellia_arm_encrypt_block:
+	stp x19, x30, [sp, #-16]!
 	/* input:
 	 *	x0: keytable
 	 *	x1: dst
@@ -227,6 +228,7 @@ _gcry_camellia_arm_encrypt_block:
 
 	outunpack(24);
 
+	ldp x19, x30, [sp], #16
 	ret;
 .ltorg
 
@@ -236,14 +238,16 @@ _gcry_camellia_arm_encrypt_block:
 
 	outunpack(32);
 
+	ldp x19, x30, [sp], #16
 	ret;
 .ltorg
-.size _gcry_camellia_arm_encrypt_block,.-_gcry_camellia_arm_encrypt_block;
+ELF(.size _gcry_camellia_arm_encrypt_block,.-_gcry_camellia_arm_encrypt_block;)
 
 .globl _gcry_camellia_arm_decrypt_block
-.type   _gcry_camellia_arm_decrypt_block,@function;
+ELF(.type   _gcry_camellia_arm_decrypt_block,@function;)
 
 _gcry_camellia_arm_decrypt_block:
+	stp x19, x30, [sp, #-16]!
 	/* input:
 	 *	x0: keytable
 	 *	x1: dst
@@ -271,6 +275,7 @@ _gcry_camellia_arm_decrypt_block:
 
 	outunpack(0);
 
+	ldp x19, x30, [sp], #16
 	ret;
 .ltorg
 
@@ -281,11 +286,11 @@ _gcry_camellia_arm_decrypt_block:
 
 	b .Ldec_128;
 .ltorg
-.size _gcry_camellia_arm_decrypt_block,.-_gcry_camellia_arm_decrypt_block;
+ELF(.size _gcry_camellia_arm_decrypt_block,.-_gcry_camellia_arm_decrypt_block;)
 
 /* Encryption/Decryption tables */
 .globl _gcry_camellia_arm_tables
-.type  _gcry_camellia_arm_tables,@object;
+ELF(.type  _gcry_camellia_arm_tables,@object;)
 .balign 32
 _gcry_camellia_arm_tables:
 .Lcamellia_sp1110:
@@ -551,7 +556,7 @@ _gcry_camellia_arm_tables:
 .long 0xc7c7c700, 0x008f8f8f, 0xe300e3e3, 0xf4f400f4
 .long 0x80808000, 0x00010101, 0x40004040, 0xc7c700c7
 .long 0x9e9e9e00, 0x003d3d3d, 0x4f004f4f, 0x9e9e009e
-.size _gcry_camellia_arm_tables,.-_gcry_camellia_arm_tables;
+ELF(.size _gcry_camellia_arm_tables,.-_gcry_camellia_arm_tables;)
 
 #endif /*HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS*/
 #endif /*__AARCH64EL__*/
diff --git a/cipher/chacha20-aarch64.S b/cipher/chacha20-aarch64.S
index 5990a08..3844d4e 100644
--- a/cipher/chacha20-aarch64.S
+++ b/cipher/chacha20-aarch64.S
@@ -27,7 +27,7 @@
  * Public domain.
  */
 
-#include <config.h>
+#include "asm-common-aarch64.h"
 
 #if defined(__AARCH64EL__) && \
     defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) && \
@@ -38,9 +38,15 @@
 
 .text
 
+#ifdef _WIN32
+#define GET_DATA_POINTER(reg, name) \
+	adrp    reg, name ; \
+	add     reg, reg, #:lo12:name ;
+#else
 #define GET_DATA_POINTER(reg, name) \
 	adrp    reg, :got:name ; \
 	ldr     reg, [reg, #:got_lo12:name] ;
+#endif
 
 /* register macros */
 #define INPUT     x0
@@ -148,7 +154,7 @@ chacha20_data:
 
 .align 3
 .globl _gcry_chacha20_aarch64_blocks4
-.type _gcry_chacha20_aarch64_blocks4,%function;
+ELF(.type _gcry_chacha20_aarch64_blocks4,%function;)
 
 _gcry_chacha20_aarch64_blocks4:
 	/* input:
@@ -303,6 +309,6 @@ _gcry_chacha20_aarch64_blocks4:
 
 	eor x0, x0, x0
 	ret
-.size _gcry_chacha20_aarch64_blocks4, .-_gcry_chacha20_aarch64_blocks4;
+ELF(.size _gcry_chacha20_aarch64_blocks4, .-_gcry_chacha20_aarch64_blocks4;)
 
 #endif
diff --git a/cipher/cipher-gcm-armv8-aarch64-ce.S b/cipher/cipher-gcm-armv8-aarch64-ce.S
index 0cfaf1c..b6c4f59 100644
--- a/cipher/cipher-gcm-armv8-aarch64-ce.S
+++ b/cipher/cipher-gcm-armv8-aarch64-ce.S
@@ -17,7 +17,7 @@
  * License along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <config.h>
+#include "asm-common-aarch64.h"
 
 #if defined(__AARCH64EL__) && \
     defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) && \
@@ -174,7 +174,7 @@ gcry_gcm_reduction_constant:
  */
 .align 3
 .globl _gcry_ghash_armv8_ce_pmull
-.type  _gcry_ghash_armv8_ce_pmull,%function;
+ELF(.type  _gcry_ghash_armv8_ce_pmull,%function;)
 _gcry_ghash_armv8_ce_pmull:
   /* input:
    *    x0: gcm_key
@@ -360,7 +360,7 @@ _gcry_ghash_armv8_ce_pmull:
 .Ldo_nothing:
   mov x0, #0
   ret
-.size _gcry_ghash_armv8_ce_pmull,.-_gcry_ghash_armv8_ce_pmull;
+ELF(.size _gcry_ghash_armv8_ce_pmull,.-_gcry_ghash_armv8_ce_pmull;)
 
 
 /*
@@ -368,7 +368,7 @@ _gcry_ghash_armv8_ce_pmull:
  */
 .align 3
 .globl _gcry_ghash_setup_armv8_ce_pmull
-.type  _gcry_ghash_setup_armv8_ce_pmull,%function;
+ELF(.type  _gcry_ghash_setup_armv8_ce_pmull,%function;)
 _gcry_ghash_setup_armv8_ce_pmull:
   /* input:
    *	x0: gcm_key
@@ -408,6 +408,6 @@ _gcry_ghash_setup_armv8_ce_pmull:
   st1 {rh5.16b-rh6.16b}, [x1]
 
   ret
-.size _gcry_ghash_setup_armv8_ce_pmull,.-_gcry_ghash_setup_armv8_ce_pmull;
+ELF(.size _gcry_ghash_setup_armv8_ce_pmull,.-_gcry_ghash_setup_armv8_ce_pmull;)
 
 #endif
diff --git a/cipher/rijndael-aarch64.S b/cipher/rijndael-aarch64.S
index e533bbe..aad7487 100644
--- a/cipher/rijndael-aarch64.S
+++ b/cipher/rijndael-aarch64.S
@@ -18,7 +18,7 @@
  * License along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <config.h>
+#include "asm-common-aarch64.h"
 
 #if defined(__AARCH64EL__)
 #ifdef HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS
@@ -206,7 +206,7 @@
 	addroundkey(rna, rnb, rnc, rnd, ra, rb, rc, rd, dummy);
 
 .globl _gcry_aes_arm_encrypt_block
-.type   _gcry_aes_arm_encrypt_block,%function;
+ELF(.type   _gcry_aes_arm_encrypt_block,%function;)
 
 _gcry_aes_arm_encrypt_block:
 	/* input:
@@ -285,7 +285,7 @@ _gcry_aes_arm_encrypt_block:
 	lastencround(11, RNA, RNB, RNC, RND, RA, RB, RC, RD);
 
 	b .Lenc_done;
-.size _gcry_aes_arm_encrypt_block,.-_gcry_aes_arm_encrypt_block;
+ELF(.size _gcry_aes_arm_encrypt_block,.-_gcry_aes_arm_encrypt_block;)
 
 #define addroundkey_dec(round, ra, rb, rc, rd, rna, rnb, rnc, rnd) \
 	ldr rna, [CTX, #(((round) * 16) + 0 * 4)]; \
@@ -429,7 +429,7 @@ _gcry_aes_arm_encrypt_block:
 	addroundkey(rna, rnb, rnc, rnd, ra, rb, rc, rd, dummy);
 
 .globl _gcry_aes_arm_decrypt_block
-.type   _gcry_aes_arm_decrypt_block,%function;
+ELF(.type   _gcry_aes_arm_decrypt_block,%function;)
 
 _gcry_aes_arm_decrypt_block:
 	/* input:
@@ -504,7 +504,7 @@ _gcry_aes_arm_decrypt_block:
 	decround(9, RA, RB, RC, RD, RNA, RNB, RNC, RND, preload_first_key);
 
 	b .Ldec_tail;
-.size _gcry_aes_arm_decrypt_block,.-_gcry_aes_arm_decrypt_block;
+ELF(.size _gcry_aes_arm_decrypt_block,.-_gcry_aes_arm_decrypt_block;)
 
 #endif /*HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS*/
 #endif /*__AARCH64EL__ */
diff --git a/cipher/rijndael-armv8-aarch64-ce.S b/cipher/rijndael-armv8-aarch64-ce.S
index 40097a7..5859557 100644
--- a/cipher/rijndael-armv8-aarch64-ce.S
+++ b/cipher/rijndael-armv8-aarch64-ce.S
@@ -17,7 +17,7 @@
  * License along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <config.h>
+#include "asm-common-aarch64.h"
 
 #if defined(__AARCH64EL__) && \
     defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) && \
@@ -239,7 +239,7 @@
  */
 .align 3
 .globl _gcry_aes_enc_armv8_ce
-.type  _gcry_aes_enc_armv8_ce,%function;
+ELF(.type  _gcry_aes_enc_armv8_ce,%function;)
 _gcry_aes_enc_armv8_ce:
   /* input:
    *    x0: keysched
@@ -291,7 +291,7 @@ _gcry_aes_enc_armv8_ce:
   CLEAR_REG(vk13)
   CLEAR_REG(vk14)
   b .Lenc1_tail
-.size _gcry_aes_enc_armv8_ce,.-_gcry_aes_enc_armv8_ce;
+ELF(.size _gcry_aes_enc_armv8_ce,.-_gcry_aes_enc_armv8_ce;)
 
 
 /*
@@ -301,7 +301,7 @@ _gcry_aes_enc_armv8_ce:
  */
 .align 3
 .globl _gcry_aes_dec_armv8_ce
-.type  _gcry_aes_dec_armv8_ce,%function;
+ELF(.type  _gcry_aes_dec_armv8_ce,%function;)
 _gcry_aes_dec_armv8_ce:
   /* input:
    *    x0: keysched
@@ -353,7 +353,7 @@ _gcry_aes_dec_armv8_ce:
   CLEAR_REG(vk13)
   CLEAR_REG(vk14)
   b .Ldec1_tail
-.size _gcry_aes_dec_armv8_ce,.-_gcry_aes_dec_armv8_ce;
+ELF(.size _gcry_aes_dec_armv8_ce,.-_gcry_aes_dec_armv8_ce;)
 
 
 /*
@@ -366,7 +366,7 @@ _gcry_aes_dec_armv8_ce:
 
 .align 3
 .globl _gcry_aes_cbc_enc_armv8_ce
-.type  _gcry_aes_cbc_enc_armv8_ce,%function;
+ELF(.type  _gcry_aes_cbc_enc_armv8_ce,%function;)
 _gcry_aes_cbc_enc_armv8_ce:
   /* input:
    *    x0: keysched
@@ -419,7 +419,7 @@ _gcry_aes_cbc_enc_armv8_ce:
 
 .Lcbc_enc_skip:
   ret
-.size _gcry_aes_cbc_enc_armv8_ce,.-_gcry_aes_cbc_enc_armv8_ce;
+ELF(.size _gcry_aes_cbc_enc_armv8_ce,.-_gcry_aes_cbc_enc_armv8_ce;)
 
 /*
  * void _gcry_aes_cbc_dec_armv8_ce (const void *keysched,
@@ -430,7 +430,7 @@ _gcry_aes_cbc_enc_armv8_ce:
 
 .align 3
 .globl _gcry_aes_cbc_dec_armv8_ce
-.type  _gcry_aes_cbc_dec_armv8_ce,%function;
+ELF(.type  _gcry_aes_cbc_dec_armv8_ce,%function;)
 _gcry_aes_cbc_dec_armv8_ce:
   /* input:
    *    x0: keysched
@@ -515,7 +515,7 @@ _gcry_aes_cbc_dec_armv8_ce:
 
 .Lcbc_dec_skip:
   ret
-.size _gcry_aes_cbc_dec_armv8_ce,.-_gcry_aes_cbc_dec_armv8_ce;
+ELF(.size _gcry_aes_cbc_dec_armv8_ce,.-_gcry_aes_cbc_dec_armv8_ce;)
 
 
 /*
@@ -527,7 +527,7 @@ _gcry_aes_cbc_dec_armv8_ce:
 
 .align 3
 .globl _gcry_aes_ctr_enc_armv8_ce
-.type  _gcry_aes_ctr_enc_armv8_ce,%function;
+ELF(.type  _gcry_aes_ctr_enc_armv8_ce,%function;)
 _gcry_aes_ctr_enc_armv8_ce:
   /* input:
    *    r0: keysched
@@ -669,7 +669,7 @@ _gcry_aes_ctr_enc_armv8_ce:
 .Lctr_enc_skip:
   ret
 
-.size _gcry_aes_ctr_enc_armv8_ce,.-_gcry_aes_ctr_enc_armv8_ce;
+ELF(.size _gcry_aes_ctr_enc_armv8_ce,.-_gcry_aes_ctr_enc_armv8_ce;)
 
 
 /*
@@ -681,7 +681,7 @@ _gcry_aes_ctr_enc_armv8_ce:
 
 .align 3
 .globl _gcry_aes_cfb_enc_armv8_ce
-.type  _gcry_aes_cfb_enc_armv8_ce,%function;
+ELF(.type  _gcry_aes_cfb_enc_armv8_ce,%function;)
 _gcry_aes_cfb_enc_armv8_ce:
   /* input:
    *    r0: keysched
@@ -732,7 +732,7 @@ _gcry_aes_cfb_enc_armv8_ce:
 
 .Lcfb_enc_skip:
   ret
-.size _gcry_aes_cfb_enc_armv8_ce,.-_gcry_aes_cfb_enc_armv8_ce;
+ELF(.size _gcry_aes_cfb_enc_armv8_ce,.-_gcry_aes_cfb_enc_armv8_ce;)
 
 
 /*
@@ -744,7 +744,7 @@ _gcry_aes_cfb_enc_armv8_ce:
 
 .align 3
 .globl _gcry_aes_cfb_dec_armv8_ce
-.type  _gcry_aes_cfb_dec_armv8_ce,%function;
+ELF(.type  _gcry_aes_cfb_dec_armv8_ce,%function;)
 _gcry_aes_cfb_dec_armv8_ce:
   /* input:
    *    r0: keysched
@@ -829,7 +829,7 @@ _gcry_aes_cfb_dec_armv8_ce:
 
 .Lcfb_dec_skip:
   ret
-.size _gcry_aes_cfb_dec_armv8_ce,.-_gcry_aes_cfb_dec_armv8_ce;
+ELF(.size _gcry_aes_cfb_dec_armv8_ce,.-_gcry_aes_cfb_dec_armv8_ce;)
 
 
 /*
@@ -846,7 +846,7 @@ _gcry_aes_cfb_dec_armv8_ce:
 
 .align 3
 .globl _gcry_aes_ocb_enc_armv8_ce
-.type  _gcry_aes_ocb_enc_armv8_ce,%function;
+ELF(.type  _gcry_aes_ocb_enc_armv8_ce,%function;)
 _gcry_aes_ocb_enc_armv8_ce:
   /* input:
    *    x0: keysched
@@ -979,7 +979,7 @@ _gcry_aes_ocb_enc_armv8_ce:
   CLEAR_REG(v16)
 
   ret
-.size _gcry_aes_ocb_enc_armv8_ce,.-_gcry_aes_ocb_enc_armv8_ce;
+ELF(.size _gcry_aes_ocb_enc_armv8_ce,.-_gcry_aes_ocb_enc_armv8_ce;)
 
 
 /*
@@ -996,7 +996,7 @@ _gcry_aes_ocb_enc_armv8_ce:
 
 .align 3
 .globl _gcry_aes_ocb_dec_armv8_ce
-.type  _gcry_aes_ocb_dec_armv8_ce,%function;
+ELF(.type  _gcry_aes_ocb_dec_armv8_ce,%function;)
 _gcry_aes_ocb_dec_armv8_ce:
   /* input:
    *    x0: keysched
@@ -1129,7 +1129,7 @@ _gcry_aes_ocb_dec_armv8_ce:
   CLEAR_REG(v16)
 
   ret
-.size _gcry_aes_ocb_dec_armv8_ce,.-_gcry_aes_ocb_dec_armv8_ce;
+ELF(.size _gcry_aes_ocb_dec_armv8_ce,.-_gcry_aes_ocb_dec_armv8_ce;)
 
 
 /*
@@ -1145,7 +1145,7 @@ _gcry_aes_ocb_dec_armv8_ce:
 
 .align 3
 .globl _gcry_aes_ocb_auth_armv8_ce
-.type  _gcry_aes_ocb_auth_armv8_ce,%function;
+ELF(.type  _gcry_aes_ocb_auth_armv8_ce,%function;)
 _gcry_aes_ocb_auth_armv8_ce:
   /* input:
    *    x0: keysched
@@ -1273,7 +1273,7 @@ _gcry_aes_ocb_auth_armv8_ce:
   CLEAR_REG(v16)
 
   ret
-.size _gcry_aes_ocb_auth_armv8_ce,.-_gcry_aes_ocb_auth_armv8_ce;
+ELF(.size _gcry_aes_ocb_auth_armv8_ce,.-_gcry_aes_ocb_auth_armv8_ce;)
 
 
 /*
@@ -1285,7 +1285,7 @@ _gcry_aes_ocb_auth_armv8_ce:
 
 .align 3
 .globl _gcry_aes_xts_enc_armv8_ce
-.type  _gcry_aes_xts_enc_armv8_ce,%function;
+ELF(.type  _gcry_aes_xts_enc_armv8_ce,%function;)
 _gcry_aes_xts_enc_armv8_ce:
   /* input:
    *    r0: keysched
@@ -1410,7 +1410,7 @@ _gcry_aes_xts_enc_armv8_ce:
 .Lxts_enc_skip:
   ret
 
-.size _gcry_aes_xts_enc_armv8_ce,.-_gcry_aes_xts_enc_armv8_ce;
+ELF(.size _gcry_aes_xts_enc_armv8_ce,.-_gcry_aes_xts_enc_armv8_ce;)
 
 
 /*
@@ -1422,7 +1422,7 @@ _gcry_aes_xts_enc_armv8_ce:
 
 .align 3
 .globl _gcry_aes_xts_dec_armv8_ce
-.type  _gcry_aes_xts_dec_armv8_ce,%function;
+ELF(.type  _gcry_aes_xts_dec_armv8_ce,%function;)
 _gcry_aes_xts_dec_armv8_ce:
   /* input:
    *    r0: keysched
@@ -1547,7 +1547,7 @@ _gcry_aes_xts_dec_armv8_ce:
 .Lxts_dec_skip:
   ret
 
-.size _gcry_aes_xts_dec_armv8_ce,.-_gcry_aes_xts_dec_armv8_ce;
+ELF(.size _gcry_aes_xts_dec_armv8_ce,.-_gcry_aes_xts_dec_armv8_ce;)
 
 
 /*
@@ -1555,7 +1555,7 @@ _gcry_aes_xts_dec_armv8_ce:
  */
 .align 3
 .globl _gcry_aes_sbox4_armv8_ce
-.type  _gcry_aes_sbox4_armv8_ce,%function;
+ELF(.type  _gcry_aes_sbox4_armv8_ce,%function;)
 _gcry_aes_sbox4_armv8_ce:
   /* See "Gouvêa, C. P. L. & López, J. Implementing GCM on ARMv8. Topics in
    * Cryptology — CT-RSA 2015" for details.
@@ -1568,7 +1568,7 @@ _gcry_aes_sbox4_armv8_ce:
   mov w0, v0.S[0]
   CLEAR_REG(v0)
   ret
-.size _gcry_aes_sbox4_armv8_ce,.-_gcry_aes_sbox4_armv8_ce;
+ELF(.size _gcry_aes_sbox4_armv8_ce,.-_gcry_aes_sbox4_armv8_ce;)
 
 
 /*
@@ -1576,13 +1576,13 @@ _gcry_aes_sbox4_armv8_ce:
  */
 .align 3
 .globl _gcry_aes_invmixcol_armv8_ce
-.type  _gcry_aes_invmixcol_armv8_ce,%function;
+ELF(.type  _gcry_aes_invmixcol_armv8_ce,%function;)
 _gcry_aes_invmixcol_armv8_ce:
   ld1 {v0.16b}, [x1]
   aesimc v0.16b, v0.16b
   st1 {v0.16b}, [x0]
   CLEAR_REG(v0)
   ret
-.size _gcry_aes_invmixcol_armv8_ce,.-_gcry_aes_invmixcol_armv8_ce;
+ELF(.size _gcry_aes_invmixcol_armv8_ce,.-_gcry_aes_invmixcol_armv8_ce;)
 
 #endif
diff --git a/cipher/sha1-armv8-aarch64-ce.S b/cipher/sha1-armv8-aarch64-ce.S
index ec1810d..aeb67a1 100644
--- a/cipher/sha1-armv8-aarch64-ce.S
+++ b/cipher/sha1-armv8-aarch64-ce.S
@@ -17,7 +17,7 @@
  * License along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <config.h>
+#include "asm-common-aarch64.h"
 
 #if defined(__AARCH64EL__) && \
     defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) && \
@@ -103,7 +103,7 @@ gcry_sha1_aarch64_ce_K_VEC:
  */
 .align 3
 .globl _gcry_sha1_transform_armv8_ce
-.type  _gcry_sha1_transform_armv8_ce,%function;
+ELF(.type  _gcry_sha1_transform_armv8_ce,%function;)
 _gcry_sha1_transform_armv8_ce:
   /* input:
    *	x0: ctx, CTX
@@ -199,6 +199,6 @@ _gcry_sha1_transform_armv8_ce:
 .Ldo_nothing:
   mov x0, #0
   ret
-.size _gcry_sha1_transform_armv8_ce,.-_gcry_sha1_transform_armv8_ce;
+ELF(.size _gcry_sha1_transform_armv8_ce,.-_gcry_sha1_transform_armv8_ce;)
 
 #endif
diff --git a/cipher/sha256-armv8-aarch64-ce.S b/cipher/sha256-armv8-aarch64-ce.S
index a4575da..6b3ad32 100644
--- a/cipher/sha256-armv8-aarch64-ce.S
+++ b/cipher/sha256-armv8-aarch64-ce.S
@@ -17,7 +17,7 @@
  * License along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <config.h>
+#include "asm-common-aarch64.h"
 
 #if defined(__AARCH64EL__) && \
     defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) && \
@@ -113,7 +113,7 @@ gcry_sha256_aarch64_ce_K:
  */
 .align 3
 .globl _gcry_sha256_transform_armv8_ce
-.type  _gcry_sha256_transform_armv8_ce,%function;
+ELF(.type  _gcry_sha256_transform_armv8_ce,%function;)
 _gcry_sha256_transform_armv8_ce:
   /* input:
    *	r0: ctx, CTX
@@ -213,6 +213,6 @@ _gcry_sha256_transform_armv8_ce:
 .Ldo_nothing:
   mov x0, #0
   ret
-.size _gcry_sha256_transform_armv8_ce,.-_gcry_sha256_transform_armv8_ce;
+ELF(.size _gcry_sha256_transform_armv8_ce,.-_gcry_sha256_transform_armv8_ce;)
 
 #endif
diff --git a/cipher/twofish-aarch64.S b/cipher/twofish-aarch64.S
index 99c4675..adee412 100644
--- a/cipher/twofish-aarch64.S
+++ b/cipher/twofish-aarch64.S
@@ -18,7 +18,7 @@
  * License along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <config.h>
+#include "asm-common-aarch64.h"
 
 #if defined(__AARCH64EL__)
 #ifdef HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS
@@ -217,7 +217,7 @@
 	ror1(RD);
 
 .globl _gcry_twofish_arm_encrypt_block
-.type   _gcry_twofish_arm_encrypt_block,%function;
+ELF(.type   _gcry_twofish_arm_encrypt_block,%function;)
 
 _gcry_twofish_arm_encrypt_block:
 	/* input:
@@ -263,10 +263,10 @@ _gcry_twofish_arm_encrypt_block:
 
 	ret;
 .ltorg
-.size _gcry_twofish_arm_encrypt_block,.-_gcry_twofish_arm_encrypt_block;
+ELF(.size _gcry_twofish_arm_encrypt_block,.-_gcry_twofish_arm_encrypt_block;)
 
 .globl _gcry_twofish_arm_decrypt_block
-.type   _gcry_twofish_arm_decrypt_block,%function;
+ELF(.type   _gcry_twofish_arm_decrypt_block,%function;)
 
 _gcry_twofish_arm_decrypt_block:
 	/* input:
@@ -311,7 +311,7 @@ _gcry_twofish_arm_decrypt_block:
 	str_output_le(RDST, RA, RB, RC, RD, RT0, RT1);
 
 	ret;
-.size _gcry_twofish_arm_decrypt_block,.-_gcry_twofish_arm_decrypt_block;
+ELF(.size _gcry_twofish_arm_decrypt_block,.-_gcry_twofish_arm_decrypt_block;)
 
 #endif /*HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS*/
 #endif /*__AARCH64EL__*/
diff --git a/configure.ac b/configure.ac
index b5d7211..330485f 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1119,10 +1119,6 @@ AC_CACHE_CHECK([whether GCC assembler is compatible for ARMv8/Aarch64 assembly i
                 "eor x0, x0, x30, ror #12;\n\t"
                 "add x0, x0, x30, asr #12;\n\t"
                 "eor v0.16b, v0.16b, v31.16b;\n\t"
-
-                /* Test if '.type' and '.size' are supported.  */
-                ".size asmfunc,.-asmfunc;\n\t"
-                ".type asmfunc,@function;\n\t"
             );]])],
           [gcry_cv_gcc_aarch64_platform_as_ok=yes])])
 if test "$gcry_cv_gcc_aarch64_platform_as_ok" = "yes" ; then

commit 4e1b628f492643d4e9b830bcdab7b49daaec5854
Author: Martin Storsjö <martin at martin.st>
Date:   Thu Mar 22 23:32:39 2018 +0200

    aarch64: camellia: Only use the lower 32 bits of an int parameter
    
    * cipher/camellia-aarch64.S: Use 'w3' instead of 'x3'.
    --
    The keybits parameter is declared as int; for such 32-bit arguments,
    the upper half of the 64-bit argument register is undefined, not
    guaranteed to be zero.
    
    Signed-off-by: Martin Storsjö <martin at martin.st>
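
    As a hedged sketch (the instructions are illustrative, not taken
    from the camellia code): under the AArch64 procedure call standard,
    a 32-bit int argument only defines the low 32 bits of its register,
    so the callee has to read the w-form:

        /* keybits arrives in the fourth argument register, w3/x3: */
        cmp     w3, #128        /* correct: reads only the defined 32 bits */
        cmp     x3, #128        /* wrong: bits 32-63 may hold garbage */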

diff --git a/cipher/camellia-aarch64.S b/cipher/camellia-aarch64.S
index 440f69f..68d2a7d 100644
--- a/cipher/camellia-aarch64.S
+++ b/cipher/camellia-aarch64.S
@@ -33,7 +33,7 @@
 #define CTX x0
 #define RDST x1
 #define RSRC x2
-#define RKEYBITS x3
+#define RKEYBITS w3
 
 #define RTAB1 x4
 #define RTAB2 x5

commit 36e916fc332eda74963192b1c0bf6860a3e5d67b
Author: Martin Storsjö <martin at martin.st>
Date:   Thu Mar 22 23:32:38 2018 +0200

    aarch64: Fix assembling chacha20-aarch64.S with clang/llvm
    
    * cipher/chacha20-aarch64.S: Remove superfluous lane counts.
    --
    When referring to a specific lane, one doesn't need to specify
    the total number of lanes of the register. With GNU binutils,
    both forms are accepted, while clang/llvm rejects the form
    with the unnecessary number of lanes.
    
    Signed-off-by: Martin Storsjö <martin at martin.st>
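
    A minimal illustration of the two spellings (registers are
    arbitrary):

        dup     v0.4s, v1.4s[0]    /* redundant lane count: GNU as only */
        dup     v0.4s, v1.s[0]     /* canonical form: accepted by both */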

diff --git a/cipher/chacha20-aarch64.S b/cipher/chacha20-aarch64.S
index 739ddde..5990a08 100644
--- a/cipher/chacha20-aarch64.S
+++ b/cipher/chacha20-aarch64.S
@@ -170,27 +170,27 @@ _gcry_chacha20_aarch64_blocks4:
 	mov ROUND, #20;
 	ld1 {VTMP1.16b-VTMP3.16b}, [INPUT_POS];
 
-	dup X12.4s, X15.4s[0];
-	dup X13.4s, X15.4s[1];
+	dup X12.4s, X15.s[0];
+	dup X13.4s, X15.s[1];
 	ldr CTR, [INPUT_CTR];
 	add X12.4s, X12.4s, VCTR.4s;
-	dup X0.4s, VTMP1.4s[0];
-	dup X1.4s, VTMP1.4s[1];
-	dup X2.4s, VTMP1.4s[2];
-	dup X3.4s, VTMP1.4s[3];
-	dup X14.4s, X15.4s[2];
+	dup X0.4s, VTMP1.s[0];
+	dup X1.4s, VTMP1.s[1];
+	dup X2.4s, VTMP1.s[2];
+	dup X3.4s, VTMP1.s[3];
+	dup X14.4s, X15.s[2];
 	cmhi VTMP0.4s, VCTR.4s, X12.4s;
-	dup X15.4s, X15.4s[3];
+	dup X15.4s, X15.s[3];
 	add CTR, CTR, #4; /* Update counter */
-	dup X4.4s, VTMP2.4s[0];
-	dup X5.4s, VTMP2.4s[1];
-	dup X6.4s, VTMP2.4s[2];
-	dup X7.4s, VTMP2.4s[3];
+	dup X4.4s, VTMP2.s[0];
+	dup X5.4s, VTMP2.s[1];
+	dup X6.4s, VTMP2.s[2];
+	dup X7.4s, VTMP2.s[3];
 	sub X13.4s, X13.4s, VTMP0.4s;
-	dup X8.4s, VTMP3.4s[0];
-	dup X9.4s, VTMP3.4s[1];
-	dup X10.4s, VTMP3.4s[2];
-	dup X11.4s, VTMP3.4s[3];
+	dup X8.4s, VTMP3.s[0];
+	dup X9.4s, VTMP3.s[1];
+	dup X10.4s, VTMP3.s[2];
+	dup X11.4s, VTMP3.s[3];
 	mov X12_TMP.16b, X12.16b;
 	mov X13_TMP.16b, X13.16b;
 	str CTR, [INPUT_CTR];
@@ -208,19 +208,19 @@ _gcry_chacha20_aarch64_blocks4:
 	PLUS(X12, X12_TMP);        /* INPUT + 12 * 4 + counter */
 	PLUS(X13, X13_TMP);        /* INPUT + 13 * 4 + counter */
 
-	dup VTMP2.4s, VTMP0.4s[0]; /* INPUT + 0 * 4 */
-	dup VTMP3.4s, VTMP0.4s[1]; /* INPUT + 1 * 4 */
-	dup X12_TMP.4s, VTMP0.4s[2]; /* INPUT + 2 * 4 */
-	dup X13_TMP.4s, VTMP0.4s[3]; /* INPUT + 3 * 4 */
+	dup VTMP2.4s, VTMP0.s[0]; /* INPUT + 0 * 4 */
+	dup VTMP3.4s, VTMP0.s[1]; /* INPUT + 1 * 4 */
+	dup X12_TMP.4s, VTMP0.s[2]; /* INPUT + 2 * 4 */
+	dup X13_TMP.4s, VTMP0.s[3]; /* INPUT + 3 * 4 */
 	PLUS(X0, VTMP2);
 	PLUS(X1, VTMP3);
 	PLUS(X2, X12_TMP);
 	PLUS(X3, X13_TMP);
 
-	dup VTMP2.4s, VTMP1.4s[0]; /* INPUT + 4 * 4 */
-	dup VTMP3.4s, VTMP1.4s[1]; /* INPUT + 5 * 4 */
-	dup X12_TMP.4s, VTMP1.4s[2]; /* INPUT + 6 * 4 */
-	dup X13_TMP.4s, VTMP1.4s[3]; /* INPUT + 7 * 4 */
+	dup VTMP2.4s, VTMP1.s[0]; /* INPUT + 4 * 4 */
+	dup VTMP3.4s, VTMP1.s[1]; /* INPUT + 5 * 4 */
+	dup X12_TMP.4s, VTMP1.s[2]; /* INPUT + 6 * 4 */
+	dup X13_TMP.4s, VTMP1.s[3]; /* INPUT + 7 * 4 */
 	ld1 {VTMP0.16b, VTMP1.16b}, [INPUT_POS];
 	mov INPUT_POS, INPUT;
 	PLUS(X4, VTMP2);
@@ -228,12 +228,12 @@ _gcry_chacha20_aarch64_blocks4:
 	PLUS(X6, X12_TMP);
 	PLUS(X7, X13_TMP);
 
-	dup VTMP2.4s, VTMP0.4s[0]; /* INPUT + 8 * 4 */
-	dup VTMP3.4s, VTMP0.4s[1]; /* INPUT + 9 * 4 */
-	dup X12_TMP.4s, VTMP0.4s[2]; /* INPUT + 10 * 4 */
-	dup X13_TMP.4s, VTMP0.4s[3]; /* INPUT + 11 * 4 */
-	dup VTMP0.4s, VTMP1.4s[2]; /* INPUT + 14 * 4 */
-	dup VTMP1.4s, VTMP1.4s[3]; /* INPUT + 15 * 4 */
+	dup VTMP2.4s, VTMP0.s[0]; /* INPUT + 8 * 4 */
+	dup VTMP3.4s, VTMP0.s[1]; /* INPUT + 9 * 4 */
+	dup X12_TMP.4s, VTMP0.s[2]; /* INPUT + 10 * 4 */
+	dup X13_TMP.4s, VTMP0.s[3]; /* INPUT + 11 * 4 */
+	dup VTMP0.4s, VTMP1.s[2]; /* INPUT + 14 * 4 */
+	dup VTMP1.4s, VTMP1.s[3]; /* INPUT + 15 * 4 */
 	PLUS(X8, VTMP2);
 	PLUS(X9, VTMP3);
 	PLUS(X10, X12_TMP);

commit ec0a2f25c0f64a7b65b373508ce9081e10461965
Author: Martin Storsjö <martin at martin.st>
Date:   Thu Mar 22 23:32:37 2018 +0200

    aarch64: mpi: Fix building the mpi aarch64 assembly for windows
    
    * mpi/aarch64/mpih-add1.S: Use ELF macro.
    * mpi/aarch64/mpih-mul1.S: Use ELF macro.
    * mpi/aarch64/mpih-mul2.S: Use ELF macro.
    * mpi/aarch64/mpih-mul3.S: Use ELF macro.
    * mpi/aarch64/mpih-sub1.S: Use ELF macro.
    * mpi/asm-common-aarch64.h: New.
    --
    
    The mpi aarch64 assembly is enabled as soon as the compiler supports
    inline assembly, without the check for .type and .size support that
    gates the rest of the assembly in cipher/*.S. (The .type and .size
    directives are only supported on ELF.)
    
    Signed-off-by: Martin Storsjö <martin at martin.st>
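
    A hedged sketch of the resulting pattern (the function name is
    illustrative; see the mpih-*.S diffs below for the real uses):

        #include "asm-common-aarch64.h"

        .text
        .globl  _gcry_mpih_example
        ELF(.type  _gcry_mpih_example,%function)   /* ELF: directive emitted */
        _gcry_mpih_example:                        /* non-ELF: expands to nothing */
            ret
        ELF(.size  _gcry_mpih_example,.-_gcry_mpih_example)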

diff --git a/mpi/aarch64/mpih-add1.S b/mpi/aarch64/mpih-add1.S
index fa8cd01..4ead1c2 100644
--- a/mpi/aarch64/mpih-add1.S
+++ b/mpi/aarch64/mpih-add1.S
@@ -22,6 +22,7 @@
 
 #include "sysdep.h"
 #include "asm-syntax.h"
+#include "asm-common-aarch64.h"
 
 /*******************
  *  mpi_limb_t
@@ -34,7 +35,7 @@
 .text
 
 .globl _gcry_mpih_add_n
-.type  _gcry_mpih_add_n,%function
+ELF(.type  _gcry_mpih_add_n,%function)
 _gcry_mpih_add_n:
 	and	x5, x3, #3;
 	adds	xzr, xzr, xzr; /* clear carry flag */
@@ -68,4 +69,4 @@ _gcry_mpih_add_n:
 .Lend:
 	adc	x0, xzr, xzr;
 	ret;
-.size _gcry_mpih_add_n,.-_gcry_mpih_add_n;
+ELF(.size _gcry_mpih_add_n,.-_gcry_mpih_add_n;)
diff --git a/mpi/aarch64/mpih-mul1.S b/mpi/aarch64/mpih-mul1.S
index 65e98fe..8a86269 100644
--- a/mpi/aarch64/mpih-mul1.S
+++ b/mpi/aarch64/mpih-mul1.S
@@ -22,6 +22,7 @@
 
 #include "sysdep.h"
 #include "asm-syntax.h"
+#include "asm-common-aarch64.h"
 
 /*******************
  * mpi_limb_t
@@ -34,7 +35,7 @@
 .text
 
 .globl _gcry_mpih_mul_1
-.type  _gcry_mpih_mul_1,%function
+ELF(.type  _gcry_mpih_mul_1,%function)
 _gcry_mpih_mul_1:
 	and	x5, x2, #3;
 	mov	x4, xzr;
@@ -93,4 +94,4 @@ _gcry_mpih_mul_1:
 .Lend:
 	mov	x0, x4;
 	ret;
-.size _gcry_mpih_mul_1,.-_gcry_mpih_mul_1;
+ELF(.size _gcry_mpih_mul_1,.-_gcry_mpih_mul_1;)
diff --git a/mpi/aarch64/mpih-mul2.S b/mpi/aarch64/mpih-mul2.S
index bd3b2c9..c7c08e5 100644
--- a/mpi/aarch64/mpih-mul2.S
+++ b/mpi/aarch64/mpih-mul2.S
@@ -22,6 +22,7 @@
 
 #include "sysdep.h"
 #include "asm-syntax.h"
+#include "asm-common-aarch64.h"
 
 /*******************
  * mpi_limb_t
@@ -34,7 +35,7 @@
 .text
 
 .globl _gcry_mpih_addmul_1
-.type  _gcry_mpih_addmul_1,%function
+ELF(.type  _gcry_mpih_addmul_1,%function)
 _gcry_mpih_addmul_1:
 	and	x5, x2, #3;
 	mov	x6, xzr;
@@ -105,4 +106,4 @@ _gcry_mpih_addmul_1:
 .Lend:
 	mov	x0, x6;
 	ret;
-.size _gcry_mpih_addmul_1,.-_gcry_mpih_addmul_1;
+ELF(.size _gcry_mpih_addmul_1,.-_gcry_mpih_addmul_1;)
diff --git a/mpi/aarch64/mpih-mul3.S b/mpi/aarch64/mpih-mul3.S
index a58bc53..ccc961e 100644
--- a/mpi/aarch64/mpih-mul3.S
+++ b/mpi/aarch64/mpih-mul3.S
@@ -22,6 +22,7 @@
 
 #include "sysdep.h"
 #include "asm-syntax.h"
+#include "asm-common-aarch64.h"
 
 /*******************
  * mpi_limb_t
@@ -34,7 +35,7 @@
 .text
 
 .globl _gcry_mpih_submul_1
-.type  _gcry_mpih_submul_1,%function
+ELF(.type  _gcry_mpih_submul_1,%function)
 _gcry_mpih_submul_1:
 	and	x5, x2, #3;
 	mov	x7, xzr;
@@ -118,4 +119,4 @@ _gcry_mpih_submul_1:
 .Loop_end:
 	cinc	x0, x7, cc;
 	ret;
-.size _gcry_mpih_submul_1,.-_gcry_mpih_submul_1;
+ELF(.size _gcry_mpih_submul_1,.-_gcry_mpih_submul_1;)
diff --git a/mpi/aarch64/mpih-sub1.S b/mpi/aarch64/mpih-sub1.S
index cbf2f08..4a66373 100644
--- a/mpi/aarch64/mpih-sub1.S
+++ b/mpi/aarch64/mpih-sub1.S
@@ -22,6 +22,7 @@
 
 #include "sysdep.h"
 #include "asm-syntax.h"
+#include "asm-common-aarch64.h"
 
 /*******************
  *  mpi_limb_t
@@ -34,7 +35,7 @@
 .text
 
 .globl _gcry_mpih_sub_n
-.type  _gcry_mpih_sub_n,%function
+ELF(.type  _gcry_mpih_sub_n,%function)
 _gcry_mpih_sub_n:
 	and	x5, x3, #3;
 	subs	xzr, xzr, xzr; /* prepare carry flag for sub */
@@ -68,4 +69,4 @@ _gcry_mpih_sub_n:
 .Lend:
 	cset	x0, cc;
 	ret;
-.size _gcry_mpih_sub_n,.-_gcry_mpih_sub_n;
+ELF(.size _gcry_mpih_sub_n,.-_gcry_mpih_sub_n;)
diff --git a/mpi/asm-common-aarch64.h b/mpi/asm-common-aarch64.h
new file mode 100644
index 0000000..1269413
--- /dev/null
+++ b/mpi/asm-common-aarch64.h
@@ -0,0 +1,30 @@
+/* asm-common-aarch64.h  -  Common macros for AArch64 assembly
+ *
+ * Copyright (C) 2018 Martin Storsjö <martin at martin.st>
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef MPI_ASM_COMMON_AARCH64_H
+#define MPI_ASM_COMMON_AARCH64_H
+
+#ifdef __ELF__
+# define ELF(...) __VA_ARGS__
+#else
+# define ELF(...) /*_*/
+#endif
+
+#endif /* MPI_ASM_COMMON_AARCH64_H */

commit ed41d6d6fb4551342b22ef763de1bd60e964e186
Author: Martin Storsjö <martin at martin.st>
Date:   Thu Mar 22 23:32:36 2018 +0200

    random: Don't assume that _WIN64 implies x86_64
    
    * random/rndw32.c: Change __WIN64__ ifdef into __x86_64__.
    --
    
    This fixes building this file for windows on aarch64.
    
    Signed-off-by: Martin Storsjö <martin at martin.st>
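
    A hedged sketch of the distinction (a standalone check program, not
    code from the tree): __WIN64__ is defined for any 64-bit windows
    target, aarch64 included, while __x86_64__ identifies the CPU
    architecture that actually provides the TSC:

        #include <stdio.h>

        int
        main (void)
        {
        #ifdef __WIN64__
          puts ("64-bit windows (any architecture, aarch64 included)");
        #endif
        #ifdef __x86_64__
          puts ("x86_64: the rdtsc-based entropy path is valid here");
        #endif
          return 0;
        }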

diff --git a/random/rndw32.c b/random/rndw32.c
index 7e9ac50..08a8867 100644
--- a/random/rndw32.c
+++ b/random/rndw32.c
@@ -986,7 +986,7 @@ _gcry_rndw32_gather_random_fast (void (*add)(const void*, size_t,
 
      On AMD64, TSC is always available and intrinsic is provided for accessing
      it.  */
-#ifdef __WIN64__
+#ifdef __x86_64__
     {
       unsigned __int64 aint64;
 
@@ -1024,7 +1024,7 @@ _gcry_rndw32_gather_random_fast (void (*add)(const void*, size_t,
           (*add) (&aword, sizeof (aword), origin );
         }
     }
-#endif /*__WIN64__*/
+#endif /*__x86_64__*/
 
 
 }

commit 8ee38806245ca8452051b1a245f44082323f37f6
Author: Jussi Kivilinna <jussi.kivilinna at iki.fi>
Date:   Wed Mar 28 20:32:56 2018 +0300

    Register DCO for Martin Storsjö
    
    --
    
    Signed-off-by: Jussi Kivilinna <jussi.kivilinna at iki.fi>

diff --git a/AUTHORS b/AUTHORS
index 8c553e6..49ab941 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -172,6 +172,9 @@ Jussi Kivilinna <jussi.kivilinna at iki.fi>
 Markus Teich <markus dot teich at stusta dot mhn dot de>
 2014-10-08:20141008180509.GA2770 at trolle:
 
+Martin Storsjö <martin at martin.st>
+2018-03-28:dc1605ce-a47d-34c5-8851-d9569f9ea5d3 at martin.st:
+
 Mathias L. Baumann <mathias.baumann at sociomantic.com>
 2017-01-30:07c06d79-0828-b564-d604-fd16c7c86ebe at sociomantic.com:
 

-----------------------------------------------------------------------

Summary of changes:
 AUTHORS                                         |  3 ++
 mpi/ec-ed25519.c => cipher/asm-common-aarch64.h | 29 +++++-----
 cipher/camellia-aarch64.S                       | 25 +++++----
 cipher/chacha20-aarch64.S                       | 72 +++++++++++++------------
 cipher/cipher-gcm-armv8-aarch64-ce.S            | 10 ++--
 cipher/rijndael-aarch64.S                       | 10 ++--
 cipher/rijndael-armv8-aarch64-ce.S              | 58 ++++++++++----------
 cipher/sha1-armv8-aarch64-ce.S                  |  6 +--
 cipher/sha256-armv8-aarch64-ce.S                |  6 +--
 cipher/twofish-aarch64.S                        | 10 ++--
 configure.ac                                    |  4 --
 mpi/aarch64/mpih-add1.S                         |  5 +-
 mpi/aarch64/mpih-mul1.S                         |  5 +-
 mpi/aarch64/mpih-mul2.S                         |  5 +-
 mpi/aarch64/mpih-mul3.S                         |  5 +-
 mpi/aarch64/mpih-sub1.S                         |  5 +-
 mpi/{ec-internal.h => asm-common-aarch64.h}     | 17 +++---
 random/rndw32.c                                 |  4 +-
 18 files changed, 147 insertions(+), 132 deletions(-)
 copy mpi/ec-ed25519.c => cipher/asm-common-aarch64.h (68%)
 copy mpi/{ec-internal.h => asm-common-aarch64.h} (69%)


hooks/post-receive
-- 
The GNU crypto library
http://git.gnupg.org

